query (string, lengths 9 to 9.05k) | document (string, lengths 10 to 222k) | metadata (dict) | negatives (sequence, length 30) | negative_scores (sequence, length 30) | document_score (string, lengths 4 to 10) | document_rank (string, 2 classes) |
---|---|---|---|---|---|---|
Preprocess requests by attempting to extract face image, and transforming to fit the model's input. Returns list of NDArray: processed images in the model's expected input shape | def preprocess(self, request):
    img_list = []
    input_shape = self.signature['inputs'][0]['data_shape']
    [height, width] = input_shape[2:]
    param_name = self.signature['inputs'][0]['data_name']
    # Iterate over all input images provided with the request, transform and append for inference
    for idx, data in enumerate(request):
        # Extract the input image
        img = data.get(param_name)
        if img is None:
            img = data.get("body")
        if img is None:
            img = data.get("data")
        if img is None or len(img) == 0:
            self.error = "Empty image input"
            return None
        try:
            img_arr = image.read(img).asnumpy()
        except Exception as e:
            logging.warning(e, exc_info=True)
            self.error = "Corrupted image input"
            return None
        # Try to identify face to crop
        face = crop_face(img_arr)
        if face is not None:
            face = transform.resize(face, (height, width))
        # If no face identified - use the entire input image
        else:
            face = cv.cvtColor(img_arr, cv.COLOR_BGR2GRAY)
        # Transform image into tensor of the required shape
        face = np.resize(face, input_shape)
        face = normalize(face, height, width)
        face = mx.nd.array(face)
        img_list.append(face)
    return img_list | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def preprocess(self, request):\n img_list = []\n param_name = self.signature['inputs'][0]['data_name']\n input_shape = self.signature['inputs'][0]['data_shape']\n\n for idx, data in enumerate(request):\n img = data.get(param_name)\n if img is None:\n img = data.get(\"body\")\n\n if img is None:\n img = data.get(\"data\")\n\n if img is None or len(img) == 0:\n self.error = \"Empty image input\"\n return None\n\n # We are assuming input shape is NCHW\n [h, w] = input_shape[2:]\n\n try:\n img_arr = image.read(img)\n except Exception as e:\n logging.warn(e, exc_info=True)\n self.error = \"Corrupted image input\"\n return None\n\n img_arr = image.resize(img_arr, w, h)\n img_arr = image.transform_shape(img_arr)\n img_list.append(img_arr)\n return img_list",
"def preprocess_image(self, batched_inputs):\n images = [x.to(self.device) for x in batched_inputs]\n norms = [self.normalizer(x) for x in images]\n size = (norms[0].shape[1],norms[0].shape[2])\n images = ImageList.from_tensors(norms, self.backbone.size_divisibility)\n return images, size",
"def detect_face_task(img):\n\n # paramter for detect\n # image_size = 160\n # margin = 44\n minsize = 20 # minimum size of face\n threshold = [0.6, 0.7, 0.7] # three steps's threshold\n factor = 0.709 # scale factor\n\n # caffe model\n pnet = caffe_model.get_pnet()\n rnet = caffe_model.get_rnet()\n onet = caffe_model.get_onet()\n\n bounding_boxes, _ = detect_face.detect_face(img, minsize, pnet, rnet, onet, threshold, factor)\n print('detect bounding: ', bounding_boxes)\n print('Find faces: ', bounding_boxes.shape[0])\n\n # all_faces is faces information list, include face bytes, face position\n all_faces = []\n for face_position in bounding_boxes:\n face_position = face_position.astype(int)\n print('face position: ', face_position)\n\n # each face information, include position, face image\n head_rect = face_position[:4].tolist() # numpy array to python list\n head_img = misc.toimage(img).crop(head_rect)\n head_img_io = StringIO.StringIO()\n head_img.save(head_img_io, format='JPEG')\n head_img_b64 = base64.b64encode(head_img_io.getvalue())\n\n # construct response\n face_info = {}\n face_info['rect'] = head_rect\n face_info['image'] = head_img_b64\n\n all_faces.append(face_info)\n\n return all_faces",
"def preprocess_image(self, batched_inputs):\n images = [x[\"image\"].float().to(self.device) for x in batched_inputs]\n images = [self.normalizer(img) for img in images]\n images = ImageList.from_tensors(images, self.backbone.size_divisibility)\n return images",
"def extract_face_detections(self):\n self.detector.setInput(self.image_blob)\n self.detections = self.detector.forward()",
"def process_batch(self, image_batch):\n images = []\n for image_data in image_batch:\n image_resize = cv2.resize(image_data, (0,0), fx=0.5, fy=0.5) #NOTE\n images.append(image_resize)\n\n return np.array(images)",
"def face_detector_preprocess(img):\n input_face_det, scale, padding = resize_image(img[..., ::-1], 128, return_scale_padding=True)\n input_face_det = input_face_det.astype(np.float32) / 127.5 - 1.0\n input_face_det = np.moveaxis(input_face_det, -1, 0)[np.newaxis]\n return input_face_det, scale, padding",
"def run_inference(model: nn.Module,\n model_inputs: Dict[str, torch.Tensor]) -> List:\n result = model(\n return_loss=False,\n points=model_inputs['points'],\n img_metas=model_inputs['img_metas'])\n return [result]",
"def pre_process(self, images: Union[np.ndarray, List]) -> np.ndarray:\n images = validate_image(images)\n image_sizes = []\n image_arr = []\n for image in images:\n image_sizes.append(image.shape)\n image = resize(image,\n height=self.in_h,\n width=self.in_w)\n image = normalize(image)\n image_arr.append(image)\n image_arr = np.array(image_arr)\n return image_arr, image_sizes",
"def process_images(self):\n self.processed_content_image = tf.keras.applications.vgg19.preprocess_input(\n self.content_image)\n self.processed_style_image = tf.keras.applications.vgg19.preprocess_input(\n self.style_image)",
"def preprocess(self, listofimages):\n # transform input\n shape = self.net.blobs['data'].shape\n np_shape = [shape[i] for i in range(len(shape))]\n np_shape[0] = len(listofimages)\n\n data = np.zeros(np_shape)\n\n for i, h in enumerate(listofimages):\n if type(h) is str:\n data[i] = self.transformer.preprocess('data', caffe.io.load_image(h))\n elif type(h) is np.ndarray:\n data[i] = self.transformer.preprocess('data', h)\n\n return data",
"def pre_process_data(input_path: list, cuts: int, shape: int = 32, normalize: bool = True) -> list:\n images = []\n images_uncut = []\n for files_path in input_path:\n\n files = os.listdir(files_path) # TODO paths\n for f in files:\n file_path = f'{files_path}/{f}'\n im_uncut = cv2.imread(file_path)\n im_uncut = cv2.cvtColor(im_uncut, cv2.COLOR_RGB2GRAY)\n images_uncut.append(cv2.resize(im_uncut, (shape * cuts, shape * cuts)))\n x = np.array(images_uncut)\n\n if normalize:\n x_mean = np.mean(x, axis=(0, 1, 2))\n x_std = np.std(x, axis=(0, 1, 2))\n x = (x - x_mean) / (x_std + 1e-9)\n\n for im in x:\n height = im.shape[0]\n width = im.shape[1]\n frac_h = height // cuts\n frac_w = width // cuts\n i = 0\n image = []\n for h in range(cuts):\n for w in range(cuts):\n crop = im[h * frac_h:(h + 1) * frac_h, w * frac_w:(w + 1) * frac_w]\n crop_rehaped = cv2.resize(crop, (shape, shape))\n image.append([crop_rehaped, i, number_to_angle(i, cuts), neighbours(i, cuts)])\n i = i + 1\n images.append(image)\n # return np.array(images) # todo back to array\n return images",
"def extract_detections(self):\n self.rescue_model.setInput(self.human_blob)\n self.predictions = self.rescue_model.forward()",
"def preprocess_image(image, model_image_size):\n #resized_image = cv2.resize(image, tuple(reversed(model_image_size)), cv2.INTER_AREA)\n resized_image = letterbox_resize(image, tuple(reversed(model_image_size)))\n image_data = np.asarray(resized_image).astype('float32')\n image_data = normalize_image(image_data)\n image_data = np.expand_dims(image_data, 0) # Add batch dimension.\n return image_data",
"def imagenet_preprocess(image, label):\n i = image\n i = tf.cast(i, tf.float32)\n i = tf.image.resize_with_crop_or_pad(i, 224, 224)\n if model_name == 'ResNet50' or model_name == 'ResNet152':\n i = tf.keras.applications.resnet.preprocess_input(i)\n else:\n i = tf.keras.applications.densenet.preprocess_input(i)\n return (i, label)",
"def pre_analyse():\n t = transform()\n model = modified_resnet50()\n model.load_state_dict(\n torch.load(\n \"model.pth.tar\",\n map_location=torch.device(\"cpu\"),\n )[\"state_dict\"]\n )\n model.eval()\n\n def get_preds(img_path):\n \"\"\"\n Gives labelds and probabilities for a single image\n This is were we preprocess the image, using a function defined in the model class\n \"\"\"\n # load image\n img = Image.open(img_path).convert(\"RGB\")\n # process it\n x = t(img)\n # get in in the right format\n x = Variable(x).unsqueeze(0)\n # predictions\n output = model(x)\n # decode\n output = decode(output.cpu().data.numpy()[0])\n\n # filter\n # return pred, proba\n return output\n\n return get_preds(\"image.jpg\")",
"def infer():\n\n # Create StreamManagerApi object\n stream_manager_api = StreamManagerApi()\n # Use InitManager method init StreamManagerApi\n ret = stream_manager_api.InitManager()\n if ret != 0:\n print(\"Failed to init Stream manager, ret=%s\" % str(ret))\n exit()\n\n # create streams by pipeline config file\n with open(args.pipeline_path, \"rb\") as f:\n pipeline_str = f.read()\n\n # Configuring a stream\n ret = stream_manager_api.CreateMultipleStreams(pipeline_str)\n if ret != 0:\n print(\"Failed to create Stream, ret=%s\" % str(ret))\n exit()\n\n # Construct the input of the stream\n data_input = MxDataInput()\n # Stream_name encoded in UTF-8\n stream_name = args.stream_name.encode()\n print(stream_name)\n predictions = []\n with open(args.label_path, 'rt') as f:\n val_cls = f.read().rstrip(\"\\n\").split(\"\\n\")\n val_cls_dict = {}\n for i, cls in enumerate(val_cls):\n val_cls_dict[i] = cls\n coco_gt = COCO(args.instances_path)\n classs_dict = {}\n cat_ids = coco_gt.loadCats(coco_gt.getCatIds())\n for cat in cat_ids:\n classs_dict[cat[\"name\"]] = cat[\"id\"]\n\n for file_name in os.listdir(args.img_path):\n pred_data = []\n # Gets the Address of each image\n img_id = int(file_name.split('.')[0])\n file_path = args.img_path + file_name\n size = (cv2.imread(file_path)).shape\n\n # Read each photo in turn\n with open(file_path, \"rb\") as f:\n img_data = f.read()\n if not img_data:\n print(f\"read empty data from img:{file_name}\")\n continue\n # The element value img_data\n data_input.data = img_data\n boxes_output, scores_output = send_data_get_output(stream_name, data_input, stream_manager_api)\n pred_data.append({\"boxes\": boxes_output,\n \"box_scores\": scores_output,\n \"img_id\": img_id,\n \"image_shape\": size})\n\n parse_img_infer_result(pred_data[0], predictions, val_cls_dict, classs_dict)\n print(f\"Inferred image:{file_name} success!\")\n\n # Save the result in JSON format\n if not os.path.exists(args.res_path):\n os.makedirs(args.res_path)\n with open(args.res_path + 'predictions_test.json', 'w') as f:\n json.dump(predictions, f)\n stream_manager_api.DestroyAllStreams()",
"def preprocess_image(self, batched_inputs):\n images = [x[\"image\"].to(self.device) for x in batched_inputs]\n images_aug = [x[\"image_color\"].to(self.device) for x in batched_inputs]\n\n images = [self.normalizer(x) for x in images]\n images_aug = [self.normalizer(x) for x in images_aug]\n\n images = ImageList.from_tensors(images,\n self.backbone.size_divisibility)\n images_aug = ImageList.from_tensors(images_aug,\n self.backbone.size_divisibility)\n return images, images_aug",
"def train(self):\r\n faces = [] #empty list for faces\r\n Ids = [] #empty list for Id's\r\n path = f\"{PARENT_PATH}\\\\{DATASET_DIR}\" #dataset path\r\n\r\n #join each and every image paths\r\n image_paths = [os.path.join(path, i) for i in os.listdir(path)]\r\n #print(image_paths)\r\n\r\n for image in image_paths:\r\n face_img = Image.open(image).convert('L') #Pillow Image\r\n np_face = np.array(face_img, 'uint8') #into numpy array - usigned 8 bit -1byte\r\n Id = int(os.path.split(image)[-1].split('.')[1]) #get id from image path\r\n #print(Id)\r\n faces.append(np_face) #append in faces array/list\r\n Ids.append(Id) #append in Ids list/array\r\n\r\n RECOGNIZER.train(faces, np.array(Ids)) #train model using faces and Id (numpy arrays)\r\n RECOGNIZER.save(f\"{PARENT_PATH}\\\\{TRAINED_FILE}\")\r\n\r\n self.pop_window(title=\"Restart Needed!\", msg=\"Training Successful.\\nRestart the app Now.\")\r\n return",
"def inference_input():\n # Decode image into float range [0,1]\n jpegs = tf.placeholder(tf.string, shape=(1), name='input')\n image_buffer = tf.squeeze(jpegs, [0])\n image = tf.image.decode_jpeg(image_buffer, channels=3)\n image = tf.image.convert_image_dtype(image, dtype=tf.float32)\n image = tf.image.central_crop(image, central_fraction=0.875)\n image = tf.expand_dims(image, 0)\n image = tf.image.resize_bilinear(image, [FLAGS.image_size, FLAGS.image_size], align_corners=False)\n image = tf.squeeze(image, [0])\n\n # Rescale the image to [-1,-1]\n image = tf.sub(image, 0.5)\n image = tf.mul(image, 2.0)\n images = tf.expand_dims(image, 0)\n\n return images, jpegs",
"def preprocess(self, img):\n img_ = image.load_img(img, target_size=(299, 299))\n img_ = image.img_to_array(img_)\n img_ = np.expand_dims(img_, axis=0)\n img_ = preprocess_input(img_)\n return img_",
"def transform_fn(net, data, input_content_type, output_content_type):\n # we can use content types to vary input/output handling, but\n # here we just assume json for both\n ctx = mx.cpu() \n batch_size=64\n output=[]\n print (\"Start Parsing input\")\n parsed = json.loads(data)\n print (\"End Parsing input\")\n images=[]\n multisp=False\n \n #Check for Multispectral input type\n if 'type' in parsed.keys():\n if parsed['type'].lower() == 'rgb':\n multisp=False\n job_data=parsed['instances']\n else:\n multisp=True\n encodedBytes=parsed['instances'].encode(\"utf-8\")\n zip_value=base64.b64decode(encodedBytes)\n dump_value=gzip.decompress(zip_value)\n job_data=pickle.loads(dump_value)\n \n print (\"Multispacial\",multisp)\n \n for item in job_data:\n image_data=np.array(item['data'])\n print (\"Input Image Shape:\", image_data.shape)\n images.append ({'data':image_data})\n \n print (\"MultiSP:\", multisp)\n loader=BatchLoader(images,64,ctx,multisp)\n \n for idxb, batch in enumerate(loader.get_batches()):\n preds = nd.argmax(net(batch), axis=1)\n for pred in preds:\n output.append({'data':pred.asnumpy().astype('uint8').tolist() })\n\n response_body = json.dumps({'predictions':output})\n return response_body, output_content_type",
"def inference():\n if request.method == \"POST\":\n data = request.json #\n src_img = np.array(data[\"src\"]).astype(np.uint8) # Parsing data\n ref_img = np.array(data[\"ref\"]).astype(np.uint8) #\n ref_label = int(data[\"ref_label\"]) #\n result = get_inference(src_img, ref_img, ref_label) # Calling helper function\n return jsonify({\"result\": result.tolist()}) # Returning results into json",
"def forward(\n self,\n batched_input: List[Dict[str, Any]],\n multimask_output: bool, ) -> List[Dict[str, paddle.Tensor]]:\n input_images = paddle.stack(\n [self.preprocess(x[\"image\"]) for x in batched_input], axis=0)\n image_embeddings = self.image_encoder(input_images)\n\n outputs = []\n for image_record, curr_embedding in zip(batched_input,\n image_embeddings):\n if \"point_coords\" in image_record:\n points = (image_record[\"point_coords\"],\n image_record[\"point_labels\"])\n else:\n points = None\n sparse_embeddings, dense_embeddings = self.prompt_encoder(\n points=points,\n boxes=image_record.get(\"boxes\", None),\n masks=image_record.get(\"mask_inputs\", None), )\n low_res_masks, iou_predictions = self.mask_decoder(\n image_embeddings=curr_embedding.unsqueeze(0),\n image_pe=self.prompt_encoder.get_dense_pe(),\n sparse_prompt_embeddings=sparse_embeddings,\n dense_prompt_embeddings=dense_embeddings,\n multimask_output=multimask_output, )\n masks = self.postprocess_masks(\n low_res_masks,\n input_size=image_record[\"image\"].shape[-2:],\n original_size=image_record[\"original_size\"], )\n masks = masks > self.mask_threshold\n outputs.append({\n \"masks\": masks,\n \"iou_predictions\": iou_predictions,\n \"low_res_logits\": low_res_masks,\n })\n return outputs",
"def face_detector_process(self, frame):\n frame = self.frame_pre_process(frame)\n\n # Clear Face detector from previous frame\n self.face_detector.clear()\n\n # When we use async IE use buffer by using Queue\n self.face_detector.start_async(frame)\n\n # Predict and return ROI\n rois = self.face_detector.get_roi_proposals(frame)\n\n if self.QUEUE_SIZE_NUM < len(rois):\n log.warning(\"Too many faces for processing.\" \\\n \" Will be processed only %s of %s.\" % \\\n (self.QUEUE_SIZE_NUM, len(rois)))\n rois = rois[:self.QUEUE_SIZE_NUM]\n \n self.rois = rois\n \n return (rois)",
"def predict() -> Any:\n threshold = request.form.get(\"threshold\", type=float)\n source_size = request.form.get(\"source_size\", type=bool)\n images = request.files.getlist(\"images\")\n result = {}\n for image in images:\n input_image = prepare_input(image)\n if input_image is not None:\n output_image = model.predict(input_image, threshold, source_size)\n if output_image is not None:\n result[image.filename] = prepare_output(output_image)\n else:\n result[image.filename] = None\n else:\n result[image.filename] = None\n return result",
"def extract_faces(image_path: str, pk: int):\n image = Image.open(image_path)\n image = np.array(image)\n\n if image.shape[0] <= 0 or image.shape[1] <= 0:\n return None\n\n import mtcnn\n\n # detect faces from image\n face_detector = mtcnn.MTCNN()\n detections = face_detector.detect_faces(image)\n\n if len(detections) < 1:\n return None\n\n from deepface.basemodels.Facenet import InceptionResNetV2\n\n # load InceptionResNet model provided by deepface\n facenet_model = InceptionResNetV2()\n facenet_model.load_weights(get_weights(\"facenet\"))\n\n # normalize faces and get embeddings\n faces = [normalize_face(image, face) for face in detections]\n embeddings = facenet_model.predict(np.vstack(faces), batch_size=len(faces))\n\n for i in range(len(faces)):\n person_id = recognize_person(embeddings[i])\n print(person_id, flush=True)\n face_obj = models.Face.objects.create(\n confidence=detections[i]['confidence'],\n left=detections[i]['box'][0],\n top=detections[i]['box'][1],\n width=detections[i]['box'][2],\n height=detections[i]['box'][3],\n photo_id=pk,\n person_id=person_id\n )\n\n save_embeddings(embeddings[i], face_obj.id, person_id)",
"def preprocess_image(self, inputs):\n raise NotImplementedError('preprocess_image method not implemented.')",
"def input_handler(data, context):\n if context.request_content_type == 'application/x-image':\n payload = data.read()\n\n img = Image.open(io.BytesIO(payload))\n img = img.convert('RGB')\n img = img.resize((IMG_SIZE, IMG_SIZE), Image.NEAREST)\n img_array = image.img_to_array(img)\n img_array = img_array.astype(np.uint8)\n \n img_preprocessed = preprocess_input(img_array)[None, :]\n\n return json.dumps({\"instances\": np.array(img_preprocessed).tolist()})\n else:\n _return_error(415, 'Unsupported content type was \"{}\"'.format(\n context.request_content_type or 'Unknown'))",
"def image_preprocessing(image_buffer, bbox, image_size, is_training):\n if is_training:\n image = _decode_and_random_crop(image_buffer, bbox, image_size)\n image = _normalize(image)\n image = tf.image.random_flip_left_right(image)\n else:\n image = _decode_and_center_crop(image_buffer, image_size)\n image = _normalize(image)\n image = tf.reshape(image, [image_size, image_size, 3])\n return image"
] | [
"0.73400545",
"0.6653662",
"0.65381366",
"0.65293646",
"0.6363785",
"0.6331957",
"0.6321112",
"0.6304423",
"0.62506974",
"0.6235382",
"0.6233115",
"0.6188294",
"0.61358917",
"0.61200666",
"0.6102832",
"0.60749215",
"0.6074793",
"0.6067623",
"0.6028816",
"0.6026498",
"0.60255986",
"0.6024678",
"0.60128784",
"0.6002284",
"0.5999818",
"0.5995797",
"0.59468997",
"0.5944487",
"0.59418935",
"0.5941684"
] | 0.81999916 | 0 |
Postprocess inference result to normalize probabilities and render with labels | def postprocess(self, data):
    if self.error is not None:
        return [self.error]
    # Iterating over inference results to render the normalized probabilities
    response = []
    for inference_result in data:
        softmax_result = inference_result.softmax().asnumpy()
        for idx, label in enumerate(self.labels):
            response.append({label: float(softmax_result[0][idx])})
    return [response] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def inference_preprocess(self):\n return",
"def post(self):\n result = {'status': 'error'}\n\n args = input_parser.parse_args()\n input_data = args['image'].read()\n image = self.model_wrapper._read_image(input_data)\n preds = self.model_wrapper._predict(image)\n\n # Modify this code if the schema is changed\n label_preds = [{'label_id': p[0], 'label': p[1], 'probability': p[2]} for p in [x for x in preds]]\n result['predictions'] = label_preds\n result['status'] = 'ok'\n\n return result",
"def post_process_predictions(self, labels, scene):\n pass",
"def postprocess(self, inference_output):\n logger.info(inference_output)\n return inference_output",
"def make_output_human_readable(\n self, predictions: torch.Tensor\n ) -> Dict[str, torch.Tensor]:\n all_predictions = predictions.cpu().data.numpy()\n if all_predictions.ndim == 3:\n predictions_list = [all_predictions[i] for i in range(all_predictions.shape[0])]\n else:\n predictions_list = [all_predictions]\n all_tags = []\n for predictions in predictions_list:\n outside_index = self.vocab.get_token_index(\"O\", namespace=self.task)\n\n # @AR: Get the thresholded matrix and prepare the prediction sequence\n pred_over_thresh = (predictions >= self.threshold) * predictions\n #print(pred_over_thresh)\n sequence_token_labels = []\n maxxx = numpy.argmax(predictions, axis=-1).tolist()\n\n # @AR: For each label set, check if to apply argmax or sigmoid thresh\n j=0\n for pred in pred_over_thresh:\n num_pred_over_thresh = numpy.count_nonzero(pred)\n if (num_pred_over_thresh == 0) or (num_pred_over_thresh == 1):\n pred_idx_list = [maxxx[j]]\n\n else:\n try:\n outside_position = pred_idx_list.index(outside_index)\n except ValueError:\n outside_position = -1\n # get ranked list\n tuples = [[score, idx] for idx, score in enumerate(pred) if score > self.threshold and idx != outside_position]\n # check for max_heads\n if self.max_heads != 0 and len(tuples) > self.max_heads:\n tuples = tuples[:self.max_heads]\n if len(tuples) == 0:\n tuples = [1.0, outside_position]\n pred_idx_list = [x[1] for x in tuples]\n \n\n sequence_token_labels.append(pred_idx_list)\n j += 1\n\n # @AR: Create the list of tags to append for the output\n tags = []\n for token_labels in sequence_token_labels:\n curr_labels = []\n for token_label in token_labels:\n curr_labels.append(\n self.vocab.get_token_from_index(token_label, namespace=self.task))\n tags.append(curr_labels)\n\n all_tags.append(tags)\n return all_tags",
"def inference():\n if request.method == \"POST\":\n data = request.json #\n src_img = np.array(data[\"src\"]).astype(np.uint8) # Parsing data\n ref_img = np.array(data[\"ref\"]).astype(np.uint8) #\n ref_label = int(data[\"ref_label\"]) #\n result = get_inference(src_img, ref_img, ref_label) # Calling helper function\n return jsonify({\"result\": result.tolist()}) # Returning results into json",
"def predict():\n\n predict_cfg = get_predict_args()\n device = get_device()\n print(device)\n\n # load checkpoint\n ckpt_path = find_ckpt_in_directory(predict_cfg.ckpt)\n ckpt = torch.load(ckpt_path, map_location=device)\n best_iter = ckpt[\"best_iter\"]\n cfg = ckpt[\"cfg\"]\n aspect = cfg[\"aspect\"]\n\n for k, v in cfg.items():\n print(\"{:20} : {:10}\".format(k, str(v)))\n\n eval_batch_size = 64\n\n print(\"Loading data\")\n dev_data = list(beer_reader(cfg[\"dev_path\"]))\n test_data = beer_annotations_reader(cfg[\"test_path\"], aspect=aspect)\n\n print(\"dev\", len(dev_data))\n print(\"test\", len(test_data))\n\n print(\"Loading pre-trained word embeddings\")\n vocab = Vocabulary()\n vectors = load_embeddings(cfg[\"embeddings\"], vocab) # required for vocab\n\n # build model\n model = build_model(cfg[\"model\"], vocab, cfg=cfg)\n\n # load parameters from checkpoint into model\n print(\"Loading saved model..\")\n model.load_state_dict(ckpt[\"state_dict\"])\n model.to(device)\n print(\"Done\")\n\n print(model)\n print_parameters(model)\n\n print(\"Evaluating\")\n dev_eval = evaluate_loss(\n model, dev_data, batch_size=eval_batch_size,\n device=device, cfg=cfg)\n test_eval = evaluate_loss(\n model, test_data, batch_size=eval_batch_size,\n device=device, cfg=cfg)\n\n if hasattr(model, \"z\"):\n path = os.path.join(\n cfg[\"save_path\"], \"final_rationales.txt\")\n test_precision, test_macro_prec = evaluate_rationale(\n model, test_data, aspect=aspect, device=device,\n batch_size=eval_batch_size, path=path)\n else:\n test_precision = 0.\n test_macro_prec = 0.\n test_eval[\"precision\"] = test_precision\n test_eval[\"macro_precision\"] = test_macro_prec\n\n dev_s = make_kv_string(dev_eval)\n test_s = make_kv_string(test_eval)\n\n print(\"best model iter {:d} dev {} test {}\".format(\n best_iter, dev_s, test_s))",
"def postprocess(self, prediction_dict, **params):\r\n pass",
"def inference(self):\r\n\t\tfor partition, loader in self.loaders.items():\r\n\t\t\tavg_loss, (y, y_hat), post, attentions, tags = self.eval_loader(\r\n\t\t\t\tloader)\r\n\t\t\tself.preds[partition] = {\r\n\t\t\t\t'tag': tags,\r\n\t\t\t\t'y': y,\r\n\t\t\t\t'y_hat': y_hat,\r\n\t\t\t\t# 'posteriors': post,\r\n\t\t\t\t# 'attentions': attentions\r\n\t\t\t}",
"def predict_probability_model(*args):\n final_data = None\n any_null = validate_none(args)\n if any_null:\n final_data = transform_fields(args[-3:])\n final_data = list(args[0:5]) + final_data\n predicted = test_model(final_data)\n converts, styles = user_converts(predicted)\n\n return [f'{predicted} %', converts] + styles",
"def postprocess(output):\n text=''\n order = output.argsort()[::-1][:6]\n # print('\\n------- predictions --------')\n for i in range(1):\n # print ('prediction ' + str(i) + ' (probability ' + str(output[order[i]]*100) + '%) is ' + gNetworkCategories[order[i]] + ' label index is: ' + str(order[i]) )\n text=text+str(gNetworkCategories[order[i]])\n\n return text",
"def predict():\n\n\n json_payload = request.json\n #LOG.info(f\"JSON payload: %s\" %json_payload)\n inference_payload = pd.DataFrame(json_payload)\n #LOG.info(\"inference payload DataFrame: %s\" %inference_payload)\n scaled_payload = scale(inference_payload)\n prediction = list(clf.predict(scaled_payload))\n return jsonify({'prediction': prediction})",
"def predict(image_data):\n PAYLOAD = {}\n PAYLOAD[\"timestamp\"] = str(datetime.now())\n PAYLOAD[\"inference-type\"] = \"image-classification\"\n PAYLOAD[\"inference-description\"] = \"Top {} predictions with score {} or above \".format(\n config_utils.MAX_NO_OF_RESULTS, config_utils.SCORE_THRESHOLD\n )\n PAYLOAD[\"inference-results\"] = []\n\n try:\n # Run DLR to perform inference with DLC optimized model\n model_output = dlr_model.run(image_data)\n config_utils.logger.info(\"pred shape: '{}'.\".format(model_output[0][0].shape)) \n probabilities = softmax(model_output[0][0])\n config_utils.logger.info(\"pred shape softmax: '{}'.\".format(probabilities.shape)) \n sort_classes_by_probability = argsort(probabilities)[::-1]\n\n config_utils.logger.info(\"pred classes: '{}'.\".format(sort_classes_by_probability[: config_utils.MAX_NO_OF_RESULTS])) \n\n for i in sort_classes_by_probability[: config_utils.MAX_NO_OF_RESULTS]:\n if probabilities[i] >= config_utils.SCORE_THRESHOLD:\n result = {\"Label\": str(synset[i]), \"Score\": str(probabilities[i])}\n PAYLOAD[\"inference-results\"].append(result)\n\n config_utils.logger.info(dumps(PAYLOAD))\n\n if config_utils.TOPIC.strip() != \"\":\n ipc_utils.IPCUtils().publish_results_to_cloud(PAYLOAD)\n else:\n config_utils.logger.info(\"No topic set to publish the inference results to the cloud.\")\n\n except Exception as e:\n config_utils.logger.error(\"Exception occured during prediction: {}\".format(e))",
"def post_process_predictions(self, labels: Labels, scene: Scene) -> Labels:\n return labels",
"def inference():\n\n sents = request.get_json(force=True)['sents']\n\n vecs = tokenize_inputs(sents)\n results = model(vecs)\n\n result = dict()\n result['pred'] = [str(sample.numpy()[0]) for sample in results]\n \n response = flask.Response()\n response.headers.add(\"Access-Control-Allow-Origin\", \"*\")\n\n print(result)\n\n return result",
"def prepare_for_predict(self) -> None:\n _, self.all_labels_embed = self._create_all_labels_embed()",
"def prediction_processing(predictions, labels, threshold, step_nb):\n new_labels = []\n new_predictions = []\n number_sequences = step_nb//50\n\n for k in range(len(labels)//number_sequences):\n total_prediction = 0\n isLabelTrue = labels[number_sequences*k]\n for i in range(number_sequences):\n total_prediction += (1/predictions[number_sequences*k+i])\n if not(isLabelTrue == (labels[number_sequences*k+i])):\n logger.error('Problem.')\n if total_prediction > threshold:\n total_prediction = False\n else:\n total_prediction = True\n new_labels.append(isLabelTrue)\n new_predictions.append(total_prediction)\n\n recall_1 = recall_score(new_labels, new_predictions)\n recall_0 = recall_score(new_labels, new_predictions, pos_label=0)\n precision_1 = precision_score(new_labels, new_predictions)\n precision_0 = precision_score(new_labels, new_predictions, pos_label=0)\n return((recall_1, recall_0, precision_1, precision_0), new_predictions, new_labels)",
"def _postprocess(self, output: Dict[str, np.ndarray]):\n # Slice to remove padding, omitting initial [CLS] and final [SEP]\n slicer = slice(1, output.pop(\"ntok\") - 1)\n output[\"tokens\"] = self.tokenizer.convert_ids_to_tokens(\n output.pop(\"input_ids\")[slicer])\n probas = output.pop(\"probas\")\n\n # Predictions at every position, regardless of masking.\n output[\"pred_tokens\"] = self._get_topk_tokens(probas[slicer]) # pytype: disable=container-type-mismatch\n\n return output",
"def chainercv_postprocess_change_labels(results):\n bboxes, labels, scores = results\n # loop over the results and add them to the list of\n # returned predictions\n classes = []\n boxes = []\n confs = []\n for index, bbox in enumerate(bboxes[0]):\n classes.append(str(voc_bbox_label_names[int(labels[0][index])]))\n boxes.append([bbox[0], bbox[1], bbox[2], bbox[3]])\n confs.append(scores[0][index])\n\n return (boxes, classes, confs)",
"def postprocess(self, predicted_output, original_input=None, stats=None,\n **kwargs):\n pass",
"def postprocess_for_inference(self,\n prediction: Dict[str, torch.Tensor],\n *args, **kwargs,\n ) -> Dict[str, torch.Tensor]:\n return {\"pred_seg\": self.logits_convert_fn(prediction[\"seg_logits\"])}",
"def main(self, data):\n\t\ttokenizer = BertTokenizer.from_pretrained(\"bert-base-uncased\", do_lower_case=True)\n\t\teval_features = self.get_features(data, self.labels, tokenizer, self.max_seq_length)\n\t\tlabel, prob = self.predict(eval_features)\n\t\treturn label, prob",
"def postprocess(self, data):\n all_predictions, all_nbest_json, scores_diff_json = predictions(self._dev_dataset,\n data,\n self._tokenizer)\n\n if len(all_nbest_json) == 0 or len(all_nbest_json[0]) == 0:\n return [{'predicted': '',\n 'confidence': 0}]\n\n return [{'predicted': all_nbest_json[0][0]['text'],\n 'confidence': all_nbest_json[0][0]['probability']}]",
"def predict_proba(self):\n ...",
"def predict():\n # initialize the data dictionary that will be returned from the\n # view\n data = {\"success\": False}\n\n # ensure an image was properly uploaded to our endpoint\n if flask.request.method == \"POST\":\n if flask.request.files.get(\"image\"):\n # read the image in PIL formats\n img = flask.request.files[\"image\"].read()\n img = Image.open(io.BytesIO(img))\n\n # preprocess the image and prepare it for classification\n img = predictor.prepare_image(img, target_size=(299, 299), http_request=True)\n\n # classify the input image and then initialize the list\n # of predictions to return to the client\n predictions = predictor.model.predict(img)\n\n dog_label = predictor.decode_prediction(np.argmax(predictions, axis=-1)[0])\n print(dog_label)\n result = {\"label\" : str(dog_label), \"probability\" : float(np.max(predictions[0]))}\n data[\"predictions\"] = result\n\n # indicate that the request was a success\n data[\"success\"] = True\n\n # return the data dictionary as a JSON response\n return flask.jsonify(data)",
"def _postprocess(self, preds):\n ntok = preds.pop(\"ntok\")\n ids = preds.pop(\"input_ids\")[:ntok]\n preds[\"tokens\"] = self._detokenize(ids)\n\n # Decode predicted top-k tokens.\n # token_topk_preds will be a List[List[(word, prob)]]\n # Initialize prediction for 0th token as N/A.\n token_topk_preds = [[(\"N/A\", 1.)]]\n pred_ids = preds.pop(\"top_k_indices\")[:ntok] # <int>[num_tokens, k]\n pred_probs = preds.pop(\"top_k_probs\")[:ntok] # <float32>[num_tokens, k]\n for token_pred_ids, token_pred_probs in zip(pred_ids, pred_probs):\n token_pred_words = self._detokenize(token_pred_ids)\n token_topk_preds.append(list(zip(token_pred_words, token_pred_probs)))\n preds[\"pred_tokens\"] = token_topk_preds\n\n # Process attention.\n for key in preds:\n if not re.match(r\"layer_(\\d+)/attention\", key):\n continue\n # Select only real tokens, since most of this matrix is padding.\n # <float32>[num_heads, max_seq_length, max_seq_length]\n # -> <float32>[num_heads, num_tokens, num_tokens]\n preds[key] = preds[key][:, :ntok, :ntok].transpose((0, 2, 1))\n # Make a copy of this array to avoid memory leaks, since NumPy otherwise\n # keeps a pointer around that prevents the source array from being GCed.\n preds[key] = preds[key].copy()\n\n return preds",
"def chainercv_postprocess_pack_each_item(results):\n bboxes, labels, scores = results\n\n # loop over the results and add them to the list of\n # returned predictions\n predictions = []\n for index, bbox in enumerate(bboxes[0]):\n r = {\"class\": str(voc_bbox_label_names[int(labels[0][index])]),\n \"bbox\": {\n \"ymin\": str(bbox[0]),\n \"xmin\": str(bbox[1]),\n \"ymax\": str(bbox[2]),\n \"xmax\": str(bbox[3])\n },\n \"probability\": str(scores[0][index])\n }\n predictions.append(r)\n\n return predictions",
"def inference(self, inputs):\n\n input_ids = torch.tensor([inputs[\"head_ids\"]], dtype=torch.long).to(self.device)\n attention_masks = torch.tensor([inputs[\"attention_masks\"]], dtype=torch.bool).to(self.device)\n \n # Handling inference for sequence_classification.\n with torch.no_grad():\n output = self.model(input_ids, attention_masks)\n predict_label = output[0].argmax(dim=2)\n predict_string = self.tokenizer.decode_sent(input_ids[0].detach().cpu().numpy(), predict_label[0].detach().cpu().numpy())\n\n logger.info(\"Model predicted: '%s'\", predict_string)\n return [{'predict': predict_string}]",
"def postprocess_model_outputs(self, predictions, expected):\n\n predictions = {k: t.numpy() for k, t in predictions.items()}\n\n return predictions, expected",
"def compute(self) -> Tensor:\n\n if self.samples:\n return self.average_precisions.float() / self.total\n else:\n # pred_image_indices = torch.cat(self.pred_image_indices, dim=0)\n pred_probs = torch.cat(self.pred_probs, dim=0)\n pred_labels = torch.cat(self.pred_labels, dim=0)\n pred_bboxes = torch.cat(self.pred_bboxes, dim=0)\n\n # target_image_indices = torch.cat(self.target_image_indices, dim=0)\n target_labels = torch.cat(self.target_labels, dim=0)\n target_bboxes = torch.cat(self.target_bboxes, dim=0)\n\n # pred_index = torch.nonzero((pred_labels == 1))\n # pred_probs = pred_probs[pred_index]\n # pred_bboxes = pred_bboxes[pred_index]\n # target_index = torch.nonzero((target_labels == 1))\n # target_bboxes = target_bboxes[target_index]\n\n\n # _, index_sorted = torch.sort(pred_probs)\n # pred_bboxes = pred_bboxes[index_sorted].cpu().detach().numpy()\n # target_bboxes = target_bboxes.cpu().detach().numpy()\n pred_probs = pred_probs.cpu().detach().numpy()\n pred_labels = pred_labels.cpu().detach().numpy()\n pred_bboxes = pred_bboxes.cpu().detach().numpy()\n target_labels = target_labels.cpu().detach().numpy()\n target_bboxes = target_bboxes.cpu().detach().numpy()\n\n pred_probs = pred_probs[pred_labels == 1]\n pred_bboxes = pred_bboxes[pred_labels == 1]\n target_bboxes = target_bboxes[target_labels == 1]\n\n preds_sorted_idx = np.argsort(pred_probs)[::-1]\n pred_bboxes = pred_bboxes[preds_sorted_idx]\n\n x, y = calculate_precision_recall(target_bboxes, pred_bboxes)\n\n if len(x) >= 2:\n return auc(x, y)\n else:\n return 0\n\n # return mean_average_precision(\n # pred_image_indices,\n # pred_probs,\n # pred_labels,\n # pred_bboxes,\n # target_image_indices,\n # target_labels,\n # target_bboxes,\n # self.iou_threshold,\n # self.ap_calculation,\n # )"
] | [
"0.6822541",
"0.6789945",
"0.6525151",
"0.6503865",
"0.635317",
"0.63490814",
"0.63463706",
"0.6325388",
"0.6312468",
"0.6298555",
"0.627382",
"0.62542534",
"0.6231594",
"0.62100625",
"0.6208489",
"0.61715907",
"0.6155044",
"0.61538804",
"0.6099723",
"0.6094014",
"0.60685134",
"0.6002145",
"0.5999253",
"0.5967611",
"0.59581786",
"0.5954968",
"0.5948356",
"0.59454316",
"0.59398514",
"0.59373987"
] | 0.7199529 | 0 |
Get Mapbox access token from arg or environment | def _get_token(token=None):
    if token is not None:
        return token
    else:
        return os.environ.get("MAPBOX_ACCESS_TOKEN") or os.environ.get(
            "MapboxAccessToken"
        ) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def access_token(*args, **kwargs):\n return None",
"def _get_api():\n return os.environ.get(\"MAPBOX_API\", \"https://api.mapbox.com\")",
"def get_access_token() -> str:\n\n gcloud_access_token = (\n subprocess.check_output(\n \"gcloud auth print-access-token\".split(\" \")).decode().rstrip(\"\\n\"))\n\n return gcloud_access_token",
"def get_personal_access_token() -> str:\n return getpass.getpass(\"Enter SurveyMonkey API personal access token: \")",
"def build_access_token_guest():\n return do_build_access_token(tenant_id='guest_tenant_id')",
"def get_mgnt_token():\n # Get the Environment variables from App Container.\n app_auth_token = os.getenv('APP_AUTHENTICATION_TOKEN')\n app_endpoint_ip = os.getenv('APPS_API_ENDPOINT_IP')\n app_endpoint_port = os.getenv('APPS_API_ENDPOINT_PORT')\n\n\n # Initialize the client.\n app_cli = AppClient(app_auth_token, app_endpoint_ip, app_endpoint_port)\n app_cli.config.disable_logging()\n\n # Get the management access token.\n token = app_cli.token_management\n mgmt_auth_token = token.create_management_access_token()\n return mgmt_auth_token",
"def access_token(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"access_token\")",
"def access_token(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"access_token\")",
"def access_token(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"access_token\")",
"def access_token(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"access_token\")",
"def access_token(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"access_token\")",
"def access_token(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"access_token\")",
"def access_token(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"access_token\")",
"def access_token(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"access_token\")",
"def access_token(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"access_token\")",
"def access_token(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"access_token\")",
"def access_token(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"access_token\")",
"def get_project_access_token(*args, **kwargs):\n return get_project_access_token_async(*args, **kwargs).get_result()",
"def get_access_token(self, request) -> str or Exception:\n pass",
"def get_token(self, access_token):\n if access_token:\n return access_token\n elif self.default_access_token:\n return self.default_access_token\n else:\n return ''",
"def get_token():\n token = getpass.getpass('Paste in your RDR API token and press Enter:')\n return {'Authorization': 'token ' + token}",
"def token():\n return os.environ.get('TOKEN', None)",
"def get_access_token(*args, **kwargs):\n return get_access_token_async(*args, **kwargs).get_result()",
"def get_request_token():\n oauth = OAuth1(CLIENT_KEY, client_secret=CLIENT_SECRET)\n response = requests.post(REQUEST_TOKEN_URL, auth=oauth)\n credentials = urlparse.parse_qs(response.content)\n\n request_token = credentials.get(\"oauth_token\")[0]\n request_secret = credentials.get(\"oauth_token_secret\")[0]\n return request_token, request_secret",
"def get_oauth_token():\n return session.get('remote_oauth')",
"def build_access_token():\n return do_build_access_token(tenant_id='intility_tenant_id')",
"def get_auth_token():\n if CFG.auth_enabled:\n auth_token = get_keystone_token()\n else:\n auth_token = 'notrealtoken'\n\n return auth_token",
"def get_global_access_token(self) -> str:\n headers = apps.create_jwt_headers(\n private_key_pem=self.private_pem_key, app_id=self.app_id, expire_in=600 # Max allowed: 60*10 (10 minutes)\n )\n url = f\"https://api.github.com/app/installations/{self.installation_id}/access_tokens\"\n response = requests.post(url=url, headers=headers)\n if response.status_code != 201:\n raise Exception(\n \"Failed to get the global access token. \"\n f\"Status code: {response.status_code} \"\n f\"Response: {response.json()} \"\n )\n return response.json()[\"token\"]",
"def get_token():\n return session.get('microsoft_token')",
"def get_token():\n return session.get('microsoft_token')"
] | [
"0.6571186",
"0.641618",
"0.6319432",
"0.6312531",
"0.62517345",
"0.62223494",
"0.61801493",
"0.61801493",
"0.61801493",
"0.61801493",
"0.61801493",
"0.61801493",
"0.61801493",
"0.61801493",
"0.61801493",
"0.61801493",
"0.61801493",
"0.61220384",
"0.61139506",
"0.60954535",
"0.6009204",
"0.6000249",
"0.5946743",
"0.587041",
"0.58497536",
"0.5848139",
"0.5840146",
"0.5773087",
"0.5762624",
"0.5762624"
] | 0.7944125 | 0 |
Create a new tileset with a recipe. $ tilesets create takes a tileset id in the form of username.handle, for example "mapbox.neattileset". The handle may only include "-" or "_" special characters. | def create(
    tileset, recipe, name=None, description=None, privacy=None, token=None, indent=None
):
    mapbox_api = _get_api()
    mapbox_token = _get_token(token)
    url = "{0}/tilesets/v1/{1}?access_token={2}".format(
        mapbox_api, tileset, mapbox_token
    )
    body = {}
    body["name"] = name or ""
    body["description"] = description or ""
    if privacy:
        body["private"] = True if privacy == "private" else False
    if not utils.validate_tileset_id(tileset):
        raise errors.TilesetNameError
    if recipe:
        with open(recipe) as json_recipe:
            body["recipe"] = json.load(json_recipe)
    r = requests.post(url, json=body)
    click.echo(json.dumps(r.json(), indent=indent)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_tile(self, name):\n return self.subgrids[name[0:2]].tilesys.create_tile(name)",
"def create_mapset(self, mapset, dbase=None, location=None):\n module = 'g.c.mapset'\n gs.run_command(module, mapset=mapset, dbase=dbase, location=location)",
"def create_mapset(self, mapset, dbase=None, location=None):\n module = 'g.mapset'\n gs.run_command(module, flags='c', mapset=mapset, dbase=dbase, location=location)",
"def _create_hotkey_set():\n message_str = 'You must use a custom hotkey profile.\\n\\n' \\\n 'To continue adding Mimic hotkeys, switch \\n' \\\n 'to a custom hotkey set in the Hotkey Editor\\n' \\\n 'or create a new profile below. \\n'\n\n user_input = pm.promptDialog(\n title='New Hotkey Profile',\n message=message_str,\n messageAlign='center',\n button=['Cancel', 'Create'],\n defaultButton='Create',\n text='Mimic_Hotkeys',\n style='text',\n cancelButton='Cancel',\n dismissString='Cancel')\n\n if user_input == 'Create':\n hotkey_set_name = pm.promptDialog(query=True, text=True)\n hotkey_set_name_filtered = _filter_hotkey_set_name(hotkey_set_name)\n pm.hotkeySet(hotkey_set_name_filtered, current=True)\n print('New Hotkey Set created: {}'.format(hotkey_set_name_filtered))\n return True",
"def add_tile(self, input_name, multiples, name=None):\n return self._build_op('Tile', [input_name, multiples], name=name)",
"def create_sets(\n path: tuple,\n maps_ath: str,\n gt_maps_path: str,\n ds_index: int = 0,\n skip_black: bool = True,\n skip_water: bool = True,\n skip_no_class: bool = True,\n):\n maps = get_maps(maps_ath, MAPS_EXT)\n gt_maps = get_maps(gt_maps_path, GT_MAPS_EXT)\n logger.info(\n \"Found %i aerial maps and %i ground truth maps.\", len(\n maps), len(gt_maps)\n )\n with tqdm(total=len(maps), desc=\"Maps\") as pbar:\n for m in maps:\n try:\n ortho_map = Image.open(m)\n gt_map = Image.open(get_gt_map(m, gt_maps))\n\n if ortho_map.size == gt_map.size:\n ortho_map_cv2 = pil_to_opencv(ortho_map)\n gt_map_cv2 = pil_to_opencv(gt_map)\n boxes = gen_crop_area(\n SET_RESOLUTION[0], SET_RESOLUTION[1], ortho_map.size\n )\n center_points = gen_center_points(\n SET_RESOLUTION[0], SET_RESOLUTION[1], ortho_map.size\n )\n with tqdm(\n total=len(boxes),\n leave=False,\n desc=\"Sets for {}\".format(os.path.basename(m)),\n ) as pbar2:\n for b in boxes:\n map_crop = ortho_map.crop(b)\n gt_map_crop = gt_map.crop(b)\n\n if add_to_set(\n map_crop,\n gt_map_crop,\n skip_black=skip_black,\n skip_water=skip_water,\n skip_no_class=skip_no_class,\n ):\n map_fn = os.path.join(\n path[1], \"{}_x.png\".format(ds_index)\n )\n gt_map_fn = os.path.join(\n path[2], \"{}_y.png\".format(ds_index)\n )\n map_crop.save(map_fn)\n gray_gt_map_crop = reduce_and_grayscale(\n gt_map_crop)\n gray_gt_map_crop.save(gt_map_fn)\n ds_index += 1\n\n pbar2.set_description(\n \"Sets for {}(index: {})\".format(\n os.path.basename(m), ds_index\n )\n )\n pbar2.update()\n else:\n continue\n except Exception as e:\n logger.error(\"Error occurred while creating set: %s\", e)\n logger.error(\"Skipping %s\", m)\n pbar.update()",
"def _create(self, name):\n command = [\n 'ipset create -exist ' + name + ' hash:net family inet maxelem 536870912',\n ]\n self.__run(command)",
"def build_tiles(cls):\n\n LOGGER.debug(\"Building tiles\")\n\n for tile_id in tiledata.TILE_DATA:\n if not Tile.tile_factory(tile_id):\n LOGGER.error(\"Could not construct tile with ID %d\", tile_id)\n sys.exit(1)",
"def create_tile(self, name=None, x=None, y=None):\n\n # use the x and y coordinates for specifing the tile\n if x is not None and y is not None and name is None:\n llx, lly = self.round_xy2lowerleft(x, y)\n # use the tile name for specifing the tile\n elif name is not None and x is None and y is None:\n llx, lly = self.tilename2lowerleft(name)\n else:\n raise AttributeError('\"name\" or \"x\"&\"y\" must be defined!')\n\n # get name of tile (assures long-form of tilename, even if short-form\n # is given)\n name = self._encode_tilename(llx, lly)\n # set True if land in the tile\n covers_land = self.check_tile_covers_land(tilename=name)\n\n return Equi7Tile(self.core, name, llx, lly, covers_land=covers_land)",
"def create_recipe(request, pk):\n recipeform = RecipeForm()\n IngredientFormSet = formset_factory(IngredientForm)\n InstructionFormSet = formset_factory(InstructionForm)\n cookbook = CookBook.objects.get(pk=pk)\n if request.method == \"POST\":\n recipeform = RecipeForm(request.POST, request.FILES)\n ingredientformset = IngredientFormSet(request.POST)\n instructionformset = InstructionFormSet(request.POST)\n if recipeform.is_valid() and ingredientformset.is_valid() and instructionformset.is_valid():\n new_ingredients = []\n picture = recipeform['image']\n for letter in picture:\n if letter in [' ', '20', '%']:\n letter.replace(letter, '_')\n new_recipe = Recipe(\n user=request.user,\n cookbook=cookbook,\n title=recipeform.cleaned_data['title'],\n image=picture,\n prep_time=recipeform.cleaned_data['prep_time'],\n cook_time=recipeform.cleaned_data['cook_time'],\n tags=recipeform.cleaned_data['tags'],\n )\n new_recipe.save()\n for ingredient_form in ingredientformset:\n description = ingredient_form.cleaned_data['ingredient']\n if ingredient_form:\n new_ingredients.append(Ingredient.objects.create(recipe=new_recipe, ingredient=description))\n Instruction.objects.create(recipe=new_recipe, direction=request.POST.get('direction'))\n return HttpResponseRedirect(reverse('list_cookbooks'))\n else:\n recipe_form = RecipeForm()\n ingredient_form_set = IngredientFormSet()\n instruction_form_set = InstructionFormSet()\n return render(request, 'cookbook/recipe_form.html', {'recipe_form': recipe_form,\n 'ingredient_formset': ingredient_form_set,\n 'instruction_formset': instruction_form_set})",
"def create_tiles(self, zoom):\n # Compute the tile x-y-z index range for the rasterlayer for this zoomlevel\n bbox = self.rasterlayer.extent()\n indexrange = tiler.tile_index_range(bbox, zoom)\n\n # Compute scale of tiles for this zoomlevel\n tilescale = tiler.tile_scale(zoom)\n\n # Count the number of tiles that are required to cover the raster at this zoomlevel\n nr_of_tiles = (indexrange[2] - indexrange[0] + 1) * (indexrange[3] - indexrange[1] + 1)\n\n # Create destination raster file\n self.log('Snapping dataset to zoom level {0}'.format(zoom))\n\n bounds = tiler.tile_bounds(indexrange[0], indexrange[1], zoom)\n sizex = (indexrange[2] - indexrange[0] + 1) * self.tilesize\n sizey = (indexrange[3] - indexrange[1] + 1) * self.tilesize\n dest_file = os.path.join(self.tmpdir, 'djangowarpedraster' + str(zoom) + '.tif')\n\n snapped_dataset = self.dataset.warp({\n 'name': dest_file,\n 'origin': [bounds[0], bounds[3]],\n 'scale': [tilescale, -tilescale],\n 'width': sizex,\n 'height': sizey,\n })\n\n self.log('Creating {0} tiles for zoom {1}.'.format(nr_of_tiles, zoom))\n\n counter = 0\n for tilex in range(indexrange[0], indexrange[2] + 1):\n for tiley in range(indexrange[1], indexrange[3] + 1):\n # Log progress\n counter += 1\n if counter % 250 == 0:\n self.log('{0} tiles created at zoom {1}'.format(counter, zoom))\n\n # Calculate raster tile origin\n bounds = tiler.tile_bounds(tilex, tiley, zoom)\n\n # Construct band data arrays\n pixeloffset = (\n (tilex - indexrange[0]) * self.tilesize,\n (tiley - indexrange[1]) * self.tilesize\n )\n\n band_data = [\n {\n 'data': band.data(offset=pixeloffset, size=(self.tilesize, self.tilesize)),\n 'nodata_value': band.nodata_value\n } for band in snapped_dataset.bands\n ]\n\n # Add tile data to histogram\n if zoom == self.max_zoom:\n self.push_histogram(band_data)\n\n # Warp source raster into this tile (in memory)\n dest = GDALRaster({\n 'width': self.tilesize,\n 'height': self.tilesize,\n 'origin': [bounds[0], bounds[3]],\n 'scale': [tilescale, -tilescale],\n 'srid': WEB_MERCATOR_SRID,\n 'datatype': snapped_dataset.bands[0].datatype(),\n 'bands': band_data,\n })\n\n # Store tile\n RasterTile.objects.create(\n rast=dest,\n rasterlayer=self.rasterlayer,\n tilex=tilex,\n tiley=tiley,\n tilez=zoom\n )\n\n # Store histogram data\n if zoom == self.max_zoom:\n bandmetas = RasterLayerBandMetadata.objects.filter(rasterlayer=self.rasterlayer)\n for bandmeta in bandmetas:\n bandmeta.hist_values = self.hist_values[bandmeta.band].tolist()\n bandmeta.save()\n\n # Remove snapped dataset\n self.log('Removing snapped dataset.', zoom=zoom)\n snapped_dataset = None\n os.remove(dest_file)",
"def create(self):\n self._finish_creation_settings()\n return self.project.create_recipe(self.recipe_proto, self.creation_settings)",
"def create(configsetname):\n cnfset = configsetPath(configsetname)\n os.mkdir(cnfset)\n return None",
"def create_recipe(*, recipe_in: RecipeCreate) -> dict:\n new_entry_id = len(RECIPES) + 1\n recipe_entry = Recipe(\n id=new_entry_id,\n label=recipe_in.label,\n source=recipe_in.source,\n url=recipe_in.url,\n )\n RECIPES.append(recipe_entry.dict())\n\n return recipe_entry",
"def tile_set():\n TILES = {\n \"ocean\":\"~\"\n ,\"rock\":\"R\"\n ,\"mountain\":\"M\"\n ,\"player\":\"X\"\n ,\"end\":\"⋆\"\n ,\"npc\":\"I\"\n ,\"cave\":\"C\"\n ,\"dirt\":\"+\"\n ,\"sign\":\"!\"\n }\n\n return TILES",
"def generate_tile(self, tms_x, tms_y, tms_z, arguments):\n pass",
"def create(self, dataset_name, labels=None, driver=None, attributes=None, ontology_ids=None,\n checkout=False) -> entities.Dataset:\n # labels to list\n if labels is not None:\n if not all(isinstance(label, entities.Label) for label in labels):\n labels = entities.Dataset.serialize_labels(labels)\n else:\n labels = list()\n # get creator from token\n payload = {'name': dataset_name,\n 'projects': [self.project.id]}\n if driver is not None:\n payload['driver'] = driver\n success, response = self._client_api.gen_request(req_type='post',\n path='/datasets',\n json_req=payload)\n if success:\n dataset = entities.Dataset.from_json(client_api=self._client_api,\n _json=response.json(),\n datasets=self,\n project=self.project)\n # create ontology and recipe\n dataset = dataset.recipes.create(ontology_ids=ontology_ids, labels=labels, attributes=attributes).dataset\n # # patch recipe to dataset\n # dataset = self.update(dataset=dataset, system_metadata=True)\n else:\n raise exceptions.PlatformException(response)\n logger.info('Dataset was created successfully. Dataset id: {}'.format(dataset.id))\n assert isinstance(dataset, entities.Dataset)\n if checkout:\n self.checkout(dataset=dataset)\n return dataset",
"def make_network_set(name, networkUris=[]):\n\n return {\n 'name': name,\n 'type': 'network-set',\n 'nativeNetworkUri': None,\n 'networkUris': networkUris[:],\n 'connectionTemplateUri': None}",
"def createNewSetup(self, show, sequence, beat=\"p\"):\n importer = DrawingImporter(show, sequence, beat,\n [{\"imageFile\":DrawingImporter.BLANK_SETUP, \"useClearCompImage\": \"1\"}],\n setupCallback=self.feedImportedSetups, saveBlankMultitrack=True)\n\n recipies = importer.getNewSetups()\n mode = Mode(show, sequence)\n properties = ET.fromstring(recipies[0]).find('Properties').attrib\n properties['frame'] = '0001'\n multiTrackFile = mode.get('[recipeMultiTrackFile]', properties)\n\n compFile = mode.get('[recipeCompedFile]', properties)\n self.addFeedback(\"reloadImages\", [compFile])\n\n multiTrack = self.fileServiceLocal.loadTextFile(multiTrackFile)\n\n data = []\n data.append('<Recipies>')\n data.append('<Setup show=\"%(show)s\" sequence=\"%(sequence)s\" beat=\"%(beat)s\" setup=\"%(setup)s\" version=\"%(version)s\">' % properties)\n data.append(multiTrack + \"</Setup>\" + \"</Recipies>\")\n dataString = \"\".join(data)\n\n self.feedReloadSetupsMultiTracks(dataString)",
"def duplicateCreateBlankSetup(self, *arg, **properties):\n recipe = CopySetup.createBlankSetupBookedMatchingSetupRange(properties,\n setupCallback=self.feedDuplicatedSetups,\n renderCallback=self.__renderCallback,\n multiTrackCallback=self.feedReloadSetupsMultiTracks,\n username=properties.get('username', ''))\n return recipe",
"def create_recipe(current_user):\n data = request.get_json()\n\n try:\n for item in data:\n new_recipe = Recipe(\n name=item['name'],\n text=item['text'],\n author=current_user\n )\n for ingredient_item in item['ingredients']:\n # check for an existing ingredient\n new_ingredient = Ingredient.query.filter(Ingredient.name.ilike(ingredient_item)).first()\n if not new_ingredient:\n new_ingredient = Ingredient(name=ingredient_item)\n db.session.add(new_ingredient)\n db.session.commit()\n\n # either way create a relationship\n new_recipe.used.append(new_ingredient)\n \n db.session.commit()\n except:\n return jsonify({'message': 'Invalid or missing attributes'}), 400\n\n\n return jsonify({'message': 'Recipe/s successfully created'})",
"def load_recipes_from_test_set(cls, args):\n cls._recipes = Dataset().load_test(\n use_full_test_set=args.use_full_test_set,\n use_english=args.use_english,\n use_english_intelligible=args.use_english_intelligible,\n use_gold=args.use_gold)\n cls._add_indices_to_recipes()\n cls._initialize_recipes_status()\n logging.info(\"Recipes loaded.\")",
"def create(data):\n \n return Setlist(\n list_id = data['id'],\n name = data['name'],\n items = data['num_sets'])",
"def create_new_recipe(cls, user_id, recipe_title, instructions, source=''):\n\n new_recipe = Recipe(user_id=user_id, recipe_title=recipe_title, instructions=instructions, source=source)\n\n db.session.add(new_recipe)\n db.session.commit()\n\n return new_recipe",
"def create(self, spec, force_cache=False, image_dir=\"~/.hyperkit\"):",
"def create_dataset(client: DatasetClient, name: str, props: dict,\n dataset_type: str, override: bool = True):\n if override:\n response = client.datasets.list()\n datasets = {r.name: r.dataset_id for r in response}\n if name in datasets:\n client.datasets.delete(datasets[name])\n response = client.datasets.create(name, dataset_type, props=props)\n dataset_id = response.dataset_id\n return dataset_id",
"def test_recipes_create(self):\n app = self.create_app()\n c = app.test_client()\n\n # test if authorization is required to create a recipe\n rv = c.get('/recipes/create')\n self.assertRedirects(rv, \"/auth/login\")\n\n # test recipe page\n register(c, app.config[\"USERNAME\"], app.config[\"PASSWORD\"])\n login(c, app.config[\"USERNAME\"], app.config[\"PASSWORD\"])\n c.get('/recipes/create')\n self.assert_template_used(\"recipes/create.html\")\n\n # test adding recipe\n recipe = {'author_id': \"unittest\", 'title': \"recipe_unittest2\", 'body': \"Empty body\",\n 'servings': 4, 'tag': \"dessert\", 'ingredients': [{'ingName': \"ing_unittest3_solid\", 'quantity': 180, 'portion': 'g'}, {\n 'ingName': \"ing_unittest1_liquid\", 'quantity': 2, 'portion': 'cup'}]}\n with app.app_context():\n create_recipe(c, recipe)\n self.assert_template_used(\"recipes/index.html\")",
"def _create_tile(cls, onnx_node, inputs, opset_version):\n # we move several inputs to singa's attribuates\n # and mark them so we don't use them when we run this operator\n repeats = tensor.to_numpy(inputs.pop(1)).astype(np.int32).tolist()\n onnx_node.consumed_inputs.append(onnx_node.inputs[1])\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return _, forward(repeats)",
"def buildTiles(self, items, attributes):\n pass",
"def create_machine(self, name, ami, is_windows, key_name, key_data, username, password,\n instance_type=Consts.FREE_INSTANCE_TYPE, tags=None, allowed_ip_prefixes=Consts.EVERYONE):\n res = self.conn.run_instances(ami, key_name=key_name, instance_type=instance_type, security_groups=[\"default\"])\n inst = res.instances[0]\n assert inst, \"Machine creation failed!\"\n inst.add_tag(\"Name\", name)\n #TODO tags, key, username/password, security groups, billing, info\n t = threading.Thread(target=self.__stop_new_machine, args=[inst])\n t.start()\n return MachineDetails(inst)"
] | [
"0.5746558",
"0.56829774",
"0.56782365",
"0.5534749",
"0.54125917",
"0.53344876",
"0.52849835",
"0.52363425",
"0.52307814",
"0.52018946",
"0.5193289",
"0.5177163",
"0.51419634",
"0.5093439",
"0.5085827",
"0.50712436",
"0.5016903",
"0.4999963",
"0.49877235",
"0.49808466",
"0.49770543",
"0.49740487",
"0.49625984",
"0.49548075",
"0.49420592",
"0.49404785",
"0.49361855",
"0.492405",
"0.49240437",
"0.4912218"
] | 0.7657994 | 0 |
Publish your tileset. tilesets publish | def publish(tileset, token=None, indent=None):
mapbox_api = _get_api()
mapbox_token = _get_token(token)
url = "{0}/tilesets/v1/{1}/publish?access_token={2}".format(
mapbox_api, tileset, mapbox_token
)
r = requests.post(url)
if r.status_code == 200:
click.echo(json.dumps(r.json(), indent=indent))
click.echo(
f"You can view the status of your tileset with the `tilesets status {tileset}` command.",
err=True,
)
else:
raise errors.TilesetsError(f"{r.text}") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def publish():\n pass",
"def publish(self):\n return",
"def publish(self, settings, item):\n\n publisher = self.parent\n engine = publisher.engine\n document = item.properties[\"document\"]\n\n path = _document_path(document)\n item.properties[\"upload_path\"] = path\n item\n psdProject = PSDImage.open(path)\n\n #save layers to link and create new task to do so\n for layer in psdProject:\n layer.compose().save(layer.name+'.tiff')\n self.logger.info(\"Saved Layer {layerName}.psd\".format(layerName=layer.name))\n publish = sgtk.util.register_publish(publisher.sgtk,\n item.context,\n os.path.join(os.path.dirname(path),layer.name+'.tiff'),\n layer.name,\n version_number=None,\n published_file_type=\"Rendered Image\")",
"def publish(self):\n #vprint(\"PUBLISHING \",self.__dict__)\n \n js = self.compute_json()\n name = self.name\n #topicdir = \"/topicd/\" if constants.publishToS3Dev else \"/topic/\"\n s3path = constants.compositeDir+\"/\"+name+\"/main.json\" #the path where the page will finally end up\n s3.s3SetContents(s3path,contents=js,relativeTo=\"\",contentType=\"application/json\")\n self.genPage()",
"def publish():\n reset()\n compress()\n build()\n s3deploy()\n log_success()",
"def __publish_yeticache(self, item, output, work_template, primary_publish_path,\n sg_task, comment, thumbnail_path, progress_cb):\n # determine the publish info to use\n #\n progress_cb(10, \"Determining publish details\")\n\n # the file and folder name is derived from the fur node\n furNodeName = item['name']\n\n # get the current scene path and extract fields from it\n # using the work template:\n scene_path = os.path.abspath(cmds.file(query=True, sn=True))\n fields = work_template.get_fields(scene_path)\n publish_version = fields[\"version\"]\n tank_type = output[\"tank_type\"]\n\n # create the publish path by applying the fields\n # with the publish template:\n publish_template = output[\"publish_template\"]\n\n # publish path looks something like this at the time of writing\n # C:\\mnt\\workspace\\projects\\unPE\\spt\\tests\\furPipeDev\\fx\\pub\\fur\\008\n # this is what goes in shotgun, and i'll use it when loading in the\n # results at the other end\n sg_publish_path = publish_template.apply_fields(fields)\n\n # for performance i think it's best to put each sequence of fur cache\n # files in a subdirectory (we can more quickly get the list of caches\n # from a dir listing that way)\n # the final publish path will look like this\n # # C:\\mnt\\workspace\\projects\\unPE\\spt\\tests\\furPipeDev\\fx\\pub\\fur\\008\\namespace_furNodeShape\\namespace_furnodeShape.####.fur\n basename = furNodeName.replace(\":\",\"_\")\n filename = basename + \".%04d.fur\"\n actual_publish_path = os.path.join(sg_publish_path, basename, filename)\n\n # shotgun publish name will be the rest of the path, past the version\n # eg namespace_furNodeShape/namespace_furnodeShape.####.fur\n #sg_publish_name = \"%s/%s\" % (basename, filename)\n\n # determine the publish name (this is kinda the element name master/fur):\n publish_name = fields.get(\"name\")\n if not publish_name:\n publish_name = os.path.basename(sg_publish_path)\n\n # Find additional info from the scene:\n progress_cb(10, \"Analysing scene\")\n\n # for the given fur node work out the range to cache. this is the\n # minimum of playback start and the earliest simulation start time for\n # any of the connected grooms\n start_frame = int(cmds.playbackOptions(q=True, min=True))\n end_frame = int(cmds.playbackOptions(q=True, max=True))\n\n # get the groom nodes. 
to find an appropriate start frame\n # can't use the yeti command because it doesn't return the namespace of\n # the object\n # groomNodes = cmds.pgYetiCommand(furNodeName, listGrooms=True)\n groomNodes = [n for n in cmds.listConnections(furNodeName, sh=True)\n if cmds.nodeType(n)==\"pgYetiGroom\"]\n for groomNode in groomNodes:\n if cmds.getAttr(groomNode+\".doSimulation\"):\n start_frame = min([start_frame, cmds.getAttr(groomNode+\".simStartFrame\")])\n\n # ensure the publish folder exists:\n publish_folder = os.path.dirname(actual_publish_path)\n self.parent.ensure_folder_exists(publish_folder)\n\n # run the command:\n progress_cb(20, \"Exporting Yeti Cache\")\n self.parent.log_info(\"Executing command: pgYetiCommand(%s,%s,%s)\"\\\n % ( actual_publish_path, start_frame, end_frame ) )\n cmds.pgYetiCommand(furNodeName, writeCache=actual_publish_path,\n range=(start_frame, end_frame),\n samples=3,\n updateViewport=False)\n\n # register the publish:\n progress_cb(75, \"Registering the publish\")\n args = {\n \"tk\": self.parent.tank,\n \"context\": self.parent.context,\n \"comment\": comment,\n \"path\": sg_publish_path,\n \"name\": publish_name, # \"fur\"\n \"version_number\": publish_version,\n \"thumbnail_path\": thumbnail_path,\n \"task\": sg_task,\n \"dependency_paths\": [primary_publish_path],\n \"published_file_type\":tank_type,\n }\n tank.util.register_publish(**args)",
"def publish():\n if sys.argv[-1] == 'publish':\n os.system('python setup.py sdist')\n os.system('twine upload dist/*')\n sys.exit()",
"def create_tiles(self, zoom):\n # Compute the tile x-y-z index range for the rasterlayer for this zoomlevel\n bbox = self.rasterlayer.extent()\n indexrange = tiler.tile_index_range(bbox, zoom)\n\n # Compute scale of tiles for this zoomlevel\n tilescale = tiler.tile_scale(zoom)\n\n # Count the number of tiles that are required to cover the raster at this zoomlevel\n nr_of_tiles = (indexrange[2] - indexrange[0] + 1) * (indexrange[3] - indexrange[1] + 1)\n\n # Create destination raster file\n self.log('Snapping dataset to zoom level {0}'.format(zoom))\n\n bounds = tiler.tile_bounds(indexrange[0], indexrange[1], zoom)\n sizex = (indexrange[2] - indexrange[0] + 1) * self.tilesize\n sizey = (indexrange[3] - indexrange[1] + 1) * self.tilesize\n dest_file = os.path.join(self.tmpdir, 'djangowarpedraster' + str(zoom) + '.tif')\n\n snapped_dataset = self.dataset.warp({\n 'name': dest_file,\n 'origin': [bounds[0], bounds[3]],\n 'scale': [tilescale, -tilescale],\n 'width': sizex,\n 'height': sizey,\n })\n\n self.log('Creating {0} tiles for zoom {1}.'.format(nr_of_tiles, zoom))\n\n counter = 0\n for tilex in range(indexrange[0], indexrange[2] + 1):\n for tiley in range(indexrange[1], indexrange[3] + 1):\n # Log progress\n counter += 1\n if counter % 250 == 0:\n self.log('{0} tiles created at zoom {1}'.format(counter, zoom))\n\n # Calculate raster tile origin\n bounds = tiler.tile_bounds(tilex, tiley, zoom)\n\n # Construct band data arrays\n pixeloffset = (\n (tilex - indexrange[0]) * self.tilesize,\n (tiley - indexrange[1]) * self.tilesize\n )\n\n band_data = [\n {\n 'data': band.data(offset=pixeloffset, size=(self.tilesize, self.tilesize)),\n 'nodata_value': band.nodata_value\n } for band in snapped_dataset.bands\n ]\n\n # Add tile data to histogram\n if zoom == self.max_zoom:\n self.push_histogram(band_data)\n\n # Warp source raster into this tile (in memory)\n dest = GDALRaster({\n 'width': self.tilesize,\n 'height': self.tilesize,\n 'origin': [bounds[0], bounds[3]],\n 'scale': [tilescale, -tilescale],\n 'srid': WEB_MERCATOR_SRID,\n 'datatype': snapped_dataset.bands[0].datatype(),\n 'bands': band_data,\n })\n\n # Store tile\n RasterTile.objects.create(\n rast=dest,\n rasterlayer=self.rasterlayer,\n tilex=tilex,\n tiley=tiley,\n tilez=zoom\n )\n\n # Store histogram data\n if zoom == self.max_zoom:\n bandmetas = RasterLayerBandMetadata.objects.filter(rasterlayer=self.rasterlayer)\n for bandmeta in bandmetas:\n bandmeta.hist_values = self.hist_values[bandmeta.band].tolist()\n bandmeta.save()\n\n # Remove snapped dataset\n self.log('Removing snapped dataset.', zoom=zoom)\n snapped_dataset = None\n os.remove(dest_file)",
"def __publish_dirt(self, dirt):\n self.dirt_pub.publish(dirt)",
"def publish(self, kpi_dict):\n pass",
"def mbtiles(ctx, files, output, overwrite, title, description,\n layer_type, img_format, tile_size, zoom_levels, image_dump,\n num_workers, src_nodata, dst_nodata, resampling):\n output, files = resolve_inout(files=files, output=output,\n overwrite=overwrite)\n inputfile = files[0]\n\n logger = logging.getLogger('rio-mbtiles')\n\n with ctx.obj['env']:\n\n # Read metadata from the source dataset.\n with rasterio.open(inputfile) as src:\n\n validate_nodata(dst_nodata, src_nodata, src.profile.get('nodata'))\n base_kwds = {'dst_nodata': dst_nodata, 'src_nodata': src_nodata}\n\n if src_nodata is not None:\n base_kwds.update(nodata=src_nodata)\n\n if dst_nodata is not None:\n base_kwds.update(nodata=dst_nodata)\n\n # Name and description.\n title = title or os.path.basename(src.name)\n description = description or src.name\n\n # Compute the geographic bounding box of the dataset.\n (west, east), (south, north) = transform(\n src.crs, 'EPSG:4326', src.bounds[::2], src.bounds[1::2])\n\n # Resolve the minimum and maximum zoom levels for export.\n if zoom_levels:\n minzoom, maxzoom = map(int, zoom_levels.split('..'))\n else:\n zw = int(round(math.log(360.0 / (east - west), 2.0)))\n zh = int(round(math.log(170.1022 / (north - south), 2.0)))\n minzoom = min(zw, zh)\n maxzoom = max(zw, zh)\n\n logger.debug(\"Zoom range: %d..%d\", minzoom, maxzoom)\n\n # Parameters for creation of tile images.\n base_kwds.update({\n 'driver': img_format.upper(),\n 'dtype': 'uint8',\n 'nodata': 0,\n 'height': tile_size,\n 'width': tile_size,\n 'count': 3,\n 'crs': TILES_CRS})\n\n img_ext = 'jpg' if img_format.lower() == 'jpeg' else 'png'\n\n # Initialize the sqlite db.\n if os.path.exists(output):\n os.unlink(output)\n # workaround for bug here: https://bugs.python.org/issue27126\n sqlite3.connect(':memory:').close()\n\n conn = sqlite3.connect(output)\n cur = conn.cursor()\n cur.execute(\n \"CREATE TABLE tiles \"\n \"(zoom_level integer, tile_column integer, \"\n \"tile_row integer, tile_data blob);\")\n cur.execute(\n \"CREATE TABLE metadata (name text, value text);\")\n\n # Insert mbtiles metadata into db.\n cur.execute(\n \"INSERT INTO metadata (name, value) VALUES (?, ?);\",\n (\"name\", title))\n cur.execute(\n \"INSERT INTO metadata (name, value) VALUES (?, ?);\",\n (\"type\", layer_type))\n cur.execute(\n \"INSERT INTO metadata (name, value) VALUES (?, ?);\",\n (\"version\", \"1.1\"))\n cur.execute(\n \"INSERT INTO metadata (name, value) VALUES (?, ?);\",\n (\"description\", description))\n cur.execute(\n \"INSERT INTO metadata (name, value) VALUES (?, ?);\",\n (\"format\", img_ext))\n cur.execute(\n \"INSERT INTO metadata (name, value) VALUES (?, ?);\",\n (\"bounds\", \"%f,%f,%f,%f\" % (west, south, east, north)))\n\n conn.commit()\n\n # Create a pool of workers to process tile tasks.\n pool = Pool(num_workers, init_worker,\n (inputfile, base_kwds, resampling), 100)\n\n # Constrain bounds.\n EPS = 1.0e-10\n west = max(-180 + EPS, west)\n south = max(-85.051129, south)\n east = min(180 - EPS, east)\n north = min(85.051129, north)\n\n # Initialize iterator over output tiles.\n tiles = mercantile.tiles(\n west, south, east, north, range(minzoom, maxzoom + 1))\n\n for tile, contents in pool.imap_unordered(process_tile, tiles):\n\n if contents is None:\n logger.info(\"Tile %r is empty and will be skipped\", tile)\n continue\n\n # MBTiles has a different origin than Mercantile/tilebelt.\n tiley = int(math.pow(2, tile.z)) - tile.y - 1\n\n # Optional image dump.\n if image_dump:\n img_name = '%d-%d-%d.%s' % (\n tile.x, 
tiley, tile.z, img_ext)\n img_path = os.path.join(image_dump, img_name)\n with open(img_path, 'wb') as img:\n img.write(contents)\n\n # Insert tile into db.\n cur.execute(\n \"INSERT INTO tiles \"\n \"(zoom_level, tile_column, tile_row, tile_data) \"\n \"VALUES (?, ?, ?, ?);\",\n (tile.z, tile.x, tiley, buffer(contents)))\n\n conn.commit()\n\n conn.close()\n # Done!",
"def __publish_geocache(self, item, output, work_template, primary_publish_path,\n sg_task, comment, thumbnail_path, progress_cb):\n # determine the publish info to use\n #\n progress_cb(10, \"Determining publish details\")\n\n # get the current scene path and extract fields from it\n # using the work template:\n scene_path = os.path.abspath(cmds.file(query=True, sn=True))\n fields = work_template.get_fields(scene_path)\n publish_version = fields[\"version\"]\n tank_type = output[\"tank_type\"]\n\n # create the publish path by applying the fields\n # with the publish template:\n publish_template = output[\"publish_template\"]\n publish_path = publish_template.apply_fields(fields)\n # doCreateGeometryCache expects forward slashes\n geo_publish_path = publish_path.replace(\"\\\\\", \"/\")\n\n # ensure the publish folder exists:\n publish_folder = os.path.dirname(publish_path)\n self.parent.ensure_folder_exists(publish_folder)\n\n # determine the publish name:\n publish_name = fields.get(\"name\")\n if not publish_name:\n publish_name = os.path.basename(publish_path)\n\n # Find additional info from the scene:\n #\n progress_cb(10, \"Analysing scene\")\n\n # find the animated frame range to use:\n frame_start = int(cmds.playbackOptions(q=True, min=True))\n frame_end = int(cmds.playbackOptions(q=True, max=True))\n\n namespace = item[\"name\"]\n setName = namespace + \":cache_SET\"\n members = pymel.core.sets(setName, q=True)\n transforms = map(lambda m: pymel.core.listRelatives(m, type=\"transform\", allDescendents=True) if not m.endswith(\"_GEO\") else [m], members)\n geos = [geo for geoList in transforms for geo in geoList if geo.endswith(\"_GEO\")]\n pymel.core.select(geos)\n\n # run the command:\n progress_cb(30, \"Exporting GeoCache\")\n try:\n # do it\n self.parent.log_debug(\"Executing command: aaPCGen.doExport(%s,%s,%s)\"\\\n % ( publish_path, frame_start, frame_end ) )\n aaPCGen.doExport(publish_path,frame_start,frame_end)\n except Exception, e:\n raise TankError(\"Failed to export GeoCache: %s\" % e)\n\n geo_export_cmd = 'doCreateGeometryCache 6 {{ \"0\", \"{}\", \"{}\", \"OneFile\", \"0\", \"{}/{}\", \"1\", \"\", \"0\", \"export\", \"0\", \"1\", \"1\", \"0\", \"1\", \"mcc\", \"1\" }} ;'.format(frame_start, frame_end, geo_publish_path, namespace)\n try:\n # do it\n self.parent.log_debug(\"Executing command: \" + geo_export_cmd)\n mel.eval(geo_export_cmd)\n except Exception, e:\n raise TankError(\"Failed to export GeoCache: %s\" % e)\n\n # code will be the basename of path (017)\n # register the publish:\n progress_cb(75, \"Registering the publish\")\n args = {\n \"tk\": self.parent.tank,\n \"context\": self.parent.context,\n \"comment\": comment,\n \"path\": publish_path,\n \"name\": publish_name,\n \"version_number\": publish_version,\n \"thumbnail_path\": thumbnail_path,\n \"task\": sg_task,\n \"dependency_paths\": [primary_publish_path],\n \"published_file_type\":tank_type,\n }\n tank.util.register_publish(**args)",
"def test_publish(self):\n\n adminuser,adminpass = self.testdata.find_account_for('toolmanager')\n\n self.utils.account.login_as(adminuser,adminpass)\n\n self.contribtool.publish(TOOLNAME)",
"def fast_publish(self, request):\n self.__connection.fast_publish(request)",
"def write_overview_tile(self, tx, ty, tz,tms_osm):\n\n image_format = self.get_overview_tile_format(tx, ty, tz)\n\n if image_format is None:\n return\n else:\n num_bands = self.get_num_bands(image_format)\n\n dsquery = self.mem_drv.Create('', 2*self.tile_size, 2*self.tile_size, num_bands)\n self.fill_init_dest(dsquery)\n # tms: z=19: 281626\n # -z=18-140813 176168*2=352336; 176168*2+1=352337\n # -- 352336,352337\n y_from=2*ty\n y_to=2*ty + 1\n ty_tms=ty;\n s_y_type=\"tms\"\n if tms_osm:\n # osm: z=19: 281626\n # -z=18-140813 85975*2+1=171951; 85975*2=171950\n # -- 171951,171950 [in range: last/end not used]\n y_from=2*ty + 1\n y_to=2*ty\n ty_tms=(2**tz-1) - ty\n s_y_type=\"osm\"\n s_tile_id=\"{0}-{1}-{2}.{3}\".format(str(tz), str(tx),str(ty),s_y_type)\n if self.verbose:\n # Build from zoom 19 tiles: (281626, 171951) (281627, 171951) (281626, 171950) (281627, 171950)\n print \"\\tBuild [\",s_tile_id,\"] from [\",self.output_dir,\"] zoom\", tz+1,\" tiles [\",s_y_type,\"]: \", (2*tx, y_from), (2*tx+1, y_from),(2*tx, y_to), (2*tx+1, y_to)\n\n for cx, cy, child_image_format in self.iter_children(tx, ty, tz):\n if (ty_tms==0 and cy==1) or (ty_tms!=0 and (cy % (y_from)) != 0):\n tileposy = 0\n else:\n tileposy = self.tile_size\n if tx:\n tileposx = cx % (2*tx) * self.tile_size\n elif tx==0 and cx==1:\n tileposx = self.tile_size\n else:\n tileposx = 0\n\n path = self.get_full_path(cx, cy, tz+1, format_extension[child_image_format])\n\n dsquerytile = gdal.Open(path, gdal.GA_ReadOnly)\n\n dsquery.WriteRaster(tileposx, tileposy, self.tile_size, self.tile_size,\n dsquerytile.ReadRaster(0, 0, self.tile_size, self.tile_size),\n band_list=range(1, dsquerytile.RasterCount+1))\n\n if image_format == \"PNG\" and dsquerytile.RasterCount != num_bands:\n dsquery.WriteRaster(tileposx, tileposy, self.tile_size, self.tile_size,\n self.get_alpha_filler(), band_list=[num_bands])\n\n dstile = self.mem_drv.Create('', self.tile_size, self.tile_size, num_bands)\n path = self.get_full_path(tx, ty, tz, format_extension[image_format])\n self.resampler(path, dsquery, dstile, image_format)",
"def main(parameters):\n metadata = get_metadata(parameters)\n # pprint(metadata)\n image_api = NswSatelliteImages(parameters, metadata)\n print('Zoom level:', image_api.zoom_level,\n 'Resolution:', image_api.resolution,\n 'Scale:', image_api.scale)\n image_api.download_tile(xtile=39000, ytile=60000)",
"def __publish_obj(self, item, output, work_template, primary_publish_path,\n sg_task, comment, thumbnail_path, progress_cb):\n # determine the publish info to use\n #\n progress_cb(10, \"Determining publish details\")\n\n # get the current scene path and extract fields from it\n # using the work template:\n scene_path = os.path.abspath(cmds.file(query=True, sn=True))\n fields = work_template.get_fields(scene_path)\n publish_version = fields[\"version\"]\n tank_type = output[\"tank_type\"]\n\n # create the publish path by applying the fields\n # with the publish template:\n publish_template = output[\"publish_template\"]\n publish_path = publish_template.apply_fields(fields)\n\n # ensure the publish folder exists:\n publish_folder = os.path.dirname(publish_path)\n self.parent.ensure_folder_exists(publish_folder)\n\n # determine the publish name:\n publish_name = fields.get(\"name\")\n if not publish_name:\n publish_name = os.path.basename(publish_path)\n\n # Find additional info from the scene:\n #\n progress_cb(20, \"Analysing scene\")\n\n # build the export command.\n obj_export_cmd = \"file -force -es -pr -typ \\\"OBJexport\\\"\"\n obj_export_cmd += \" -options \\\"groups=1;ptgroups=1;materials=0;smoothing=1;normals=1\\\"\"\n obj_export_cmd += \" \\\"%s\\\"\" % (publish_path.replace(\"\\\\\", \"/\"))\n\n # ...and execute it:\n progress_cb(30, \"Exporting OBJ file\")\n try:\n self.parent.log_debug(\"Executing command: %s\" % obj_export_cmd)\n\n # make sure plugin is loaded\n if not cmds.pluginInfo('objExport',query=True,loaded=True):\n cmds.loadPlugin('objExport')\n\n # clear selection, select what's in the set\n sel = cmds.ls(sl=True)\n set_contents = cmds.sets('publish_SET',q=True)\n cmds.select(clear=True)\n for obj in set_contents:\n cmds.select(obj,add=True)\n\n # do the actual export\n mel.eval(obj_export_cmd)\n\n # then restore the selection\n cmds.select(clear=True)\n for obj in sel:\n cmds.select(obj,add=True)\n\n except Exception, e:\n raise TankError(\"Failed to export OBJ file: %s\" % e)\n\n # register the publish:\n progress_cb(75, \"Registering the publish\")\n args = {\n \"tk\": self.parent.tank,\n \"context\": self.parent.context,\n \"comment\": comment,\n \"path\": publish_path,\n \"name\": publish_name,\n \"version_number\": publish_version,\n \"thumbnail_path\": thumbnail_path,\n \"task\": sg_task,\n \"dependency_paths\": [primary_publish_path],\n \"published_file_type\":tank_type\n }\n tank.util.register_publish(**args)",
"def publish():\n fab.local(\"env/bin/python setup.py sdist\")\n tar_filename = fab.local(\n \"env/bin/python setup.py --fullname\", capture=True\n )\n dist_filename = \"dist/{}.tar.gz\".format(tar_filename)\n fab.put(dist_filename, PYREPO_DIR)",
"def burn_tiles(region, zone, train_tier = 1, zoom_level = 19):\n \n os.system(f'cat ../../data/raw/train_tier_{train_tier}/{region}/{zone}/{zone}.json | supermercado burn {zoom_level} | mercantile shapes | fio collect > ../../data/raw/train_tier_{train_tier}/{region}/{zone}/tiles_{region}_{zone}_{zoom_level}.geojson')\n os.system(f'echo done with {region}_{zone}_{zoom_level}')",
"def publish(self, filename):\n # 1) Encrypt file\n # 2) Publish to remote cloud server\n # 3) Wait for the result\n # 4) Store results in files located inside RAM folder",
"def publish(self, waypoints): \n lane = Lane()\n lane.header.frame_id = '/world'\n lane.header.stamp = rospy.Time(0)\n lane.waypoints = waypoints\n self.final_waypoints_pub.publish(lane)",
"def publish(self):\n # Create a public collection with the same uuid and same fields\n public_collection = Collection.get_collection(self.session, self.id, CollectionVisibility.PUBLIC)\n if public_collection:\n public_collection.update(\n **self.to_dict(remove_attr=(\"update_at\", \"created_at\", \"visibility\", \"id\"), remove_relationships=True)\n )\n else:\n public_collection = Collection(\n clone(self.db_object, primary_key=dict(id=self.id, visibility=CollectionVisibility.PUBLIC))\n )\n self.session.add(public_collection)\n\n # Copy over relationships\n for link in self.links:\n link.collection_visibility = CollectionVisibility.PUBLIC\n for dataset in self.datasets:\n if dataset.original_id:\n \"skip modified datasets\"\n continue # TODO: expand to support tombstone and refresh corpora-data-portal/1177\n else:\n dataset.collection_visibility = CollectionVisibility.PUBLIC\n dataset.published = True\n self.session.commit()\n self.delete()\n self.db_object = public_collection.db_object",
"def on_publish( client, userdata, mid ):\n logging.info( \"Data published successfully.\" )",
"def publish(self, id: uplink.Path):\n pass",
"def publish(self):\n self.published = True\n self.save()# pylint: disable=no-member",
"def publish(self):\n msg_imu1, msg_mag1, msg_imu2, msg_mag2, msg_imu, msg_mag= self._create_msg()\n self.pub_imu.publish(msg_imu)\n self.pub_mag.publish(msg_mag)\n #------Uncomment to publish IMUs data separately------",
"def publish(self):\n data = self.read_all_values()\n logger.info(data)\n if self.mqtt:\n self.mqtt.publish_json(data)",
"def __publish_mayacamera(self, item, output, work_template, primary_publish_path,\n sg_task, comment, thumbnail_path, progress_cb):\n # determine the publish info to use\n #\n progress_cb(10, \"Determining publish details\")\n\n # get the current scene path and extract fields from it\n # using the work template:\n scene_path = os.path.abspath(cmds.file(query=True, sn=True))\n fields = work_template.get_fields(scene_path)\n publish_version = fields[\"version\"]\n tank_type = output[\"tank_type\"]\n\n # extract entity from camera node name\n # handle full paths, trim off everything after the _\n # e.g. |pivot_GRP|master_CAM -> master\n fields[\"name\"] = item[\"name\"].split(\"|\")[-1].split(\"_\")[0]\n\n # create the publish path by applying the fields\n # with the publish template:\n fields[\"Step\"] = \"cam\" # first force step to be camera\n publish_template = output[\"publish_template\"]\n publish_path = publish_template.apply_fields(fields)\n\n # ensure the publish folder exists:\n publish_folder = os.path.dirname(publish_path)\n self.parent.ensure_folder_exists(publish_folder)\n\n # determine the publish name:\n publish_name = fields.get(\"name\")\n if not publish_name:\n publish_name = os.path.basename(publish_path)\n\n\n progress_cb(50.0, \"Exporting from scene\")\n try:\n publish_folder = os.path.dirname(publish_path)\n self.parent.ensure_folder_exists(publish_folder)\n self.parent.log_debug(\"Exporting to %s...\" % (publish_path))\n\n # stash the selection\n sel = cmds.ls(sl=True)\n # clear it\n cmds.select(clear=True)\n # select just the specific camera we are processing\n cmds.select(item[\"name\"],add=True)\n\n # do export selection once camera selected\n cmds.file( publish_path,\n type='mayaBinary',\n exportSelected=True,\n force=True,\n )\n\n # reset the selection to what it was prior\n cmds.select(clear=True)\n for obj in sel:\n cmds.select(obj,add=True)\n\n except Exception, e:\n raise TankError(\"Failed to export to %s - %s\" % (publish_path, e))\n\n # register the publish:\n progress_cb(75, \"Registering the publish\")\n args = {\n \"tk\": self.parent.tank,\n \"context\": self.parent.context,\n \"comment\": comment,\n \"path\": publish_path,\n \"name\": publish_name,\n \"version_number\": publish_version,\n \"thumbnail_path\": thumbnail_path,\n \"task\": sg_task,\n \"dependency_paths\": [primary_publish_path],\n \"published_file_type\":tank_type\n }\n tank.util.register_publish(**args)",
"def mqtt_publish(image):\n logging.debug('publishing image to mqtt broker topic %s', \n config['mqtt']['publish_topic'])\n mqtt_client.publish(config['mqtt']['publish_topic'], image)",
"def publish(task_id, release_id, progress=0):\n time.sleep(5)\n # Processing here\n # Inform coordinator that the task has been published\n resp = requests.patch(COORDINATOR_API+'/tasks/'+task_id,\n json={'state': 'published', 'progress': 100})"
] | [
"0.6580128",
"0.5952823",
"0.5932906",
"0.58998835",
"0.5859718",
"0.56866664",
"0.5659581",
"0.5582952",
"0.55645955",
"0.5559791",
"0.5526525",
"0.5434319",
"0.5407289",
"0.54018307",
"0.53910875",
"0.5360681",
"0.5359375",
"0.5341747",
"0.53361744",
"0.5277855",
"0.52742517",
"0.5271758",
"0.52690774",
"0.5243856",
"0.5239999",
"0.5226573",
"0.520324",
"0.5197316",
"0.5181538",
"0.51790917"
] | 0.7308318 | 0 |
View the current queue/processing/complete status of your tileset. tilesets status | def status(tileset, token=None, indent=None):
mapbox_api = _get_api()
mapbox_token = _get_token(token)
url = "{0}/tilesets/v1/{1}/status?access_token={2}".format(
mapbox_api, tileset, mapbox_token
)
r = requests.get(url)
click.echo(json.dumps(r.json(), indent=indent)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def status(self):",
"def thread_status():\n global dataSession\n return jsonify(dict(status=('finished' if len(dataSession) > 1 else 'running')))",
"def report_queue_status(self):\n raise NotImplementedError",
"def update_status(cls):\n for job in cls.query.filter(cls.finished == False):\n num_hits_left = session.query(BoxHit).filter_by(training_job_id = job.id, outstanding=True).count()\n urls_left = session.query(VideoTrainingURL).filter_by(training_job_id=job.id, processed = False)\n dynamo = DynamoIngestionStatusClient()\n num_urls_left = 0\n for url in urls_left:\n dynamo_url = dynamo.get(url.url)\n if dynamo_url is None or dynamo_url['status'] == 'Failed':\n # will never be processed, so ignore for our purposes\n url.processed = True\n else:\n num_urls_left += 1\n if num_hits_left+num_urls_left == 0:\n job.finished = True\n print '*** Job ID: %s is complete ***' % str(job.id)\n\n print '------------- Stats for Job ID: %s -------------' % str(job.id)\n print 'Total URLs : %i' % VideoTrainingURL.query.filter_by(training_job_id = job.id).count()\n print 'Total HITs : %i' % BoxHit.query.filter_by(training_job_id = job.id).count()\n if not job.finished:\n print 'unprocessed URLs: %i' % num_urls_left\n print 'outstanding HITs: %i\\n' % num_hits_left\n session.flush()",
"def queueStatusAll():",
"def status(self):\n logging.debug(\"%s entered status\" % self)\n # print_config(self.infra)\n # print self.images\n # headers = [\"Machine Name\", \"Flavor\", \"IP Addresses\", \"Image Name\", \"Status\"]\n # pt = prettytable.PrettyTable(headers)\n # pt.align[\"Machine Name\"]=\"l\"\n # pt.align[\"IP Addresses\"] = \"l\"\n # pt.align[\"Image Name\"] = \"l\"\n # pt.align[\"Status\"] = \"r\"\n \n print \"Checking status of %s\" % self.footprint_name\n # tmpl = \"%(machine_name)-20s%(flavor)5s%(status)-30s\"\n tmpl1 = \"\"\"%-20s%-52s[%s]\"\"\"\n tmpl2 = \"\"\"%-20s%-60s\\n\"\"\"\n print tmpl1 % (\"Machine Name\", \"IP Addresses\", \"Status\")\n print 80 * \"-\"\n \n for machine in self.machines.keys():\n m = self.machines[machine]\n # machine_name = m.machine_name\n # ips = str(m.ip_addresses)\n # flavor = str(m.flavor)\n # img = str(m.image_id)\n # status = str(m.status)\n # pt.add_row([m, ips, status, img, status])\n # print \"FFF\", m, ips, flavor, img, status\n # print tmpl % locals()\n print m.status\n \n return \"%s is currently: %s\" % (self.footprint_name, self.footprint_status)",
"def overall_status(debug_stmts):\n print_header(\"Running/queued jobs\")\n in_progress_status(debug_stmts)\n\n # Try printing archiving status. If we hit a DebugNotFoundError then\n # report it but keep going (archiving status wasn't explicitly requested,\n # so it's OK if it's missing -- it's not present on all servers).\n print_header(\"Archiving status\")\n try:\n archive_summary(debug_stmts)\n except DebugNotFoundError as e:\n print(\"No archiving information found on this server\\n\")\n\n print_header(\"Backup set status\")\n # Print the status of each backup set in turn\n for backup_set in get_backup_sets(debug_stmts):\n status_of_single_set(debug_stmts, backup_set)",
"def celery_task_status(self):\n return self._get_celery_queue_data()",
"def refresh_queue_status(self):\n \n # Get the jobid and state for all jobs pending/running/completed for the current user\n qacct_stdout=self.run_grid_command_resubmit([\"qacct\",\"-o\",getpass.getuser(),\"-j\",\"*\"])\n \n # info list should include jobid, state, cpus, time, and maxrss\n info=[]\n job_status=[]\n for line in qacct_stdout.split(\"\\n\"):\n if line.startswith(\"jobnumber\") or line.startswith(\"job_number\"):\n if job_status:\n info.append(job_status)\n job_status=[line.rstrip().split()[-1],\"NA\",\"NA\",\"NA\",\"NA\"]\n # get the states for completed jobs\n elif line.startswith(\"failed\"):\n failed_code = line.rstrip().split()[1]\n if failed_code != \"0\":\n if failed_code in [\"37\",\"100\"]:\n job_status[1]=self.job_code_terminated\n else:\n job_status[1]=self.job_code_error\n elif line.startswith(\"deleted_by\"):\n if line.rstrip().split()[-1] != \"NONE\" and job_status[1] == self.job_code_terminated:\n job_status[1]=self.job_code_deleted\n elif line.startswith(\"exit_status\"):\n # only record if status has not yet been set\n if job_status[1] == \"NA\":\n exit_status = line.rstrip().split()[-1]\n if exit_status == \"0\":\n job_status[1]=self.job_code_completed\n elif exit_status == \"137\":\n job_status[1]=self.job_code_terminated\n else:\n job_status[1]=self.job_code_error\n # get the current state for running jobs\n elif line.startswith(\"job_state\"):\n job_status[1]=line.rstrip().split()[-1]\n elif line.startswith(\"slots\"):\n job_status[2]=line.rstrip().split()[-1]\n elif line.startswith(\"ru_wallclock\"):\n try:\n # get the elapsed time in minutes\n job_status[3]=str(float(line.rstrip().split()[-1])/60.0)\n except ValueError:\n job_status[3]=\"NA\"\n elif line.startswith(\"ru_maxrss\"):\n job_status[4]=line.rstrip().split()[-1]+\"K\"\n \n if job_status:\n info.append(job_status)\n\n return info",
"def status(self):\n # pdb.set_trace()\n print(\"\\u001b[4m\\t\\tTimer Status\\u001b[0m\")\n print(\"\\n\\t# active timers: {}\".format(len(self.timer_threads)))\n for n in range(0, len(self.timer_threads)):\n active = True\n print(\"\\n\\tID: {}\\n\\tName: {}\\n\\tInit: {} AlarmT: {}\\n\\tTest?: {}\\n\\tTarget: {}\"\n .format(n, self.tuplist[n][1]['name'],\n self.tuplist[n][1]['init_time'].strftime(\"(%m-%d-%y)%H:%M:%S\"),\n self.tuplist[n][1]['alarm_time'],\n self.tuplist[n][1]['testmode'],\n self.tuplist[n][1]['todo']))\n return True",
"def getStatus():",
"def status(self):\n pass",
"def status(self):\n pass",
"def status():\n _request('worklog/status/')",
"def status(self):\n\t\treturn self._status",
"def status():\n used = get_space_used()\n avail = get_space_available()\n allowed = config.download.space_to_use\n print \"Space used by downloaded files: %.2f GB of %.2f GB (%.2f%%)\" % \\\n (used/1024.0**3, allowed/1024.0**3, 100.0*used/allowed)\n print \"Space available on file system: %.2f GB\" % (avail/1024.0**3)\n\n numwait = jobtracker.query(\"SELECT COUNT(*) FROM requests \" \\\n \"WHERE status='waiting'\", \\\n fetchone=True)\n numfail = jobtracker.query(\"SELECT COUNT(*) FROM requests \" \\\n \"WHERE status='failed'\", \\\n fetchone=True)\n print \"Number of requests waiting: %d\" % numwait\n print \"Number of failed requests: %d\" % numfail\n\n numdlactive = jobtracker.query(\"SELECT COUNT(*) FROM files \" \\\n \"WHERE status='downloading'\", \\\n fetchone=True)\n numdlfail = jobtracker.query(\"SELECT COUNT(*) FROM files \" \\\n \"WHERE status='failed'\", \\\n fetchone=True)\n print \"Number of active downloads: %d\" % numdlactive\n print \"Number of failed downloads: %d\" % numdlfail",
"def status(self):\n assert(self.__complete)\n return self.__status",
"def status():\n pass",
"def task_status():\n pass",
"def status(self):\n with self.__lock:\n assert(self.__complete)\n return self.__status",
"def _get_job_status(self):\n total_hits = session.query(BoxHit).filter_by(training_job_id=self.id).count()\n num_hits_left = session.query(BoxHit).filter_by(training_job_id=self.id, outstanding=True).count()\n total_urls = self.num_urls\n num_urls_left = session.query(VideoTrainingURL).filter_by(job=self, processed=False).count()\n faces_obtained = MTurkBox.query.filter_by(label=self.evaluator.target_label, result=True).count()\n return '\\n'.join([\n '------------- Stats for Job ID: %s -------------' % str(self.id) ,\n 'Job for Label : %s' % self.label.name,\n 'Total URLs : %d' % total_urls,\n 'Total HITs : %d' % total_hits,\n 'unprocessed URLS : %d' % num_urls_left,\n 'outstanding Hits : %d' % num_hits_left,\n 'Job Finish Status : %s' % self.finished,\n 'Faces Obtained : %d' % faces_obtained,\n ]) + '\\n'",
"def queueStatus(targets):",
"def container_status(self):\n if self.status == 'complete':\n return 'complete'\n try:\n task_status = self._ecs.describe_tasks(tasks=[self.name])['tasks'][0]['lastStatus']\n return task_status\n except (IndexError, ClientError):\n return 'STOPPED'",
"def status(self):\n self.scion_sh('status')",
"def do_status(self, args):\n status = self._leet.job_status\n\n for job in self.finished_jobs:\n status.append({\"id\" : job.id,\n \"hostname\" : job.machine.hostname,\n \"plugin\": job.plugin_instance.LEET_PG_NAME,\n \"status\" : job.status})\n if status:\n pretty_jobs_status(status)\n else:\n print(\"***No jobs pending\")",
"def _print_status(self):",
"def get_status(self):\n url = \"data_request?id=jobstatus&job=%d&plugin=zwave\" % self.id\n return self.vera.get(url)",
"def status(self):\n return self.microblaze.state",
"def status(self):\n return self.m.status",
"def get_queue_status(self, mailing_id):\n if getattr(self.settings, 'AK_TEST', False):\n return self.TEST_DATA.get('get_queue_status')\n res = self.client.get(\n #the '/' at the end is IMPORTANT!\n '%s/rest/v1/mailer/%s/progress/' % (self.base_url, mailing_id)\n )\n rv = {'res': res}\n if res.status_code == 200:\n res_dict = res.json()\n rv['status'] = res_dict.get('status', None)\n rv['finished'] = res_dict.get('finished', None)\n rv['progress'] = res_dict.get('progress', None)\n rv['target_count'] = res_dict.get('expected_send_count', None)\n rv['started_at'] = res_dict.get('started_at', None)\n return rv"
] | [
"0.6251672",
"0.6249044",
"0.6178998",
"0.61220944",
"0.6083708",
"0.60823643",
"0.601419",
"0.5995323",
"0.5989828",
"0.5984694",
"0.5970482",
"0.5943197",
"0.5943197",
"0.5942803",
"0.5938031",
"0.5891603",
"0.5884071",
"0.58760405",
"0.5856712",
"0.58364433",
"0.58343846",
"0.5824144",
"0.5819547",
"0.581321",
"0.5785121",
"0.5772289",
"0.57642365",
"0.5758178",
"0.575374",
"0.5737286"
] | 0.6567368 | 0 |
View all jobs for a particular tileset. tilesets jobs | def jobs(tileset, stage, token=None, indent=None):
mapbox_api = _get_api()
mapbox_token = _get_token(token)
url = "{0}/tilesets/v1/{1}/jobs?access_token={2}".format(
mapbox_api, tileset, mapbox_token
)
if stage:
url = "{0}/tilesets/v1/{1}/jobs?stage={2}&access_token={3}".format(
mapbox_api, tileset, stage, mapbox_token
)
r = requests.get(url)
click.echo(json.dumps(r.json(), indent=indent)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def all_jobs():\n\n jobs = Job.get_all()\n\n oneoffs = OneOff.get_all()\n\n job = JobView(None, jobs, oneoffs, False, Job.count() > 0)\n\n add_trello_task_links_to_g()\n\n return render_template(\"jobs.template.html\", page_title=\"Jobs\", job_info=job)",
"def job(tileset, job_id, token=None, indent=None):\n mapbox_api = _get_api()\n mapbox_token = _get_token(token)\n url = \"{0}/tilesets/v1/{1}/jobs/{2}?access_token={3}\".format(\n mapbox_api, tileset, job_id, mapbox_token\n )\n r = requests.get(url)\n\n click.echo(json.dumps(r.json(), indent=indent))",
"def jobs(self):\n raise NotImplementedError()",
"def main(self):\n\t\tprint \"Retreiving view 'All\",\n\t\tview_all = self.hudson.getViewByName('All')\n\t\tprint \"Done\"\n\t\tprint \"iterating over jobs\"\n\t\tfor job in view_all.jobs.values():\n\t\t\tviewname = job.name.split(\".\")[0]\n\t\t\tif job.name not in self.getJobListFromDB():\n\t\t\t\tself.addJobToDb(job.name)\n\t\t\tif viewname not in self.getViewListFromDB():\n\t\t\t\tself.addViewToDb(viewname)\n\t\t\tfor build in job.builds:\n\t\t\t\tbo = HudsonConnector.HudsonObject( self.hudson.getDataFromUrl(build['url']) )\n\t\t\t\tstamp = datetime.datetime.fromtimestamp(bo.timestamp/1000)\n\t\t\t\tif stamp > self.lastrun:\n\t\t\t\t\tif bo.result is None:\n\t\t\t\t\t\trunname = job.name+\" #%d\" % bo.number\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tprint runname.ljust(29), str(stamp).ljust(24), bo.result.capitalize()\n\t\t\t\t\t\texcept AttributeError:\n\t\t\t\t\t\t\tprint runname.ljust(29), str(stamp).ljust(24), \"Unknown\"\n\t\t\t\t\telse:\n\t\t\t\t\t\tjobdata = { 'name':job.name, 'view':job.name.split(\".\")[0], 'start':stamp, \n\t\t\t\t\t\t\t\t\t'end':stamp + datetime.timedelta(seconds=bo.duration),\n\t\t\t\t\t\t\t\t\t'duration':bo.duration,\n\t\t\t\t\t\t\t\t\t'result':bo.result\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\tself.uploadJobState(jobdata)\n\t\tself.saveState()",
"def active_jobs():\n\n jobs = Job.get_all_active()\n oneoffs = OneOff.get_all()\n\n job = JobView(None, jobs, oneoffs, True, Job.count() > 0)\n\n add_trello_task_links_to_g()\n\n return render_template(\"jobs.template.html\", page_title=\"Jobs\", job_info=job)",
"async def jobs(request):\n\n job_list = await get_jobs(request)\n return template('jobs.html',\n jobs=job_list)",
"async def get_jobs(): \n return mngr.getAllJobs()",
"def jobs(ctx, page):\n user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'),\n ctx.obj.get('experiment'))\n page = page or 1\n try:\n response = PolyaxonClient().experiment.list_jobs(\n user, project_name, _experiment, page=page)\n except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:\n Printer.print_error('Could not get jobs for experiment `{}`.'.format(_experiment))\n Printer.print_error('Error message `{}`.'.format(e))\n sys.exit(1)\n\n meta = get_meta_response(response)\n if meta:\n Printer.print_header('Jobs for experiment `{}`.'.format(_experiment))\n Printer.print_header('Navigation:')\n dict_tabulate(meta)\n else:\n Printer.print_header('No jobs found for experiment `{}`.'.format(_experiment))\n\n objects = [Printer.add_status_color(o.to_light_dict(humanize_values=True))\n for o in response['results']]\n objects = list_dicts_to_tabulate(objects)\n if objects:\n Printer.print_header(\"Jobs:\")\n objects.pop('experiment', None)\n dict_tabulate(objects, is_list_dict=True)",
"def jobs(self):\n return self.get_jobs()",
"def all_jobs_for_client(ClientID):\n\n client = Client.get(ClientID)\n\n jobs = Job.get_all_for_client(ClientID)\n\n oneoffs = OneOff.get_from_client_id_between_dates(ClientID)\n\n invoices = MonthlyInvoice.get_from_client_id_between_dates(ClientID)\n\n job = JobView(client, jobs, oneoffs, False, Job.get_count_for_client(ClientID) > 0)\n\n add_trello_task_links_to_g()\n\n return render_template(\"jobs.template.html\", page_title=\"Jobs\", job_info=job, invoices=invoices)",
"def get(self):\n\n meta_info = utils.get_all_available_jobs()\n self.render(settings.APP_INDEX_PAGE, jobs_meta_info=json.dumps(meta_info))",
"def get_jobs_list(self, response):\n pass",
"def list(self):\n return self.rpc.call(MsfRpcMethod.JobList)",
"def ListJobs(cls):\n return [key.parent().string_id() for key in cls.query().fetch(\n 100, keys_only=True)]",
"def list(username, verbose, token=None, indent=None):\n mapbox_api = _get_api()\n mapbox_token = _get_token(token)\n url = \"{0}/tilesets/v1/{1}?access_token={2}\".format(\n mapbox_api, username, mapbox_token\n )\n r = requests.get(url)\n if r.status_code == 200:\n if verbose:\n for tileset in r.json():\n click.echo(json.dumps(tileset, indent=indent))\n else:\n for tileset in r.json():\n click.echo(tileset[\"id\"])\n else:\n raise errors.TilesetsError(r.text)",
"def list_jobs(exproot, **kwargs):\n for jobname, args, results in load_all(exproot):\n print jobname, args, results",
"def jobs(self):\n return self._jobs",
"def get_job_list(self):\n return self.job_list",
"def get_job_list(self):\n return self.job_list",
"def job_templates(self):\n return self._tower.job_templates.filter({'project__exact': self.id})",
"def get(self):\n # TODO: auth\n return list(self.app.db.jobs.find())",
"def view_job(options, job_name, client):\n if options.show_events:\n return display_events(client.job_events(job_name))\n\n job_content = client.job(job_name)\n return display.DisplayJobs(options).format_job(job_content)",
"def list(self, jobguid=\"\", executionparams=None):",
"def jobs(self, tags=None, tags_intersect=None):\n return list(self.all_jobs(tags=tags, tags_intersect=tags_intersect))",
"async def job_detail(request, job_id=None):\n current_jobs = dagobah._serialize().get('jobs', {})\n jobs = [job for job in current_jobs if str(job['job_id']) == job_id]\n if not jobs:\n raise ValueError('not find any jobs')\n return template('job_detail.html', job=jobs[0], hosts=dagobah.get_hosts())",
"def jobs(self):\n return JobCollection(client=self)",
"def describe_job(self):\n # GET /jobs/{job_id}\n pass",
"def list(self):\n self.background_scheduler.print_jobs()",
"def list(self, request):\n jobs = Job.objects.all()\n\n city = self.request.query_params.get('city', None)\n state = self.request.query_params.get('state', None)\n\n # Support filtering jobs by user id\n job = self.request.query_params.get('user', None)\n if job is not None:\n jobs = jobs.filter(user=request.user)\n\n if city is not None:\n jobs = jobs.filter(city=city)\n\n if state is not None:\n jobs = jobs.filter(state=state)\n\n serializer = JobSerializer(\n jobs, many=True, context={'request': request})\n return Response(serializer.data)",
"def get_queryset(self):\n return Job.objects.all()"
] | [
"0.7152179",
"0.71383125",
"0.6330445",
"0.6263424",
"0.6214478",
"0.6188785",
"0.61780936",
"0.61448723",
"0.60901165",
"0.6074674",
"0.6017765",
"0.6007803",
"0.59747595",
"0.59221345",
"0.589214",
"0.5855021",
"0.5803626",
"0.57746786",
"0.57746786",
"0.5773371",
"0.57521296",
"0.5690878",
"0.56652075",
"0.5647777",
"0.5641453",
"0.5626209",
"0.56204516",
"0.562001",
"0.5611736",
"0.55710226"
] | 0.75485474 | 0 |
View a single job for a particular tileset. tilesets job | def job(tileset, job_id, token=None, indent=None):
mapbox_api = _get_api()
mapbox_token = _get_token(token)
url = "{0}/tilesets/v1/{1}/jobs/{2}?access_token={3}".format(
mapbox_api, tileset, job_id, mapbox_token
)
r = requests.get(url)
click.echo(json.dumps(r.json(), indent=indent)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def view_job(options, job_name, client):\n if options.show_events:\n return display_events(client.job_events(job_name))\n\n job_content = client.job(job_name)\n return display.DisplayJobs(options).format_job(job_content)",
"async def job_detail(request, job_id=None):\n current_jobs = dagobah._serialize().get('jobs', {})\n jobs = [job for job in current_jobs if str(job['job_id']) == job_id]\n if not jobs:\n raise ValueError('not find any jobs')\n return template('job_detail.html', job=jobs[0], hosts=dagobah.get_hosts())",
"def jobs(tileset, stage, token=None, indent=None):\n mapbox_api = _get_api()\n mapbox_token = _get_token(token)\n url = \"{0}/tilesets/v1/{1}/jobs?access_token={2}\".format(\n mapbox_api, tileset, mapbox_token\n )\n if stage:\n url = \"{0}/tilesets/v1/{1}/jobs?stage={2}&access_token={3}\".format(\n mapbox_api, tileset, stage, mapbox_token\n )\n\n r = requests.get(url)\n\n click.echo(json.dumps(r.json(), indent=indent))",
"def describe_job(self):\n # GET /jobs/{job_id}\n pass",
"def job_detail(request: HttpRequest, job_id: str) -> HttpResponse:\n table = dynamodb.Table(table_name)\n sis_account_id = request.LTI[\"custom_canvas_account_sis_id\"]\n school_id = sis_account_id.split(\":\")[1]\n school_key = f'SCHOOL#{school_id.upper()}'\n job_query_params = {\n 'KeyConditionExpression': Key('pk').eq(school_key) & Key('sk').eq(job_id),\n 'ScanIndexForward': False,\n }\n logger.debug(f'Retrieving job details for job {job_id}.')\n job = table.query(**job_query_params)['Items'][0]\n\n # Update string timestamp to datetime.\n job.update(created_at=parse_datetime(job['created_at']))\n job.update(updated_at=parse_datetime(job['updated_at']))\n\n tasks_query_params = {\n 'KeyConditionExpression': Key('pk').eq(job_id),\n 'ScanIndexForward': False,\n }\n task_query_result = table.query(**tasks_query_params)\n tasks = task_query_result['Items']\n\n # If there are additional items to be retrieved for this job, the LastEvaluatedKey will be present\n # Use this key as the starting point for subsequent queries to build a full list\n while task_query_result.get('LastEvaluatedKey', False):\n tasks_query_params['ExclusiveStartKey'] = task_query_result.get('LastEvaluatedKey')\n task_query_result = table.query(**tasks_query_params)\n tasks.extend(task_query_result['Items'])\n\n context = {\n 'job': job,\n 'tasks': tasks,\n 'canvas_url': settings.CANVAS_URL\n }\n logger.debug(f'Retrieved job details for job {job_id}.', extra=context)\n return render(request, \"bulk_site_creator/job_detail.html\", context=context)",
"def get_job_run_template(self, job_name, preview=True, request=None, object_id=None, view_name=None,\n extra_context=None):\n return 'django_admin_rq/job_run.html'",
"async def task_detail(request, job_id=None, task_name=None):\n jobs = dagobah._serialize().get('jobs', {})\n job = [job for job in jobs if str(job['job_id']) == job_id][0]\n return template('task_detail.html',\n job=job,\n task_name=task_name,\n task=[task for task in job['tasks']\n if task['name'] == task_name][0])",
"def job(self):\n return self.batch[self.job_id]",
"def job_display(self, job_id):\n job = self.backend.get_job(job_id)\n process_graph_job = self.backend.job_pg_info(job_id)\n download_dir = self.backend.job_result_download(job_id)\n failed_files = []\n if download_dir:\n for ddir in download_dir:\n info(self.iface, \"Downloaded to {}\".format(ddir))\n result = Result(path=ddir, process_graph=process_graph_job)\n if iface.activeLayer():\n crs_background = iface.activeLayer().crs().authid()\n QSettings().setValue('/Projections/defaultBehaviour', 'useGlobal')\n QSettings().setValue('/Projections/layerDefaultCrs', crs_background)\n else:\n QSettings().setValue('/Projections/defaultBehaviour', 'useGlobal')\n QSettings().setValue('/Projections/layerDefaultCrs', 'EPSG:4326')\n\n if job.title:\n title = job.title\n else:\n title = \"NoTitle\"\n\n if not result.display(layer_name=\"{}-{}\".format(title, job.created.strftime(\"%Y-%m-%d_%H-%M-%S\"))):\n failed_files.append(ddir)\n iface.zoomToActiveLayer()\n\n if failed_files:\n warning(self.iface, \"The following result files could not be loaded to layer: {}\"\n .format(str(failed_files).replace(\"[\", \"\").replace(\"]\", \"\")))\n\n self.refresh_jobs()",
"def job_display(self, row):\n job_id = self.jobsTableWidget.item(row, 0).text()\n download_dir = self.connection.job_result_download(job_id)\n if download_dir:\n info(self.iface, \"Downloaded to {}\".format(download_dir))\n result = Result(path=download_dir)\n result.display()\n\n self.refresh_jobs()\n # info(self.iface, \"New Job {}\".format(job_id))",
"async def log_detail(request, job_id=None, task_name=None, log_id=None):\n jobs = dagobah._serialize().get('jobs', {})\n job = [job for job in jobs if str(job['job_id']) == job_id][0]\n return template('log_detail.html',\n job=job,\n task_name=task_name,\n task=[task for task in job['tasks']\n if task['name'] == task_name][0],\n log_id=log_id)",
"def get_object(self) -> Job:\n project = ProjectPermissionsMixin.get_object(self)\n return project.jobs.get(id=self.kwargs[\"job\"])",
"def get_job(jid=None):\n if not jid:\n raise CommandExecutionError(\"ID option must not be none.\")\n\n query = {\"type\": \"op\", \"cmd\": \"<show><jobs><id>{}</id></jobs></show>\".format(jid)}\n\n return __proxy__[\"panos.call\"](query)",
"def get(ctx, job):\n\n def get_experiment():\n try:\n response = PolyaxonClient().experiment.get_experiment(user, project_name, _experiment)\n cache.cache(config_manager=ExperimentManager, response=response)\n except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:\n Printer.print_error('Could not load experiment `{}` info.'.format(_experiment))\n Printer.print_error('Error message `{}`.'.format(e))\n sys.exit(1)\n\n get_experiment_details(response)\n\n def get_experiment_job():\n try:\n response = PolyaxonClient().experiment_job.get_job(user,\n project_name,\n _experiment,\n _job)\n cache.cache(config_manager=ExperimentJobManager, response=response)\n except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:\n Printer.print_error('Could not get job `{}`.'.format(_job))\n Printer.print_error('Error message `{}`.'.format(e))\n sys.exit(1)\n\n if response.resources:\n get_resources(response.resources.to_dict(), header=\"Job resources:\")\n\n response = Printer.add_status_color(response.to_light_dict(\n humanize_values=True,\n exclude_attrs=['uuid', 'definition', 'experiment', 'unique_name', 'resources']\n ))\n Printer.print_header(\"Job info:\")\n dict_tabulate(response)\n\n user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'),\n ctx.obj.get('experiment'))\n\n if job:\n _job = get_experiment_job_or_local(job)\n get_experiment_job()\n else:\n get_experiment()",
"def get_job(self, job_reference):\n url = 'jobs/{0}'.format(job_reference)\n result = self.get(url)\n return result.get('job', result)",
"def get_job(self) -> Job:\n return self.jobs_list[self.sel_idx]",
"def get_job(self) -> Dict[Text, Text]:\n request = self._client.projects().jobs().get(name=self._job_name)\n return request.execute()",
"def get_job(self, job_id):\n\n try:\n exposure = Job.objects.filter(id=job_id)\n except:\n exposure = None\n\n return exposure",
"def show_job_details(self, _, id_):\n job = self.execution_manager.get(id_)\n if job is not None:\n self.details.original_widget = JobWidget(job) # use the job widget as the inner widget",
"def test_get_job(self):\n response = self.client.open(\n '/v1/job/{id}'.format(id='id_example'),\n method='GET',\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))",
"def main(self):\n\t\tprint \"Retreiving view 'All\",\n\t\tview_all = self.hudson.getViewByName('All')\n\t\tprint \"Done\"\n\t\tprint \"iterating over jobs\"\n\t\tfor job in view_all.jobs.values():\n\t\t\tviewname = job.name.split(\".\")[0]\n\t\t\tif job.name not in self.getJobListFromDB():\n\t\t\t\tself.addJobToDb(job.name)\n\t\t\tif viewname not in self.getViewListFromDB():\n\t\t\t\tself.addViewToDb(viewname)\n\t\t\tfor build in job.builds:\n\t\t\t\tbo = HudsonConnector.HudsonObject( self.hudson.getDataFromUrl(build['url']) )\n\t\t\t\tstamp = datetime.datetime.fromtimestamp(bo.timestamp/1000)\n\t\t\t\tif stamp > self.lastrun:\n\t\t\t\t\tif bo.result is None:\n\t\t\t\t\t\trunname = job.name+\" #%d\" % bo.number\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tprint runname.ljust(29), str(stamp).ljust(24), bo.result.capitalize()\n\t\t\t\t\t\texcept AttributeError:\n\t\t\t\t\t\t\tprint runname.ljust(29), str(stamp).ljust(24), \"Unknown\"\n\t\t\t\t\telse:\n\t\t\t\t\t\tjobdata = { 'name':job.name, 'view':job.name.split(\".\")[0], 'start':stamp, \n\t\t\t\t\t\t\t\t\t'end':stamp + datetime.timedelta(seconds=bo.duration),\n\t\t\t\t\t\t\t\t\t'duration':bo.duration,\n\t\t\t\t\t\t\t\t\t'result':bo.result\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\tself.uploadJobState(jobdata)\n\t\tself.saveState()",
"def job_by_id(self, job_id):\n response = self._session.get(\n path='{base_api}/jobs/{job_id}.xml'.format(\n base_api=self.base_api,\n job_id=job_id\n ),\n headers={'Accept': 'application/xml'},\n )\n\n return response.text",
"def job(username, root_wf_id, wf_id, job_id, job_instance_id):\n dashboard = Dashboard(g.master_db_url, root_wf_id, wf_id)\n job = dashboard.get_job_information(wf_id, job_id, job_instance_id)\n job_states = dashboard.get_job_states(wf_id, job_id, job_instance_id)\n job_instances = dashboard.get_job_instances(wf_id, job_id)\n\n previous = None\n\n for state in job_states:\n timestamp = state.timestamp\n state.timestamp = datetime.fromtimestamp(state.timestamp).strftime('%a %b %d, %Y %I:%M:%S %p')\n\n if previous is None:\n state.interval = 0.0\n else:\n state.interval = timestamp - previous\n\n previous = timestamp\n\n if not job:\n return 'Bad Request', 400\n\n return render_template('workflow/job/job_details.html', root_wf_id=root_wf_id, wf_id=wf_id, job_id=job_id, job=job,\n job_instances=job_instances, job_states=job_states)",
"def all_jobs():\n\n jobs = Job.get_all()\n\n oneoffs = OneOff.get_all()\n\n job = JobView(None, jobs, oneoffs, False, Job.count() > 0)\n\n add_trello_task_links_to_g()\n\n return render_template(\"jobs.template.html\", page_title=\"Jobs\", job_info=job)",
"def job(job_name):\n ClientID = Job.get_client_id(job_name)\n return tasks_for_client_job(ClientID, job_name)",
"def get_job_context(self, request, job_name, object_id, view_name):\n\n info = self.model._meta.app_label, self.model._meta.model_name\n preview = self.is_preview_run_view(view_name)\n request.current_app = self.admin_site.name\n\n context = dict(\n self.admin_site.each_context(request),\n opts=self.model._meta,\n app_label=self.model._meta.app_label,\n title=self.get_job_title(job_name),\n job_name=job_name,\n view_name=view_name,\n form_view=FORM_VIEW,\n preview_run_view=PREVIEW_RUN_VIEW,\n main_run_view=MAIN_RUN_VIEW,\n complete_view=COMPLETE_VIEW,\n form_data_list=self.get_session_form_data_as_list(request, job_name),\n form_data_dict=self.get_session_form_data_as_dict(request, job_name),\n preview=preview,\n job_media=self.get_job_media(job_name, request=request, object_id=object_id, view_name=view_name),\n )\n if django.VERSION > (1, 8):\n jquery = static('admin/js/vendor/jquery/jquery.min.js')\n else:\n jquery = static('admin/js/jquery.min.js')\n context['jquery'] = jquery\n if object_id:\n try:\n obj = self.model.objects.get(pk=object_id)\n context['original'] = obj\n context['original_change_url'] = reverse(\n 'admin:%s_%s_change' % info, args=[object_id], current_app=self.admin_site.name\n )\n except:\n pass\n else:\n context['original_changelist_url'] = reverse(\n 'admin:%s_%s_changelist' % info, current_app=self.admin_site.name\n )\n if view_name in (PREVIEW_RUN_VIEW, MAIN_RUN_VIEW):\n job_status = self.get_session_job_status(request, job_name, view_name)\n if job_status is None:\n # job_status is None when no job has been started\n job_callable = self.get_job_callable(job_name, preview, request=request, object_id=object_id,\n view_name=view_name)\n if callable(job_callable):\n job_status = JobStatus()\n job_status.save()\n self.set_session_job_status(request, job_name, job_status, view_name)\n context.update({\n 'job_status': job_status,\n 'job_status_url': job_status.url() # The frontend starts polling the status url if it's present\n })\n job_callable.delay(\n job_status,\n self.get_session_form_data_as_dict(request, job_name),\n self.get_job_callable_extra_context(request, job_name, preview, object_id)\n )\n else:\n context['job_status'] = job_status\n # do not set job_status_url in this case otherwise it'll be an endless redirect loop\n\n if COMPLETE_VIEW in self.get_workflow_views(job_name):\n context['complete_view_url'] = self.get_workflow_url(COMPLETE_VIEW, job_name, object_id)\n else:\n context['complete_view_url'] = None\n return context",
"def get_object(self, queryset=None):\n # 404 if job doesn't exist\n try:\n job = Job.objects.select_related().get(pk=self.kwargs['pk'])\n except Job.DoesNotExist:\n raise Http404(\"No Job with PK#{} found.\".format(self.kwargs['pk']))\n\n # Staff can see all jobs\n if self.request.user.is_staff:\n return job\n\n # Creator can see their own jobs no matter the status\n if job.creator == self.request.user:\n return job\n\n # For everyone else the job needs to be visible\n if job.visible:\n return job\n\n # Return None to signal 401 unauthorized\n return None",
"def current_job(user):\n logs = user.log_set.filter(finish__isnull=True)[:1]\n if logs:\n log = logs[0]\n result = LabelResponse(log.job.name,\n log.get_duration_display())\n else:\n log = user.log_set.latest()\n result = LabelResponse('Not Working',\n str(timezone.localtime(log.start).date()))\n\n return result",
"def test_job_id(self):\n\n url = '/%s/jobs/?job_id=%s' % (self.api, self.job1.id)\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 1)\n self.assertEqual(result['results'][0]['id'], self.job1.id)",
"def retrieve_job(self, job_id) -> AzureQuantumJob:\n return self._provider.get_job(job_id)"
] | [
"0.69182616",
"0.6617653",
"0.63476014",
"0.62747604",
"0.62315136",
"0.60969406",
"0.6079476",
"0.5952673",
"0.58088917",
"0.5790628",
"0.5686428",
"0.5582579",
"0.5581716",
"0.55650765",
"0.5540786",
"0.55388176",
"0.5536627",
"0.55236",
"0.55182064",
"0.5472965",
"0.5446276",
"0.54420376",
"0.5433372",
"0.53749305",
"0.53718615",
"0.5367174",
"0.53542405",
"0.5344823",
"0.53343487",
"0.5332826"
] | 0.7312882 | 0 |
List all tilesets for an account. By default the response is a simple list of tileset IDs. If you would like an array of each tileset's information, use the verbose flag. tilesets list | def list(username, verbose, token=None, indent=None):
mapbox_api = _get_api()
mapbox_token = _get_token(token)
url = "{0}/tilesets/v1/{1}?access_token={2}".format(
mapbox_api, username, mapbox_token
)
r = requests.get(url)
if r.status_code == 200:
if verbose:
for tileset in r.json():
click.echo(json.dumps(tileset, indent=indent))
else:
for tileset in r.json():
click.echo(tileset["id"])
else:
raise errors.TilesetsError(r.text) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fixture_tile_list():\n return {\n \"version\": 1,\n \"revision\": 1,\n \"timestamp\": \"2018-06-19T23:04:32.442Z\",\n \"timestamp_ms\": 1529449472442,\n \"result_code\": 0,\n \"result\": [\n {\n \"tileType\": \"TILE\",\n \"user_uuid\": TILE_USER_UUID,\n \"tile_uuid\": TILE_TILE_UUID,\n \"other_user_uuid\": \"\",\n \"other_user_email\": TILE_EMAIL,\n \"mode\": \"OWNER\",\n \"last_modified_timestamp\": 1482711833985,\n }\n ],\n }",
"def tileslist(self, bbox, zoomlevels, tms_scheme=False):\n proj = GoogleProjection(self.tile_size, zoomlevels, tms_scheme)\n return proj.tileslist(bbox)",
"def get_tiles():\n\t\t\n\tcursor = get_cursor()\n\t\n\tcursor.execute(\"SELECT * FROM fitmeimages ORDER BY shade ASC, id ASC\")\n\treturn cursor.fetchall();",
"def status(tileset, token=None, indent=None):\n mapbox_api = _get_api()\n mapbox_token = _get_token(token)\n url = \"{0}/tilesets/v1/{1}/status?access_token={2}\".format(\n mapbox_api, tileset, mapbox_token\n )\n r = requests.get(url)\n\n click.echo(json.dumps(r.json(), indent=indent))",
"def list_sources(username, token=None):\n mapbox_api = _get_api()\n mapbox_token = _get_token(token)\n url = \"{0}/tilesets/v1/sources/{1}?access_token={2}\".format(\n mapbox_api, username, mapbox_token\n )\n r = requests.get(url)\n if r.status_code == 200:\n for source in r.json():\n click.echo(source[\"id\"])\n else:\n raise errors.TilesetsError(r.text)",
"def view_source(username, id, token=None, indent=None):\n mapbox_api = _get_api()\n mapbox_token = _get_token(token)\n url = \"{0}/tilesets/v1/sources/{1}/{2}?access_token={3}\".format(\n mapbox_api, username, id, mapbox_token\n )\n r = requests.get(url)\n if r.status_code == 200:\n click.echo(json.dumps(r.json(), indent=indent))\n else:\n raise errors.TilesetsError(r.text)",
"def view_recipe(tileset, token=None, indent=None):\n mapbox_api = _get_api()\n mapbox_token = _get_token(token)\n url = \"{0}/tilesets/v1/{1}/recipe?access_token={2}\".format(\n mapbox_api, tileset, mapbox_token\n )\n r = requests.get(url)\n if r.status_code == 200:\n click.echo(json.dumps(r.json(), indent=indent))\n else:\n raise errors.TilesetsError(r.text)",
"def tile_set():\n TILES = {\n \"ocean\":\"~\"\n ,\"rock\":\"R\"\n ,\"mountain\":\"M\"\n ,\"player\":\"X\"\n ,\"end\":\"⋆\"\n ,\"npc\":\"I\"\n ,\"cave\":\"C\"\n ,\"dirt\":\"+\"\n ,\"sign\":\"!\"\n }\n\n return TILES",
"def tileslist(self, bbox, zoomlevels, tms_osm=False):\n mercator = GlobalMercator(tms_osm,self.tile_size,zoomlevels)\n return mercator.tileslist(bbox)",
"def get_tiles(self) -> list:\n n_rows = self.mosaic_dimensions[0]\n n_columns = self.mosaic_dimensions[1]\n return [\n self.get_tile(i_row, i_column)\n for i_row in range(n_rows)\n for i_column in range(n_columns)\n ]",
"async def get_tile_cache_preview(\n *, request: Request, dataset: str, version: str, implementation\n):\n\n tile_caches = get_dataset_tile_caches(dataset, version, implementation)\n sources = {\n \"carto-dark\": {\n \"type\": \"raster\",\n \"tiles\": [\n \"https://a.basemaps.cartocdn.com/light_all/{z}/{x}/{y}.png\",\n \"https://b.basemaps.cartocdn.com/light_all/{z}/{x}/{y}.png\",\n \"https://c.basemaps.cartocdn.com/light_all/{z}/{x}/{y}.png\",\n \"https://d.basemaps.cartocdn.com/light_all/{z}/{x}/{y}.png\",\n ],\n },\n }\n\n layers = [\n {\n \"id\": \"carto-dark-layer\",\n \"type\": \"raster\",\n \"source\": \"carto-dark\",\n \"minzoom\": 0,\n \"maxzoom\": 22,\n },\n ]\n for tile in tile_caches:\n if tile[\"asset_type\"] == \"Static vector tile cache\":\n try:\n style_specs = await get_static_vector_tile_cache_style_spec(tile)\n except ClientError:\n style_specs = get_default_style_spec(tile)\n else:\n style_specs = get_default_style_spec(tile)\n\n layers = [*layers, *style_specs[\"layers\"]]\n sources[dataset] = style_specs[\"sources\"][dataset]\n\n if len(layers) == 1:\n raise HTTPException(\n status_code=404, detail=\"No tile caches available for this dataset.\"\n )\n\n return templates.TemplateResponse(\n \"tile_preview.html\",\n context={\"sources\": sources, \"layers\": layers, \"request\": request},\n )",
"async def get(self, server_name_id):\n server_id, server_name = super().get_id_name(server_name_id)\n results = lkp.Lookups.get_metricsetlist(server_id, server_name)\n super().get_results_json(results)\n return",
"def find_tiles(self):\n lat1, lat2 = self.bbox.south, self.bbox.north\n lon1, lon2 = self.bbox.west, self.bbox.east\n # convert to geographic bounding box\n minlat, minlon = min(lat1, lat2), min(lon1, lon2)\n maxlat, maxlon = max(lat1, lat2), max(lon1, lon2)\n\n # convert to tile-space bounding box\n _, xmin, ymin = self.mercator(maxlat, minlon, self.zoom)\n _, xmax, ymax = self.mercator(minlat, maxlon, self.zoom)\n\n # generate a list of tiles\n xs, ys = range(xmin, xmax + 1), range(ymin, ymax + 1)\n tile_list = [(self.zoom, x, y) for (y, x) in product(ys, xs)]\n\n return tile_list",
"def list_servers(self, all_tenants=False):\n _url = \"http://\" + self.host_ip + \":8774/v2/\" + \\\n self.project_info[\"project_id\"] + \"/servers/detail\"\n if all_tenants:\n _url = \"http://\" + self.host_ip + \":8774/v2/\" + self.project_info[\n \"project_id\"] + \"/servers/detail?all_tenants=1\"\n _headers = {'x-auth-token': self.project_info[\"token_project\"],\n 'content-type': 'application/json'}\n _body = None\n response = self.request(\"GET\", _url, _headers, _body)\n if response is None:\n LOG_OBJ.error(\"No response from server while listing servers.\")\n return\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"List servers Failed with status %s \" %\n response.status)\n return response.status\n\n output = json.loads(response.data)\n LOG_OBJ.info(\"Servers List :%s \" % output)\n return output[\"servers\"]",
"def get_test_sets(self, cluster_id):\n return self._client.get(\n url=\"/testsets/{}\".format(cluster_id),\n ).json()",
"def listSets(*args, allSets: bool=True, extendToShape: bool=True, object: name=None, type:\n int=0, **kwargs)->List[AnyStr]:\n pass",
"def show_all(self, limit=50, offset=0, order=[], details_level=''):\n return self.__common_client._show_all('show-access-layers', limit=limit,\n offset=offset, order=order, details_level=details_level)",
"def get_all(self):\n\n servers = self._scoped_servers()\n servers = [{u'id': x.id, u'name': x.name} for x in servers]\n return self.format_collection(servers)",
"def get(self):\n all_suites = [s.to_dict() for s in TestSuiteModel.get_list()]\n return flask.Response(json.dumps(all_suites), mimetype=\"application/json\")",
"def results(self):\n self.set_limit()\n\n # always get the latest data\n uuids = ITileDataManager(self).get().get(\"uuids\", None)\n\n results = list()\n if uuids:\n ordered_uuids = [(k, v) for k, v in uuids.items()]\n ordered_uuids.sort(key=lambda x: int(x[1][\"order\"]))\n\n for uuid in [i[0] for i in ordered_uuids]:\n obj = uuidToObject(uuid)\n if obj:\n results.append(obj)\n else:\n # maybe the user has no permission to access the object\n # so we try to get it bypassing the restrictions\n catalog = api.portal.get_tool(\"portal_catalog\")\n brain = catalog.unrestrictedSearchResults(UID=uuid)\n if not brain:\n # the object was deleted; remove it from the tile\n self.remove_item(uuid)\n logger.debug(\n \"Non-existent object {0} removed from tile\".format(uuid)\n )\n return results[: self.limit]",
"def lattice_tiles(\n self,\n lattice: AbstractLattice | int | str | np.ndarray,\n *,\n x: int | slice | None = None,\n y: int | slice | None = None,\n copy: bool = False,\n ) -> list[Tile]:\n\n if isinstance(lattice, (int, str)):\n lattice = cast(AbstractLattice, self.lattices[lattice])\n elif not isinstance(lattice, AbstractLattice):\n lattice = AbstractLattice(lattice)\n\n tilenames = np.unique(lattice.grid[x, y])\n\n if copy:\n return [self.tiles[t].copy() for t in tilenames]\n else:\n return [self.tiles[t] for t in tilenames]",
"def tile_list(tilefile):\n\t\n\ttf=file(tilefile,\"r\")\n\t\n\ttd=pickle.load(tf)\n\n\ttf.close()\n\treturn td",
"def get(self):\n suites = SuiteProvider.get_list(TestSuiteModel)\n return flask.Response(json.dumps(suites), mimetype=\"application/json\")",
"def list(config, username, hostname):\n if (not username and not hostname) or (username and hostname):\n print 'Usage: igor permissions list [OPTIONS]'\n print\n print 'Error: Exactly one of --username or --hostname is required.'\n exit()\n\n if username:\n response = make_api_request('GET', config, '/users/' + username +\n '/machines')\n machines = response.json()['machines']\n for machine in machines:\n print machine['hostname']\n elif hostname:\n response = make_api_request('GET', config, '/machines/' + hostname +\n '/users')\n users = response.json()['users']\n for user in users:\n print user['username']",
"def teams():\n print 'Getting Teams'\n\n substring = \"%\" + request.args.get('t') + \"%\"\n\n team_list = datastore.get_teams_typeahead(engine, substring, max_teams=10)\n\n print 'Teams:', team_list\n return jsonify(team_list)",
"def view(self, screen=None):\n r = requests.get(\"{}{}/view\".format(self.api,\n screen))\n\n return template(\"all_screens.tpl\", screens=self.screens)",
"def get_flagged_tile_list ( self ) :\n tile_list = []\n stmt = \"select name from sdb_product where sys003 =\\'T\\'\"\n self.oracle_cursor.arraysize = 100000\n self.oracle_cursor.execute(stmt)\n resultset = self.oracle_cursor.fetchmany()\n if resultset :\n for row in resultset :\n tile_list.append(str(row[0]))\n return tile_list",
"def get_tiles_from_server(self, variants, server):\n def request_and_crop(zoom, x, y):\n _x = int(math.floor(x))\n _y = int(math.floor(y))\n\n x_mod = 0.5 - (x - _x) #How does this desviates from 0.5\n y_mod = 0.5 - (y - _y) \n\n if x_mod > 0:\n x_start = _x - 1 #1 tile before\n start_xpixel = int(math.floor((1-x_mod)*256))\n else:\n x_start = _x\n start_xpixel = int(math.floor(-1*x_mod*256))\n if y_mod > 0:\n y_start = _y - 1 #1 tile before\n start_ypixel = int(math.floor((1-y_mod)*256))\n else:\n y_start = _y\n start_ypixel = int(math.floor(-1*y_mod*256))\n\n tile = np.zeros((256*2, 256*2, 3), dtype= 'uint8')\n for x in range(2):\n for y in range(2):\n url = 'http://localhost:8080/{}/{}/{}.png'.format(zoom, x_start + x, y_start + y)\n resp = urlopen(url)\n image = np.asarray(bytearray(resp.read()), dtype=\"uint8\")\n image = cv2.imdecode(image, cv2.IMREAD_COLOR)\n tile[256*y:256*(y+1), 256*x:256*(x+1),...] = image\n tile = tile[start_ypixel:start_ypixel+256,start_xpixel:start_xpixel+256]\n return tile\n tiles = []\n for _ in range(variants):\n zoom = random.randint(19,21)\n x, y = self.getXY(zoom) \n tile = request_and_crop(zoom, x, y)\n tile = cv2.resize(tile, (self.width, self.height))\n tiles.append(tile)\n tiles = np.stack(tiles)\n return tiles",
"def test_response_for_getting_all_users(self):\n response = self.client.get(\"/team/all/\", format='json')\n self.assertEqual(response.status_code, 200)",
"def get_teamsets(self, course_id):\n team_configuration = self.get_team_configuration(course_id)\n if not team_configuration:\n return None\n return team_configuration.teamsets"
] | [
"0.6313657",
"0.5784542",
"0.5680937",
"0.56709623",
"0.5668289",
"0.56286174",
"0.56016797",
"0.55826",
"0.5550821",
"0.5427271",
"0.535748",
"0.5275198",
"0.52157974",
"0.5204004",
"0.5186647",
"0.51531774",
"0.5109934",
"0.51063216",
"0.5066046",
"0.5051925",
"0.50334436",
"0.5028116",
"0.50141823",
"0.5011764",
"0.500067",
"0.49964827",
"0.4978185",
"0.49668458",
"0.49412677",
"0.49334437"
] | 0.7860446 | 0 |
Validate a Recipe JSON document tilesets validaterecipe | def validate_recipe(recipe, token=None, indent=None):
mapbox_api = _get_api()
mapbox_token = _get_token(token)
url = "{0}/tilesets/v1/validateRecipe?access_token={1}".format(
mapbox_api, mapbox_token
)
with open(recipe) as json_recipe:
recipe_json = json.load(json_recipe)
r = requests.put(url, json=recipe_json)
click.echo(json.dumps(r.json(), indent=indent)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _validate_recipe(environ, recipe):\n try:\n validate_recipe(recipe, environ)\n except InvalidBagError as exc:\n raise HTTP409('Recipe content is invalid: %s' % exc)",
"def check_recipe(recipe, data_directory=None):\n # check recipe is a dictionary\n if not isinstance(recipe, dict):\n raise Exception(\"The recipe is not valid. It should be a dictionary.\")\n\n # check the filename pattern\n if \"pattern\" not in recipe:\n raise ValueError(\"A recipe should have a filename pattern \"\n \"('pattern' keyword).\")\n recipe_pattern = recipe[\"pattern\"]\n if not isinstance(recipe_pattern, str):\n raise ValueError(\"'pattern' should be a string, not a {0}.\"\n .format(type(recipe_pattern)))\n\n # count the different dimensions to combinate in the recipe (among\n # 'fov', 'r', 'c' and 'z')\n dimensions = re.findall(\"fov|r|c|z\", recipe_pattern)\n\n # each dimension can only appear once in the filename pattern\n if len(dimensions) != len(set(dimensions)):\n raise ValueError(\"The pattern used in recipe is wrong, a dimension \"\n \"appears several times: {0}\".format(recipe_pattern))\n\n # check keys and values of the recipe\n for key, value in recipe.items():\n if key not in ['fov', 'r', 'c', 'z', 'ext', 'opt', 'pattern']:\n raise ValueError(\"The recipe can only contain the keys 'fov', \"\n \"'r', 'c', 'z', 'ext', 'opt' or 'pattern'. \"\n \"Not '{0}'.\".format(key))\n if not isinstance(value, (list, str)):\n raise TypeError(\"A recipe can only contain lists or strings, \"\n \"not {0}.\".format(type(value)))\n\n # check that requested files exist\n if data_directory is not None:\n if not os.path.isdir(data_directory):\n raise ValueError(\"Directory does not exist: {0}\"\n .format(data_directory))\n recipe = fit_recipe(recipe)\n nb_r, nb_c, nb_z = get_nb_element_per_dimension(recipe)\n nb_fov = count_nb_fov(recipe)\n for fov in range(nb_fov):\n for r in range(nb_r):\n for c in range(nb_c):\n for z in range(nb_z):\n path = get_path_from_recipe(recipe, data_directory,\n fov=fov, r=r, c=c, z=z)\n if not os.path.isfile(path):\n raise ValueError(\"File does not exist: {0}\"\n .format(path))\n\n return",
"def check_mmum_recipe(recipe):\n\n if recipe[\"Maischform\"] != \"infusion\":\n print(\"[W] Only infusion is supported...\")\n return False\n\n single = [\"Infusion_Hauptguss\", \"Infusion_Einmaischtemperatur\",\"Abmaischtemperatur\",\"Kochzeit_Wuerze\",\"Nachguss\",\"Hefe\",\"Gaertemperatur\"]\n for k in single:\n try:\n _=recipe[k]\n except KeyError:\n print(f\"[E] invalid recipe. This field missed: {k}\")\n return False\n\n \"\"\" This is because this json array is soo stupid -.- \"\"\"\n cnt={\n 'malz':0,\n 'rast':0,\n 'hopfen_vwh':0,\n 'hopfen':0,\n 'extra_ingredient':0,\n 'hopfen_stopf':0,\n 'extra_gaerung':0,\n }\n for k in recipe:\n key = k.split('_') \n if k[:-1] == \"Malz\":\n cnt['malz'] += 1\n elif k[:17] == \"Infusion_Rastzeit\":\n cnt['rast'] += 1 \n elif k[:6] == \"Hopfen\": \n if len(key) == 3:\n if key[2] == \"Sorte\":\n cnt['hopfen'] += 1\n elif len(key) == 4:\n if key[3] == \"Sorte\":\n cnt['hopfen_vwh'] += 1\n elif k[:19] == \"WeitereZutat_Wuerze\": \n if k.split('_')[3] == \"Name\":\n cnt['extra_ingredient'] += 1\n elif key[0] == \"Stopfhopfen\":\n if key[2] == \"Sorte\":\n cnt['hopfen_stopf'] += 1\n elif key[0] == \"WeitereZutat\":\n if key[3] == \"Name\":\n cnt['extra_gaerung'] += 1\n \n if not cnt['hopfen'] or not cnt['malz'] or not cnt['rast']:\n print(f\"[E] invalid recipe, no counter of cnt: {cnt}\")\n return False\n\n return cnt",
"def validate_form(form, collection):\r\n\r\n # variable initialization\r\n max_title = 50\r\n max_ingredients = 500\r\n max_method = 1500\r\n max_recipe_URL = 250\r\n max_servings = 100\r\n max_category_name = 50\r\n max_category_URL = 250\r\n max_review = 250\r\n error_list = []\r\n\r\n # validates recipe form\r\n if collection == 'recipe':\r\n if not form['title'] or len(form['title']) > max_title:\r\n error_list.append(\r\n 'Title must not be empty or more than {} characters!'\r\n .format(max_title)\r\n )\r\n\r\n ingredient = form['ingredients']\r\n if not ingredient or len(ingredient) > max_ingredients:\r\n error_list.append(\r\n 'Ingredients must not be empty or more than {} characters!'\r\n .format(max_ingredients)\r\n )\r\n\r\n if not form['method'] or len(form['method']) > max_method:\r\n error_list.append(\r\n 'Method must not be empty or more than {} characters!'\r\n .format(max_method)\r\n )\r\n\r\n if 'appliance_categories' not in form:\r\n error_list.append(\r\n 'At least one of the appliances should be checked!'\r\n )\r\n\r\n if not form['img_link'] or len(form['img_link']) > max_recipe_URL:\r\n error_list.append(\r\n 'Image URL must not be empty or more than {} characters!!'\r\n .format(max_recipe_URL)\r\n )\r\n\r\n try:\r\n if not form['servings'] or int(form['servings']) > max_servings:\r\n error_list.append(\r\n 'Servings must not be empty or more than {}!'\r\n .format(max_servings)\r\n )\r\n\r\n except ValueError:\r\n error_list.append('Servings is not a number!')\r\n\r\n # validates recipe category form\r\n elif collection == 'recipe_category':\r\n if not form['name'] or len(form['name']) > max_category_name:\r\n error_list.append(\r\n 'Category name must not be empty or more than {} characters!'\r\n .format(max_category_name)\r\n )\r\n\r\n if not form['img_link'] or len(form['img_link']) > max_category_URL:\r\n error_list.append(\r\n 'Image URL must not be empty or more than {} characters!'\r\n .format(max_category_URL)\r\n )\r\n\r\n # validates review form\r\n elif collection == 'review':\r\n if not form['review'] or len(form['review']) > max_review:\r\n error_list.append(\r\n 'Review must not be empty or more than {} characters!'\r\n .format(max_review)\r\n )\r\n\r\n # returns errors on an empty list\r\n return error_list",
"def _validate(self):\n All = voluptuous.All\n Required = voluptuous.Required\n Length = voluptuous.Length\n Extra = voluptuous.Extra\n\n schema = voluptuous.Schema({\n Required('description'): voluptuous.All(str, Length(min=5)),\n Required('environments'): dict,\n Required('application'): {\n Required('name'): str,\n Required('scenario'): [{\n Required('driver'): str,\n Required('description'): All(str, Length(min=5)),\n Extra: object}]}})\n try:\n schema(self.marmite_tree)\n except voluptuous.MultipleInvalid as e:\n LOG.error(\"Failed to validate %s/marmite.yaml structure: %s\" %\n (self.fs_layer.base_dir, e))\n raise InvalidStructure()",
"def test_metadata_schema_json_valid(mock_irods):\n\n schema_file_path = 'pytest/assets/mi_schema.json'\n with open(schema_file_path, 'r') as file_obj:\n json_schema = file_obj.read()\n assert len(json_schema) > 0\n form_data = {\"mp_program_type\": \"Test Model Program\", \"mi_json_schema\": json_schema}\n metadata_validation_form = ModelProgramMetadataValidationForm(data=form_data)\n assert metadata_validation_form.is_valid()",
"def validate_datasets(row):\n data_validator = DataJSONDataset(row)\n valid = data_validator.validate(validator_schema=row['validator_schema'])\n errors = data_validator.errors\n row['validation_errors'] = errors\n if not valid:\n logger.error(f'Error validating {row}: {errors}')",
"def check_recipes(self):\n\n self.recipe = None\n\n for recipe in all_recipes:\n if recipe.matches(self.crafting, self.crafting_stride):\n self.recipe = recipe",
"def validate_json(self):\n pass",
"def validate(self, record):\n\n self.logger.debug(\"Validating %s\" % record[\"url\"])\n\n # Remove empty fields\n for field in list(record.keys()):\n if record[field] in [ None, \"\", [ ], { } ]:\n del record[field]\n\n # Check for missing fields\n missing = [ field for field in self.required_fields if field not in record.keys() ]\n if len(missing) > 0:\n self.logger.warn(\"recipe in %s: missing %s\" % (record[\"url\"], \", \".join(missing)))\n return False\n\n return True",
"def validate(self, json_data):\n self._errors = None\n success = True\n for item in self._schema:\n if not item.validate(json_data):\n success = False\n\n return success",
"def test_case_3(self):\n with open(f'{TEST_DATA_DIR}/r1.json') as file:\n data = json.load(file)\n self.assertIsInstance(data, dict)\n\n task_1 = Task.new(data=data)\n self.assertTrue(task_1.validate())\n\n with self.assertRaises(GCGValidationError):\n task_2 = Task.new(data={'data': 'bad_data'})",
"def _validate_json(self):\n # Do we find valid json?\n try:\n with open(self.batch_json_path, \"rb\") as fd:\n batch_json = json.loads(fd.read())\n\n except Exception as err:\n raise\n self.message(\n \"[-] Error reading JSON batch file '%s' : '%s'\" %\n (self.batch_json_path, err))\n return False\n\n # Does the json represent a dictionary of the expected form?\n if not isinstance(batch_json, types.DictionaryType):\n self.message(\n \"[-] JSON batch file '%s' deserialises to unexpected object type '%s'\" %\n (self.batch_json_path, type(batch_json)))\n return False\n\n # If it is a dictionary does it have the expected characteristics?\n for endpoint, sys_info in batch_json.items():\n\n # Endpoint should be a hostname, IP or some other string\n # identifier, difficult to validate much beyond 'string'\n if type(endpoint) not in [types.StringType, types.UnicodeType]:\n self.message(\n \"[-] Element within JSON batch file '%s' conatins unexpected object type for an endpoint element '%s'. %s : %s\" %\n (self.batch_json_path, type(endpoint), endpoint, sys_info))\n return False\n\n # Does the sys_info dict contain the expected keys?\n if set(sys_info.keys()).symmetric_difference(\n set(self.json_batch_template)):\n self.message(\n \"[-] Unexpected sys_info structure within JSON batch file %s, expected keys '%s' %s : %s\" %\n (self.batch_json_path, self.json_batch_template, endpoint, sys_info))\n return False\n\n # Create a psuedononymised hash of the uuid using MAC addr as salt\n mac_repr = \"0x\" + sys_info[\"mac_addr\"].lower().replace(\":\", \"\")\n sys_info[\"hashed_uuid\"] = hashlib.sha256(\n mac_repr + sys_info[\"sys_uuid\"]).hexdigest()\n\n # Remove both the real sys_uuid and the mac_addr from the structure so they do not get submitted to the API\n # and remain confidential to the submitter\n del sys_info[\"sys_uuid\"]\n del sys_info[\"mac_addr\"]\n\n # Set the read in json structure as the structure of system data to\n # walk and send to the API\n self.endpoints_to_check = batch_json\n\n self.message(\"[+] Batch JSON file validated\")\n return True",
"def validate_input(json_object):\n try:\n if type(json_object) is not list:\n return False\n for machine_config in json_object:\n if (type(machine_config[\"ip\"]) is not str) or not validate_ip(machine_config[\"ip\"]):\n return False\n if type(machine_config[\"community\"]) is not str:\n return False\n if type(machine_config[\"config\"]) is not list:\n return False\n for actual_config in machine_config[\"config\"]:\n if (type(actual_config[\"segment\"]) is not int) or not validate_segment(actual_config[\"segment\"]):\n return False\n if type(actual_config[\"ports\"]) is not list:\n return False\n for actual_port in actual_config[\"ports\"]:\n if (type(actual_port) is not int) or not validate_port(actual_port):\n return False\n except KeyError as ke:\n # Formato incorrecto debido a que algun campo no existe\n return False\n # Todos los campos existen y estan bien\n return True",
"def test_recipe_daylight_factor_gridbased_post(self):\n recipe = DaylightFactorGridBasedSchema()\n response = self.client.open(\n '/api/recipe/daylight_factor/gridbased',\n method='POST',\n data=json.dumps(recipe),\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))",
"def _check_recipes(self, recipes):\n\n ret = {}\n if type(recipes) is not dict:\n print(\"Error: recipes is not type 'dict'!\")\n return ret\n\n for (recipe, flavors) in recipes.items():\n if type(flavors) is not dict:\n print(\"Error: recipe %s does not contain a dict of flavors\"%recipe)\n continue\n ret[recipe] = {}\n for (flav, amount) in flavors.items():\n if type(amount) is not int and type(amount) is not float:\n print(\"Error: flavor %s has non-numeric amount: %s\"%(flav, amount))\n continue\n # always assume percent\n amount = amount / 100.0\n ret[recipe][flav] = amount\n\n return ret",
"def fit_recipe(recipe):\n # initialize recipe\n new_recipe = copy.deepcopy(recipe)\n\n # initialize and fit the dimensions 'fov', 'r', 'c' and 'z'\n for key in ['fov', 'r', 'c', 'z']:\n if key not in new_recipe:\n new_recipe[key] = [None]\n value = new_recipe[key]\n if isinstance(value, str):\n new_recipe[key] = [value]\n\n # initialize the dimensions 'ext', 'opt'\n for key in ['ext', 'opt']:\n if key not in new_recipe:\n new_recipe[key] = \"\"\n\n return new_recipe",
"def _validate(self):\n if not self._contents.has_key('type'):\n raise ValidationFailed(\"Metadata file %s contains no type field\" % (self._filename))\n \n if not self._contents.has_key('version'):\n raise ValidationFailed(\"Metadata file %s contains no version field\" %\n (self._filename))",
"def _rec_is_template_valid(template: JSONDict, *, address: Tuple = ()) -> List[Error]:\n\n errors = []\n\n keywords = template[\"keywords\"] if \"keywords\" in template.keys() else []\n for k in keywords:\n errs = _check_keyword(k, address=address)\n errors.extend(errs)\n\n sections = template[\"sections\"] if \"sections\" in template.keys() else []\n for s in sections:\n if _undocumented(s):\n errors.append(\n Error(\n (address + (s[\"name\"],)),\n \"Sections must have a non-empty docstring.\",\n )\n )\n errs = _rec_is_template_valid(s, address=(address + (s[\"name\"],)))\n errors.extend(errs)\n\n return errors",
"def is_review_body_valid(serializer: ReviewSerializer):\n serializer.is_valid(raise_exception=True)",
"def validate():",
"def check_valid_schema(context):\n data = context.response.json()\n validate_schema(data)",
"def validate(cls, data, errors):",
"def validate_tileset_id(tileset_id):\n pattern = r\"^[a-z0-9-_]{1,32}\\.[a-z0-9-_]{1,32}$\"\n\n return re.match(pattern, tileset_id, flags=re.IGNORECASE)",
"def validate(self, config_json):\n pass",
"def test_recipe_valid(recipe_file, config_user, monkeypatch):\n # Mock input files\n find_files = create_autospec(esmvalcore._data_finder.find_files,\n spec_set=True)\n find_files.side_effect = lambda *_, **__: [\n 'test_0000-1849.nc',\n 'test_1850-9999.nc',\n ]\n monkeypatch.setattr(esmvalcore._data_finder, 'find_files', find_files)\n\n # Mock vertical levels\n levels = create_autospec(esmvalcore._recipe.get_reference_levels,\n spec_set=True)\n levels.side_effect = lambda *_, **__: [1, 2]\n monkeypatch.setattr(esmvalcore._recipe, 'get_reference_levels', levels)\n\n # Mock valid NCL version\n ncl_version = create_autospec(esmvalcore._recipe_checks.ncl_version,\n spec_set=True)\n monkeypatch.setattr(esmvalcore._recipe_checks, 'ncl_version', ncl_version)\n\n # Mock interpreters installed\n def which(executable):\n if executable in ('julia', 'ncl', 'python', 'Rscript'):\n path = '/path/to/' + executable\n else:\n path = None\n return path\n\n monkeypatch.setattr(esmvalcore._task, 'which', which)\n\n # Create a shapefile for extract_shape preprocessor if needed\n recipe = yaml.safe_load(recipe_file.read_text())\n for preproc in recipe.get('preprocessors', {}).values():\n extract_shape = preproc.get('extract_shape')\n if extract_shape and 'shapefile' in extract_shape:\n filename = Path(\n config_user['auxiliary_data_dir']) / extract_shape['shapefile']\n filename.parent.mkdir(parents=True, exist_ok=True)\n filename.touch()\n\n esmvalcore._recipe.read_recipe_file(recipe_file, config_user)",
"def validate_dataset(self):\n pass",
"def test_create_recipe_with_ingredients(self):\n\n payload = {\n 'name': 'Gnocchi',\n 'description': 'A detailed description of a yummy recipe!',\n 'ingredients': [\n {'name': 'Potatoes'},\n {'name': 'Flour'},\n {'name': 'Nutmeg'}\n ]\n }\n\n res = self.client.post(RECIPES_URL, payload, format='json')\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n\n recipe = Recipe.objects.get(id=res.data['id'])\n\n self.assertEqual(payload['name'], recipe.name)\n self.assertEqual(payload['description'], recipe.description)\n self.assertEqual(recipe.ingredients.count(), 3)\n self.assertEqual(recipe.ingredients.first().name, 'Potatoes')",
"def test__try_run_rest(self):\n\n with self.assertRaises(ValueError) as error:\n self.client.data_object.reference._try_run_rest(\"\", \"\", \"\", \"validate\")\n check_error_message(self, error, \"'validate' not supported!\")",
"def test_create_basic_recipe(self):\n payload = {\"title\": \"Vietnamese Cake\",\n \"time_minutes\": 45,\n \"price\": 5.55}\n res = self.client.post(RECIPE_URL, payload)\n recipe = Recipe.objects.get(id=res.data['id'])\n for key in payload.keys():\n if key == \"price\":\n self.assertEqual(round(Decimal(payload[key]), 2), getattr(recipe, key))\n else:\n self.assertEqual(payload[key], getattr(recipe, key))\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)"
] | [
"0.63595736",
"0.60863644",
"0.5877467",
"0.5658971",
"0.544159",
"0.5438701",
"0.54253197",
"0.5409057",
"0.53850645",
"0.5367168",
"0.53094155",
"0.5285954",
"0.5279377",
"0.5246911",
"0.5224074",
"0.5209505",
"0.52058226",
"0.50977165",
"0.5095339",
"0.50936866",
"0.5093286",
"0.50888234",
"0.50701165",
"0.50608355",
"0.5056873",
"0.50403947",
"0.5031378",
"0.5024729",
"0.5022571",
"0.4997541"
] | 0.72382593 | 0 |
View a tileset's recipe JSON tilesets viewrecipe | def view_recipe(tileset, token=None, indent=None):
mapbox_api = _get_api()
mapbox_token = _get_token(token)
url = "{0}/tilesets/v1/{1}/recipe?access_token={2}".format(
mapbox_api, tileset, mapbox_token
)
r = requests.get(url)
if r.status_code == 200:
click.echo(json.dumps(r.json(), indent=indent))
else:
raise errors.TilesetsError(r.text) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def view_recipe(request, recipe, **_kwargs):\n return render(request, \"deployments/disp_recipe.html\", {\"recipe\": recipe})",
"def recipe(id):\n\n selected_recipe = mongo.db.recipes.find_one({'_id': ObjectId(id)})\n\n # Using create list function to display these sections easier\n display_method = create_list(selected_recipe[\"method\"])\n display_ingredients = create_list(selected_recipe[\"ingredients\"])\n display_equipment = create_list(selected_recipe[\"equipment\"])\n\n show_ad = make_comparison(ad_equipment, display_equipment)\n\n return render_template('view_recipe.html', recipe=selected_recipe,\n title='Recipe', display_method=display_method,\n ad_equipment=ad_equipment,\n display_ingredients=display_ingredients,\n display_equipment=display_equipment,\n show_ad=show_ad)",
"def retrive_recipe(self):\n sample_recipe(user=self.user)\n sample_recipe(user=self.user)\n\n res = self.client.get(RECIPE_URL)\n\n recipe = Recipe.objects.all().order_by('-id')\n serailzer = Recipeserializer(recipe,many = True)\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(res.data, serailzer.data)",
"def test_view_recipe_detail(self):\n recipe = sample_recipe()\n\n url = recipe_detail_url(recipe.id)\n res = self.client.get(url)\n\n serializer = RecipeSerializer(recipe)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(res.data, serializer.data)",
"def showData(self, recipes):\n for recipe in recipes:\n json.dump(recipe, self.stdout, indent=2)\n print\n print '/' + '*' * 50 + '/'",
"def recipes():\n recipes = mongo.db.recipes.find()\n return render_template(\"recipes.html\", recipes=recipes)",
"def show_recipe_details(id):\n if not g.user:\n flash(\"Please login to view.\",\"warning\")\n return redirect('/login')\n\n \n recipe = get_recipe(id)\n print(recipe['instructions'])\n \n return render_template(\"recipes/detail.html\", recipe=recipe)",
"def list_recipes(self, recipes):\n prefix, suffix = self._get_jsonp()\n return prefix + JSON.list_recipes(self, recipes) + suffix",
"def get_recipe(self, _id):\n raise NotImplementedError()",
"def test_retrieve_recipe(self):\n sample_recipe(user=self.user)\n sample_recipe(user=self.user)\n res = self.client.get(RECIPE_URL)\n\n recipes = Recipe.objects.all().order_by('id')\n serializer = RecipeSerializer(recipes,many=True)\n\n print(json.dumps(serializer.data, indent=1))\n print('ok')\n print(json.dumps(res.data, indent=1))\n self.assertTrue(res.status_code,status.HTTP_200_OK)\n self.assertEqual(res.data,serializer.data)",
"def show_recipe_results():\n if not g.user:\n flash(\"Please login to view.\",\"warning\")\n return redirect('/login')\n\n data = search_recipes(request)\n recipes = data['results']\n print(recipes)\n \n return render_template('recipes/show.html',recipes=recipes)",
"def test_visualize_recipe_nutrition_by_id(self):\n pass",
"def test_visualize_recipe_taste(self):\n pass",
"def test_get_recipe_information(self):\n pass",
"def test_visualize_recipe_nutrition(self):\n pass",
"def test_retrieve_recipes(self):\n sample_recipe(user=self.user)\n sample_recipe(user=self.user, title=\"Beans\")\n\n res = self.client.get(RECIPE_URL)\n\n recipes = Recipe.objects.all().order_by('-id')\n serializer = RecipeSerializer(recipes, many=True)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 2)\n self.assertEqual(res.data, serializer.data)",
"def recipes():\n recipes = mongo.db.recipes.find()\n return render_template(\"recipes/list.html\", recipes=recipes)",
"def test_view_recipe_detail(self):\n recipe = sample_recipe(user=self.user)\n recipe.tags.add(sample_tags(user=self.user))\n recipe.ingredients.add(sample_ingredients(user=self.user))\n\n url = detail_url(recipe_id=recipe.id)\n res = self.client.get(url)\n serializer = RecipeDetailSerializer(recipe)\n\n self.assertEqual(res.data, serializer.data)",
"def view_recipes():\n if 'name' in session:\n recipeitem = PLAN.users[session['name']].view_recipes()\n return render_template('recipes.html', recipeitem=recipeitem)\n return redirect(url_for('log_in'))",
"def test_retrieving_recipes(self):\n sample_recipe(user=self.user)\n sample_recipe(user=self.user)\n\n res = self.client.get(RECIPES_URL)\n\n recipes = Recipe.objects.all().order_by('-id')\n serializer = RecipeSerializer(recipes, many=True)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(res.data, serializer.data)",
"def test_retrieve_recipes(self):\n sample_recipe(name=\"Avocado toast\")\n sample_recipe(name='Baklava')\n\n res = self.client.get(RECIPES_URL)\n\n recipes = Recipe.objects.all()\n serializer = RecipeSerializer(recipes, many=True)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 2)\n self.assertEqual(res.data, serializer.data)",
"def test_viewing_recipe_detail(self):\n recipe = create_sample_recipe(user=self.user)\n recipe.tags.add(create_sample_tag(user=self.user))\n recipe.ingredients.add(create_sample_ingredient(user=self.user))\n\n recipe_url = create_detail_url(recipe.id)\n\n res = self.client.get(recipe_url)\n serializer = RecipeDetailSerializer(recipe)\n\n self.assertEqual(res.data, serializer.data)",
"def test_retrieve_recipes(self):\n sample_recipe(user = self.user)\n sample_recipe(user = self.user)\n\n res = self.client.get(RECIPE_URL)\n\n recipes = Recipe.objects.all().order_by('-id')\n serializer = RecipeSerializer(recipes, many=True) # many=true returns the data as a list\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(res.data, serializer.data)",
"def get_recipe(recipe_id):\n recipe = mongo.db.recipes.find_one({\"_id\": ObjectId(recipe_id)})\n return render_template(\"pages/recipe.html\", recipe=recipe, isFooter=True)",
"def test_view_recipe_detail(self):\n recipe = sample_recipe(self.user)\n recipe.tags.add(sample_tag(self.user))\n recipe.ingredients.add(sample_ingredient(self.user))\n\n res = self.client.get(detail_url(recipe.id))\n\n serializer = RecipeDetailSerializer(recipe)\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(res.data, serializer.data)",
"def list_recipes(environ, start_response):\n return list_entities(environ, start_response, 'list_recipes')",
"def recipe(id):\n recipe = mongo.db.recipes.find_one({\"_id\": ObjectId(id)})\n mongo.db.recipes.update(\n {\"_id\": ObjectId(id)}, {\"$set\": {\"views\": recipe[\"views\"] + 1}}\n )\n return render_template(\"recipes/details.html\", recipe=recipe)",
"def update_recipe(tileset, recipe, token=None, indent=None):\n mapbox_api = _get_api()\n mapbox_token = _get_token(token)\n url = \"{0}/tilesets/v1/{1}/recipe?access_token={2}\".format(\n mapbox_api, tileset, mapbox_token\n )\n with open(recipe) as json_recipe:\n recipe_json = json.load(json_recipe)\n\n r = requests.patch(url, json=recipe_json)\n if r.status_code == 201:\n click.echo(\"Updated recipe.\", err=True)\n click.echo(r.text)\n else:\n raise errors.TilesetsError(r.text)",
"def test_retrive_recipe_detail(self):\n recipe = create_sample_recipe(user=self.sample_user)\n recipe.tag.add(create_sample_tag(user=self.sample_user))\n recipe.ingredient.add(create_sample_ingredient(user=self.sample_user))\n\n detail_URL = get_detail_URL(recipe.id)\n res = self.client.get(detail_URL)\n\n serializer = RecipeDetailSerializer(recipe)\n\n self.assertEqual(res.data, serializer.data)",
"def test_view_recipe_details(self):\n recipe = sample_recipe(user=self.user)\n recipe.tags.add(sample_tag(user=self.user))\n recipe.ingredient.add(sample_ingredient(user=self.user))\n\n url = detail_url(recipe.id)\n res = self.client.get(url)\n\n serailzer = RecipeDetailSerializer(recipe)\n self.assertEqual(res.data, serailzer.data)"
] | [
"0.6430486",
"0.6278637",
"0.60368425",
"0.60243356",
"0.60141695",
"0.5860817",
"0.585936",
"0.58377564",
"0.58284754",
"0.57778907",
"0.5770112",
"0.5763494",
"0.5761875",
"0.5735329",
"0.57243955",
"0.5721431",
"0.5719946",
"0.5699056",
"0.5689098",
"0.5674704",
"0.56615955",
"0.56604785",
"0.56575483",
"0.56571114",
"0.5618931",
"0.5593724",
"0.5586199",
"0.55515933",
"0.55383074",
"0.55372494"
] | 0.8028861 | 0 |
Update a Recipe JSON document for a particular tileset tilesets updaterecipe | def update_recipe(tileset, recipe, token=None, indent=None):
mapbox_api = _get_api()
mapbox_token = _get_token(token)
url = "{0}/tilesets/v1/{1}/recipe?access_token={2}".format(
mapbox_api, tileset, mapbox_token
)
with open(recipe) as json_recipe:
recipe_json = json.load(json_recipe)
r = requests.patch(url, json=recipe_json)
if r.status_code == 201:
click.echo("Updated recipe.", err=True)
click.echo(r.text)
else:
raise errors.TilesetsError(r.text) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_full_update_recipe(self):\n recipe = sample_recipe()\n recipe.ingredients.create(name='Eggs')\n original_description = recipe.description\n\n payload = {\n 'name': 'Vegan gnocchi',\n 'ingredients': [{'name': 'Vegegg'}]\n }\n url = recipe_detail_url(recipe.id)\n self.client.put(url, payload, format='json')\n\n recipe.refresh_from_db()\n self.assertEqual(recipe.name, payload['name'])\n self.assertEqual(recipe.description, original_description)\n self.assertEqual(recipe.ingredients.count(), 1)\n self.assertTrue(recipe.ingredients.first().name, 'Eggs')",
"def test_full_update_recipe(self):\n recipe = sample_recipe(user=self.user)\n recipe.tags.add(sample_tags(user=self.user))\n payload = {\n 'title': 'Jollof Spaghetti',\n 'time_minutes': 30,\n 'price': 5.00,\n 'currency': 'USD',\n }\n url = detail_url(recipe_id=recipe.id)\n self.client.put(url, payload)\n\n recipe.refresh_from_db()\n self.assertEqual(recipe.title, payload['title'])\n self.assertEqual(recipe.time_minutes, payload['time_minutes'])\n self.assertEqual(recipe.price, payload['price'])\n self.assertEqual(recipe.currency, payload['currency'])\n tags = recipe.tags.all()\n self.assertEqual(len(tags), 0)",
"def test_full_update_recipe(self):\n recipe = sample_recipe(user=self.user)\n recipe.tag.add(sample_tag(user=self.user))\n payload = {\n 'title':'chicken noodles',\n 'time_minutes':50,\n 'price':12.67,\n }\n url = detail_url(recipe.id)\n self.client.put(url,payload)\n\n recipe.refresh_from_db()\n self.assertEqual(recipe.title,payload['title'])\n self.assertEqual(recipe.time_minutes,payload['time_minutes'])\n self.assertEqual(float(recipe.price),payload['price'])\n tags = recipe.tag.all()\n self.assertEqual(len(tags),0)\n self.assertEqual(recipe.user,self.user)",
"def test_full_update_recipe(self):\n recipe = sample_recipe(user=self.user)\n recipe.tags.add(sample_tag(user=self.user))\n payload = {\n 'title': 'Spaghetti',\n 'time_minutes': 25,\n 'price': 5.00,\n }\n url = detail_url(recipe.id)\n self.client.put(url, payload)\n\n recipe.refresh_from_db()\n self.assertEqual(recipe.title, payload['title'])\n self.assertEqual(recipe.time_minutes, payload['time_minutes'])\n self.assertEqual(recipe.price, payload['price'])\n tags = recipe.tags.all()\n self.assertEqual(len(tags), 0)",
"def put(self, user, recipe_id):\n data = request.json\n return update_recipe(data=data, user=user, recipe_id=recipe_id)",
"def test_partial_update_recipe(self):\n recipe = sample_recipe(user=self.user)\n recipe.tag.add(sample_tag(user=self.user))\n recipe.ingredient.add(sample_ingredient(user=self.user))\n new_tag = sample_tag(user=self.user,name='curry')\n payload = {\n 'title':'chicken tikka recipe',\n 'tag' : [new_tag.id]\n }\n url = detail_url(recipe.id)\n res = self.client.patch(url,payload)\n recipe.refresh_from_db();\n self.assertEqual(recipe.title,payload['title'])\n self.assertEqual(len(recipe.tag.all()),1)\n self.assertIn(new_tag,recipe.tag.all())",
"def test_partial_update_recipe(self):\n recipe = sample_recipe()\n original_description = recipe.description\n payload = {'name': 'Panqueques con dulce de leche'}\n\n url = recipe_detail_url(recipe.id)\n res = self.client.patch(url, payload)\n\n recipe.refresh_from_db()\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(recipe.name, payload['name'])\n self.assertEqual(recipe.description, original_description)",
"def test_full_update_reecipe(self):\n recipe = sample_recipe(user=self.user)\n recipe.tags.add(sample_tag(user = self.user))\n payload = {\n 'title': 'mutton curry',\n 'time_minuts': 45,\n 'price':450\n\n }\n url = detail_url(recipe.id)\n self.client.put(url , payload)\n\n recipe.refresh_from_db()\n self.assertEqual(recipe.title, payload['title'])\n self.assertEqual(recipe.time_minuts, payload['time_minuts'])\n self.assertEqual(recipe.price, payload['price'])\n tags =recipe.tags.all()\n self.assertEqual(len(tags), 0 )",
"def update_recipe(recipe_id):\n if request.method == \"POST\":\n recipes = mongo.db.recipes\n\n recipes.update({'_id': ObjectId(recipe_id)},\n {\n 'recipe_name': request.form.get('recipe_name'),\n 'category_name': request.form.get('category_name'),\n 'description': request.form.get('description'),\n 'image': request.form.get('image'),\n 'prep_time': request.form.get('prep_time'),\n 'cook_time': request.form.get('cook_time'),\n 'ingredients': request.form.getlist('ingredients'),\n 'instructions': request.form.getlist('instructions')\n })\n\n flash(\"Succesfully updated the recipe!\")\n return render_template('pages/allrecipe.html', isFooter=True)\n\n all_categories = mongo.db.categories.find()\n prerecipes = mongo.db.recipes.find_one({'_id': ObjectId(recipe_id)})\n return render_template('pages/editrecipe.html',\n recipes=prerecipes, categories=all_categories, isFooter=True) # to do a find on the categories table.",
"def test_full_update(self):\n recipe = create_sample_recipe(user=self.user)\n recipe.ingredients.add(create_sample_ingredient(\n user=self.user,\n name='Fries'\n ))\n payload = {\n \"title\": \"New Cuisine\",\n \"price\": 5.00,\n \"time_minutes\": 90\n }\n recipe_url = create_detail_url(recipe.id)\n self.client.put(recipe_url, payload)\n recipe.refresh_from_db()\n ingredients = recipe.ingredients.all()\n self.assertEqual(recipe.title, payload['title'])\n self.assertEqual(recipe.time_minutes, payload['time_minutes'])\n self.assertEqual(len(ingredients), 0)",
"def updateEditedRefImages(self, recipe):\n oldProperties = recipe.getProperties()\n\n show = oldProperties['show']\n sequence = oldProperties['sequence']\n oldMode = Mode(show, sequence)\n updatedPoses = self.__getUpdatedPoses(recipe)\n\n newRecipeXML = CopySetup.localizeSetup(oldProperties, show, sequence,\n renderCallback=self.__renderCallback,\n setupCallback=self.feedReloadSetupsMultiTracks,\n multiTrackCallback=self.feedReloadSetupsMultiTracks)\n\n newRecipe = Recipe.fromXMLElement(newRecipeXML)\n\n self.__storeUpdatedPoses(newRecipe, updatedPoses)\n self.__updatePosesInSetup(newRecipe, updatedPoses)\n\n oldProperties = recipe.getProperties()\n existingRecipeEditingPath = oldMode.get(\"[poseEditingFile]\", oldProperties).replace(\".psd\", \".xml\")\n\n # if a setup has been send to flix already we will now use the new recipe version\n if self.fileServiceLocal.exists(existingRecipeEditingPath):\n oldProperties = self.fileServiceLocal.loadXMLFile(existingRecipeEditingPath).find(\"Properties\").attrib\n\n\n recipiesXML = ET.fromstring('<Recipies/>')\n newSetupXML = newRecipe.getMasterXML()\n\n setupXML = ET.fromstring('<OldSetup show=\"%s\" sequence=\"%s\" beat=\"%s\" setup=\"%s\" version=\"%s\" />'\\\n % (oldProperties[\"show\"],\n oldProperties[\"sequence\"],\n oldProperties[\"beat\"],\n oldProperties[\"setup\"],\n oldProperties[\"version\"]))\n\n\n setupXML.append(newSetupXML)\n recipiesXML.append(setupXML)\n self.addFeedback(\"replaceSetupsMultiTracks\", recipiesXML)\n\n FlixNuke().fromRecipe(newRecipe)\n FlixNuke().compRecipe(newRecipe, renderCallback=self.__renderCallback)\n\n newProperties = newRecipe.getProperties()\n mode = Mode(newProperties.get('show', None), newProperties.get('sequence', None))\n newMultitrackFile = mode.get('[recipeMultiTrackFile]', newProperties)\n newMultitrack = self.fileServiceLocal.loadTextFile(newMultitrackFile)\n\n data = []\n data.append('<Recipies>')\n data.append(\n \"\"\"<Setup\n show=\"%(show)s\"\n sequence=\"%(sequence)s\"\n beat=\"%(beat)s\"\n setup=\"%(setup)s\"\n version=\"%(version)s\">'\"\"\" % newProperties)\n data.append(newMultitrack + \"</Setup>\" + \"</Recipies>\")\n dataString = \"\".join(data)\n\n self.feedReloadSetupsMultiTracks(dataString)\n\n# FlixNuke().compRecipe(newRecipe, fileOutNodes='fileOut_master_png')\n\n return newRecipe",
"def test_partial_update_recipe(self):\n recipe = sample_recipe(user=self.user)\n recipe.tags.add(sample_tag(user=self.user))\n new_tag = sample_tag(user=self.user, name = 'Curry')\n\n payload = {'title': 'Chicken tikka', 'tags': [new_tag.id]}\n url = detail_url(recipe.id) # to update an object you have to use the detail endpoint(with the pk of the specific recipe)\n self.client.patch(url, payload)\n\n recipe.refresh_from_db() # we always need this when we update an object\n self.assertEqual(recipe.title, payload['title'])\n\n tags = recipe.tags.all()\n self.assertEqual(tags.count(), 1)\n self.assertIn(new_tag, tags)",
"def test_recipe_daylight_factor_gridbased_uuid_put(self):\n recipe = DaylightFactorGridBasedSchema()\n response = self.client.open(\n '/api/recipe/daylight_factor/gridbased/{uuid}'.format(uuid='uuid_example'),\n method='PUT',\n data=json.dumps(recipe),\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))",
"def validate_recipe(recipe, token=None, indent=None):\n mapbox_api = _get_api()\n mapbox_token = _get_token(token)\n url = \"{0}/tilesets/v1/validateRecipe?access_token={1}\".format(\n mapbox_api, mapbox_token\n )\n with open(recipe) as json_recipe:\n recipe_json = json.load(json_recipe)\n\n r = requests.put(url, json=recipe_json)\n click.echo(json.dumps(r.json(), indent=indent))",
"def test_partial_recipe_update(self):\n recipe = create_sample_recipe(user=self.user)\n recipe.tags.add(create_sample_tag(user=self.user))\n new_tag = create_sample_tag(user=self.user, name=\"Lourd\")\n\n payload = {\n \"title\": \"Russian Borsch\",\n \"time_minutes\": 70,\n 'tags': [new_tag.id, ]\n }\n recipe_url = create_detail_url(recipe.id)\n self.client.patch(recipe_url, payload)\n recipe.refresh_from_db()\n tags = recipe.tags.all()\n self.assertEqual(payload['title'], recipe.title)\n self.assertEqual(payload['time_minutes'], payload['time_minutes'])\n self.assertIn(new_tag, tags)\n self.assertEqual(len(tags), 1)",
"def test_partial_update_recipe(self):\n\n recipe = create_sample_recipe(user=self.sample_user)\n recipe.tag.add(create_sample_tag(user=self.sample_user, name=\"Curry\"))\n new_tag = create_sample_tag(user=self.sample_user, name=\"bread\")\n\n payload = {\n 'title': 'Chicken Tikka with Bread',\n 'tag': [new_tag.id]\n }\n url = get_detail_URL(recipe.id)\n self.client.patch(url, payload)\n\n recipe.refresh_from_db()\n self.assertEqual(recipe.title, payload['title'])\n tags = recipe.tag.all()\n self.assertEqual(len(tags), 1)\n self.assertIn(new_tag, tags)",
"def test_partial_update_recipe(self):\n recipe = sample_recipe(user=self.user)\n recipe.tags.add(sample_tag(user=self.user))\n new_tag = sample_tag(user=self.user, name= 'curry')\n\n payload = {\n 'title':'chicken tikka', 'tags':[new_tag.id]\n\n }\n url = detail_url(recipe.id)\n\n self.client.patch(url, payload)\n\n recipe.refresh_from_db()\n self.assertEqual(recipe.title, payload['title'])\n\n tags = recipe.tags.all()\n self.assertEqual(len(tags), 1)\n self.assertIn(new_tag, tags)",
"def test_put_recipe(self):\n recipe = sample_recipe(self.user)\n recipe.tags.add(sample_tag(self.user))\n payload = {\n 'title': 'Ham hack',\n 'time_minutes': 38,\n 'price': 33.00\n }\n res = self.client.put(detail_url(recipe.id), payload)\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n\n recipe.refresh_from_db()\n serializer = RecipeSerializer(recipe)\n self.assertEqual(res.data, serializer.data)\n self.assertEqual(recipe.title, payload['title'])\n tags = recipe.tags.all()\n self.assertEqual(len(tags), 0)",
"def test_partial_update_recipe(self):\n recipe = sample_recipe(user=self.user)\n recipe.tags.add(sample_tags(user=self.user))\n new_tag = sample_tags(user=self.user, name='Cabbage')\n\n payload = {'title': 'Salad', 'tags': [new_tag.id]}\n url = detail_url(recipe_id=recipe.id)\n self.client.patch(url, payload)\n\n recipe.refresh_from_db()\n self.assertEqual(recipe.title, payload['title'])\n tags = recipe.tags.all()\n self.assertEqual(len(tags), 1)\n self.assertIn(new_tag, tags)",
"def put(id: int):\r\n parser = reqparse.RequestParser()\r\n parser.add_argument(\"title\", type=str)\r\n args = parser.parse_args()\r\n if args:\r\n filename = Path(__file__).parent / \"recipe-data.csv\"\r\n files = import_file.Files()\r\n recipe_load = files.import_from_csv(filename)\r\n recipes = Recipes(recipe_load)\r\n a_recipe = recipes.update_recipe(id, args)\r\n files.export_to_csv(recipes, filename)\r\n return jsonify(a_recipe)\r\n else:\r\n return abort(404)",
"def test_patch_recipe(self):\n recipe = sample_recipe(self.user)\n recipe.tags.add(sample_tag(self.user))\n tag = sample_tag(self.user, name='bacon')\n\n payload = {\n 'title': 'Ham hack',\n 'tags': tag.id\n }\n res = self.client.patch(detail_url(recipe.id), payload)\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n\n recipe.refresh_from_db()\n serializer = RecipeSerializer(recipe)\n self.assertEqual(res.data, serializer.data)\n self.assertEqual(recipe.title, payload['title'])\n self.assertEqual(serializer.data['tags'], [payload['tags']])\n tags = recipe.tags.all()\n self.assertEqual(len(tags), 1)\n self.assertIn(tag, tags)",
"def test_full_update_reteta(self):\n recipe = sample_reteta(user=self.user)\n recipe.tags.add(sample_tag(user=self.user))\n payload = {\n 'title': 'Pepperoni',\n 'time_minutes': 3,\n 'price': 3.00\n }\n url = detail_url(recipe.id)\n self.client.put(url, payload)\n\n recipe.refresh_from_db()\n self.assertEqual(recipe.title, payload['title'])\n self.assertEqual(recipe.time_minutes, payload['time_minutes'])\n self.assertEqual(recipe.price, payload['price'])\n tags = recipe.tags.all()\n self.assertEqual(len(tags), 0)",
"def set_metadata(self, metadata):\n return self.client._perform_json(\n \"PUT\", \"/projects/%s/recipes/%s/metadata\" % (self.project_key, self.recipe_name),\n body=metadata)",
"def edit_recipe(self, recipeID, newrecipe):\n for recipe in self.__recipe:\n if recipe['id'] == recipeID:\n index = self.__recipe.index(recipe)\n self.__recipe.remove(self.__recipe[index])\n self.__recipe.insert(index, newrecipe)\n return 'Recipe edited successfully'\n return 'Recipe does not exist'",
"def _update_from_rest_data(self) -> None:",
"def reconstitute():\n with open(TEXT_FPATH, 'w') as txt:\n for jfpath in json_fpaths():\n with open(jfpath) as f:\n jstruct = json.load(f)\n\n for recipe in jstruct.keys():\n _reconstitute_recipe(txt, jstruct[recipe])",
"def update_drink_in_db():\n data = request.data\n data_dict = json.loads(data)\n\n # function returns an array\n # index 0: list of flavors\n # index 1: is a list of ingredients\n ingredients_and_flavors = get_ingredient_and_flavor_list(data_dict)\n print(data_dict)\n print(ingredients_and_flavors)\n connection = mongo_connect()\n connection[\"cocktails\"].update_one(\n {\"_id\": ObjectId(data_dict[\"id\"])},\n {\"$set\":\n {\"name\": data_dict[\"name\"],\n \"description\": data_dict[\"description\"],\n \"flavor_tags\": ingredients_and_flavors[0],\n \"ingredients\": ingredients_and_flavors[1],\n \"method\": data_dict[\"instructions\"],\n \"glass\": data_dict[\"glass\"],\n \"equipment\": data_dict[\"equipment\"],\n \"creator\": ObjectId(session['_id']),\n \"updated_at\": str(datetime.now()),\n \"image_url\": data_dict[\"image_url\"]}\n }\n )\n resp = jsonify(success=True)\n return resp",
"def test_partial_update_reteta(self):\n recipe = sample_reteta(user=self.user)\n recipe.tags.add(sample_tag(user=self.user))\n new_tag = sample_tag(user=self.user, name=\"spicy\")\n payload = {'title': 'Meat Feast', 'tags': [new_tag.id]}\n url = detail_url(recipe.id)\n self.client.patch(url, payload)\n\n recipe.refresh_from_db()\n self.assertEqual(recipe.title, payload['title'])\n tags = recipe.tags.all()\n self.assertEqual(len(tags), 1)\n self.assertIn(new_tag, tags)",
"def save(self):\n self._payload_to_str()\n return self.recipe.client._perform_json(\n \"PUT\", \"/projects/%s/recipes/%s\" % (self.recipe.project_key, self.recipe.recipe_name),\n body=self.data)",
"def edit_recipe(id):\n\n chosen_recipe = mongo.db.recipes.find_one({'_id': ObjectId(id)})\n form = RecipeForm(data=chosen_recipe)\n\n if request.method == \"GET\":\n return render_template('edit_recipe.html', form=form,\n title=\"Edit Recipe\")\n elif request.method == \"POST\":\n if form.validate_on_submit():\n recipes = mongo.db.recipes\n\n recipes.update_one({'_id': ObjectId(id)}, {'$set': {\n 'recipe_name': request.form['recipe_name'],\n 'summary': request.form['summary'],\n 'description': request.form['description'],\n 'ingredients': request.form['ingredients'],\n 'equipment': request.form['equipment'],\n 'prep_time': request.form['prep_time'],\n 'cook_time': request.form['cook_time'],\n 'serves_num': request.form['serves_num'],\n 'method': request.form['method'],\n 'course': request.form['course'],\n 'cuisine': request.form['cuisine'],\n }})\n flash('Recipe Updated ', 'success')\n return redirect(url_for('recipe', id=id))\n else:\n flash('An error occured', 'danger')\n return render_template('index.html')"
] | [
"0.6287885",
"0.62047666",
"0.61583877",
"0.6145476",
"0.61356527",
"0.612758",
"0.6121439",
"0.6084628",
"0.5905411",
"0.5889674",
"0.5887126",
"0.58802575",
"0.5874741",
"0.58549875",
"0.5836796",
"0.5829042",
"0.5811349",
"0.58038753",
"0.57788795",
"0.56087655",
"0.55903137",
"0.555765",
"0.54877627",
"0.54352003",
"0.5434185",
"0.54027724",
"0.53996825",
"0.53914654",
"0.53194046",
"0.53123766"
] | 0.7319158 | 0 |
Create/add a tileset source tilesets addsource | def add_source(ctx, username, id, features, no_validation, token=None, indent=None):
mapbox_api = _get_api()
mapbox_token = _get_token(token)
url = (
f"{mapbox_api}/tilesets/v1/sources/{username}/{id}?access_token={mapbox_token}"
)
with tempfile.TemporaryFile() as file:
for feature in features:
if not no_validation:
utils.validate_geojson(feature)
file.write((json.dumps(feature) + "\n").encode("utf-8"))
file.seek(0)
m = MultipartEncoder(fields={"file": ("file", file)})
resp = requests.post(
url,
data=m,
headers={
"Content-Disposition": "multipart/form-data",
"Content-type": m.content_type,
},
)
if resp.status_code == 200:
click.echo(json.dumps(resp.json(), indent=indent))
else:
raise errors.TilesetsError(resp.text) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _addSourceToTile(self, tile, sourceEntry, corners, scale):\n source = self._sources[sourceEntry['sourcenum']]\n ts = self._openSource(source, sourceEntry['kwargs'])\n # If tile is outside of bounding box, skip it\n bbox = source['bbox']\n if (corners[2][0] <= bbox['left'] or corners[0][0] >= bbox['right'] or\n corners[2][1] <= bbox['top'] or corners[0][1] >= bbox['bottom']):\n return tile\n transform = bbox.get('transform')\n srccorners = (\n list(np.dot(bbox['inverse'], np.array(corners).T).T)\n if transform is not None else corners)\n x = y = 0\n # If there is no transform or the diagonals are positive and there is\n # no sheer, use getRegion with an appropriate size (be wary of edges)\n if (transform is None or\n transform[0][0] > 0 and transform[0][1] == 0 and\n transform[1][0] == 0 and transform[1][1] > 0):\n scaleX = transform[0][0] if transform is not None else 1\n scaleY = transform[1][1] if transform is not None else 1\n region = {\n 'left': srccorners[0][0], 'top': srccorners[0][1],\n 'right': srccorners[2][0], 'bottom': srccorners[2][1],\n }\n output = {\n 'maxWidth': (corners[2][0] - corners[0][0]) // scale,\n 'maxHeight': (corners[2][1] - corners[0][1]) // scale,\n }\n if region['left'] < 0:\n x -= region['left'] * scaleX // scale\n output['maxWidth'] += int(region['left'] * scaleX // scale)\n region['left'] = 0\n if region['top'] < 0:\n y -= region['top'] * scaleY // scale\n output['maxHeight'] += int(region['top'] * scaleY // scale)\n region['top'] = 0\n if region['right'] > source['metadata']['sizeX']:\n output['maxWidth'] -= int(\n (region['right'] - source['metadata']['sizeX']) * scaleX // scale)\n region['right'] = source['metadata']['sizeX']\n if region['bottom'] > source['metadata']['sizeY']:\n output['maxHeight'] -= int(\n (region['bottom'] - source['metadata']['sizeY']) * scaleY // scale)\n region['bottom'] = source['metadata']['sizeY']\n for key in region:\n region[key] = int(round(region[key]))\n self.logger.debug('getRegion: ts: %r, region: %r, output: %r', ts, region, output)\n sourceTile, _ = ts.getRegion(\n region=region, output=output, frame=sourceEntry.get('frame', 0),\n format=TILE_FORMAT_NUMPY)\n # Otherwise, get an area twice as big as needed and use\n # scipy.ndimage.affine_transform to transform it\n else:\n # TODO\n msg = 'Not implemented'\n raise TileSourceError(msg)\n # Crop\n # TODO\n tile = self._mergeTiles(tile, sourceTile, x, y)\n return tile",
"def AddSource (self, name, source, filename):\n self.containments [name] = source, filename, False",
"def _add_source(self, source: _Source) -> None:\n\n self._sources.append(source)",
"def AddSource(self, source):\n self._sources.append(source)",
"def add_source_achors():\n pass",
"def addSource(self, data):\n # read input\n self.example_ids.append(data[\"example_id\"])\n self.src_char.append(torch.LongTensor(data['char_id']).contiguous())\n #src: snt_length x n_feature, contiguous means in memory in C order\n self.src.append(torch.LongTensor([data[\"snt_id\"],data[\"lemma_id\"],data[\"pos_id\"],data[\"ner_id\"]]).t().contiguous())\n #source, before preprocessing into tensor, includes labels and tokens\n if \"mwe\" not in data:\n data[\"mwe\"] = 'O' * len(data[\"tok\"])\n self.src_source.append([data[\"tok\"],data[\"lem\"],data[\"pos\"],data[\"ner\"],data[\"mwe\"],data[\"anchors\"]])",
"def add_result_source(self, source):\n self._sources.append(source)",
"def addSource(self, source):\n self.tprint('source ' + source)",
"def addSource(self,\n path,\n name,\n location,\n copyLib=False,\n copyGroups=False,\n copyInfo=False,\n copyFeatures=False,\n muteKerning=False,\n muteInfo=False,\n mutedGlyphNames=None,\n familyName=None,\n styleName=None,\n ):\n sourceElement = ET.Element(\"source\")\n sourceElement.attrib['filename'] = self._posixPathRelativeToDocument(path)\n sourceElement.attrib['name'] = name\n if copyLib:\n libElement = ET.Element('lib')\n libElement.attrib['copy'] = \"1\"\n sourceElement.append(libElement)\n\n if copyGroups:\n groupsElement = ET.Element('groups')\n groupsElement.attrib['copy'] = \"1\"\n sourceElement.append(groupsElement)\n\n if copyFeatures:\n featuresElement = ET.Element('features')\n featuresElement.attrib['copy'] = \"1\"\n sourceElement.append(featuresElement)\n\n if copyInfo or muteInfo:\n # copy info:\n infoElement = ET.Element('info')\n if copyInfo:\n infoElement.attrib['copy'] = \"1\"\n if muteInfo:\n infoElement.attrib['mute'] = \"1\"\n sourceElement.append(infoElement)\n\n if muteKerning:\n # add kerning element to the source\n kerningElement = ET.Element(\"kerning\")\n kerningElement.attrib[\"mute\"] = '1'\n sourceElement.append(kerningElement)\n\n if mutedGlyphNames:\n # add muted glyphnames to the source\n for name in mutedGlyphNames:\n glyphElement = ET.Element(\"glyph\")\n glyphElement.attrib[\"name\"] = name\n glyphElement.attrib[\"mute\"] = '1'\n sourceElement.append(glyphElement)\n\n if familyName is not None:\n sourceElement.attrib['familyname'] = familyName\n if styleName is not None:\n sourceElement.attrib['stylename'] = styleName\n\n\n locationElement = self._makeLocationElement(location)\n sourceElement.append(locationElement)\n self.root.findall('.sources')[0].append(sourceElement)",
"def add_source(self, source):\n # If source is an insn ID, look up the actual instruction.\n source = self.kernel.id_to_insn.get(source, source)\n\n for written in self.map_to_base_storage(\n set(source.assignee_var_names()) & self.relevant_vars):\n self.base_writer_map[written].add(source.id)\n\n for read in self.map_to_base_storage(\n source.dependency_names() & self.relevant_vars):\n self.base_access_map[read].add(source.id)",
"def add_source_file(self, filename):\n self.sources.add(Source.create(filename))",
"def add_source(self, name, position):#)*args, **kwargs):\n return self._add_object(name, Source, position)#*args, **kwargs)",
"def set_source_to_add_destination(self, source_name):\n self.single_selection_from_static_kendo_dropdown(self.source_kendo_dropdown_arrow_locator, source_name)",
"def set_source(self, source):\n self.data['source'] = source",
"def add_source(self, group_source):\n if group_source.name in self._sources:\n raise ValueError(\"GroupSource '%s': name collision\" % \\\n group_source.name)\n self._sources[group_source.name] = group_source",
"def add_source(self, pin_name):\n for i in range(self.num_pin_components(pin_name)):\n self.add_pin_component_source(pin_name, i)",
"def make_source_dataset(self, current_host_index, num_hosts):\n pass",
"def add_pin_component_source(self, pin_name, index):\n debug.check(index<self.num_pin_components(pin_name),\"Pin component index too large.\")\n \n pin_in_tracks = self.pin_groups[pin_name][index].grids\n debug.info(2,\"Set source: \" + str(pin_name) + \" \" + str(pin_in_tracks))\n self.rg.add_source(pin_in_tracks)",
"def add_tile(self, input_name, multiples, name=None):\n return self._build_op('Tile', [input_name, multiples], name=name)",
"def _set_source(source, context):\n if isinstance(source, (str, list, dict, Dataset)):\n return Source(source, context)\n elif isinstance(source, Source):\n return source\n else:\n raise ValueError('Wrong source')",
"def add_source(self, name, node_name, source_type, quality, pattern=None):\n if pattern and isinstance(pattern, six.string_types):\n pattern = self.get_pattern(pattern)\n source = Source(self, name, node_name, source_type, quality, pattern)\n self._sources[source.name] = source\n self._pattern_reg.add_usage(source.strength_timeseries.pattern_name, (source.name, 'Source'))\n self._node_reg.add_usage(source.node_name, (source.name, 'Source'))",
"def add_result_source(self, source):\n self._main_model.add_result_source(source)",
"def _resolveSourcePath(self, sources, source):\n source = copy.deepcopy(source)\n if source['path'] != '__none__':\n sourcePath = Path(source['path'])\n source['path'] = self._basePath / sourcePath\n if not source['path'].is_file():\n altpath = self._basePath.parent / sourcePath / sourcePath.name\n if altpath.is_file():\n source['path'] = altpath\n if not source['path'].is_file():\n raise TileSourceFileNotFoundError(str(source['path']))\n sources.append(source)",
"def __register_video_source(self, name, source):\n self.__video_modules[name] = source\n self.__last_images[name] = (time.time(), source.get_image())\n self.__video_locks[name] = threading.Lock()",
"def view_source(username, id, token=None, indent=None):\n mapbox_api = _get_api()\n mapbox_token = _get_token(token)\n url = \"{0}/tilesets/v1/sources/{1}/{2}?access_token={3}\".format(\n mapbox_api, username, id, mapbox_token\n )\n r = requests.get(url)\n if r.status_code == 200:\n click.echo(json.dumps(r.json(), indent=indent))\n else:\n raise errors.TilesetsError(r.text)",
"def add_sources(self, src_dict):\n #todo check for already in self.data_sources\n for name, data_source in src_dict.items():\n data_source.source.on_change('data', self._data_updated_callback)\n self.data_sources[name] = data_source\n\n self.render_sources(src_dict)",
"def list_sources(username, token=None):\n mapbox_api = _get_api()\n mapbox_token = _get_token(token)\n url = \"{0}/tilesets/v1/sources/{1}?access_token={2}\".format(\n mapbox_api, username, mapbox_token\n )\n r = requests.get(url)\n if r.status_code == 200:\n for source in r.json():\n click.echo(source[\"id\"])\n else:\n raise errors.TilesetsError(r.text)",
"def test_adding_sources():\n s1 = magpy.magnet.Cuboid()\n s2 = magpy.magnet.Cylinder()\n s3 = magpy.magnet.CylinderSegment()\n s4 = magpy.magnet.Sphere()\n s5 = magpy.current.Loop()\n s6 = magpy.current.Line()\n s7 = magpy.misc.Dipole()\n x1 = magpy.Sensor()\n c1 = magpy.Collection()\n c2 = magpy.Collection()\n\n for obj in [s1, s2, s3, s4, s5, s6, s7, x1, c1]:\n c2.add(obj)\n\n strs = \"\"\n for src in c2:\n strs += str(src)[:3]\n\n assert strs == \"CubCylCylSphLooLinDipSenCol\"",
"def source(self, source):\n\n self._close()\n self._source = source\n\n self.src = rasterio.open(source)\n\n idx = getattr(self, 'indexes', None)\n if idx is None:\n self.indexes = list(range(1, self.src.count+1))",
"def new_source(self, name):\n params = {\"name\": name}\n return JSONRPCRequest(self, \"newSource\", params)"
] | [
"0.63106346",
"0.6242407",
"0.6181852",
"0.5995432",
"0.5959899",
"0.59026515",
"0.5895848",
"0.5837343",
"0.5822718",
"0.5802456",
"0.5723033",
"0.5697263",
"0.56536853",
"0.5642326",
"0.5624862",
"0.5598968",
"0.5473412",
"0.54200363",
"0.54169947",
"0.5408778",
"0.5397525",
"0.539289",
"0.5348182",
"0.533949",
"0.53328806",
"0.5266925",
"0.5263048",
"0.5256923",
"0.5251312",
"0.5240782"
] | 0.70931864 | 0 |
View a Tileset Source's information tilesets viewsource | def view_source(username, id, token=None, indent=None):
mapbox_api = _get_api()
mapbox_token = _get_token(token)
url = "{0}/tilesets/v1/sources/{1}/{2}?access_token={3}".format(
mapbox_api, username, id, mapbox_token
)
r = requests.get(url)
if r.status_code == 200:
click.echo(json.dumps(r.json(), indent=indent))
else:
raise errors.TilesetsError(r.text) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def list_sources(username, token=None):\n mapbox_api = _get_api()\n mapbox_token = _get_token(token)\n url = \"{0}/tilesets/v1/sources/{1}?access_token={2}\".format(\n mapbox_api, username, mapbox_token\n )\n r = requests.get(url)\n if r.status_code == 200:\n for source in r.json():\n click.echo(source[\"id\"])\n else:\n raise errors.TilesetsError(r.text)",
"def show_sources_all():\n response = requests.get(SOURCE_URL)\n json = response.json()\n for source in json['sources']:\n print(u\"{0}: <{1}> {2}\".format(\"News Code\", source['id'], source['name']))",
"def listSetInfo(self) :\n data = self.getSelectedRowData()\n\n if data : \n setName = data[self.setCols.index('Asset Name')]\n root = data[self.setCols.index('Root')]\n\n self.setAsmLocator(setName)\n self.setAsmRoot(mode='asset')\n self.setAsmRoot(mode='shot')\n self.setAsmVersion(root)\n\n self.viewData()",
"def display(\n views,\n location_syncs=[],\n value_scale_syncs=[],\n zoom_syncs=[],\n host=\"localhost\",\n server_port=None,\n dark_mode=False,\n log_level=logging.ERROR,\n fuse=True,\n auth_token=None,\n):\n from .server import Server\n from .client import CombinedTrack, DividedTrack, View, ViewConf, ViewportProjection\n\n tilesets = []\n\n # views can also be passed in as lists of tracks\n new_views = []\n for view in views:\n if isinstance(view, (tuple, list)):\n # view is a list of tracks\n new_views.append(View(view))\n else:\n new_views.append(view)\n views = new_views\n\n for view in views:\n for track in view.tracks:\n if hasattr(track, \"tracks\"):\n for track1 in track.tracks:\n if not isinstance(track1, ViewportProjection) and track1.tileset:\n tilesets += [track1.tileset]\n\n if track.tileset:\n tilesets += [track.tileset]\n\n server = Server(\n tilesets, host=host, port=server_port, fuse=fuse, log_level=log_level\n )\n server.start()\n\n cloned_views = [View.from_dict(view.to_dict()) for view in views]\n\n for view in cloned_views:\n for track in view.tracks:\n if isinstance(track, CombinedTrack):\n for track1 in track.tracks:\n if \"fromViewUid\" in track1.conf:\n # this is a viewport projection and doesn't have\n # a server\n pass\n elif \"server\" not in track1.conf or track1.conf[\"server\"] is None:\n track1.conf[\"server\"] = server.api_address\n elif \"fromViewUid\" in track.conf:\n pass\n elif \"data\" in track.conf:\n # probably a divided track with a custom\n # data fetcher\n pass\n else:\n if \"server\" not in track.conf or track.conf[\"server\"] is None:\n track.conf[\"server\"] = server.api_address\n\n viewconf = ViewConf(\n cloned_views,\n location_syncs=location_syncs,\n value_scale_syncs=value_scale_syncs,\n zoom_syncs=zoom_syncs,\n )\n\n extra_args = {}\n if auth_token:\n extra_args[\"auth_token\"] = auth_token\n\n return (\n HiGlassDisplay(\n viewconf=viewconf.to_dict(),\n hg_options={\"theme\": \"dark\" if dark_mode else \"light\",},\n **extra_args\n ),\n server,\n viewconf,\n )",
"async def get_tile_cache_preview(\n *, request: Request, dataset: str, version: str, implementation\n):\n\n tile_caches = get_dataset_tile_caches(dataset, version, implementation)\n sources = {\n \"carto-dark\": {\n \"type\": \"raster\",\n \"tiles\": [\n \"https://a.basemaps.cartocdn.com/light_all/{z}/{x}/{y}.png\",\n \"https://b.basemaps.cartocdn.com/light_all/{z}/{x}/{y}.png\",\n \"https://c.basemaps.cartocdn.com/light_all/{z}/{x}/{y}.png\",\n \"https://d.basemaps.cartocdn.com/light_all/{z}/{x}/{y}.png\",\n ],\n },\n }\n\n layers = [\n {\n \"id\": \"carto-dark-layer\",\n \"type\": \"raster\",\n \"source\": \"carto-dark\",\n \"minzoom\": 0,\n \"maxzoom\": 22,\n },\n ]\n for tile in tile_caches:\n if tile[\"asset_type\"] == \"Static vector tile cache\":\n try:\n style_specs = await get_static_vector_tile_cache_style_spec(tile)\n except ClientError:\n style_specs = get_default_style_spec(tile)\n else:\n style_specs = get_default_style_spec(tile)\n\n layers = [*layers, *style_specs[\"layers\"]]\n sources[dataset] = style_specs[\"sources\"][dataset]\n\n if len(layers) == 1:\n raise HTTPException(\n status_code=404, detail=\"No tile caches available for this dataset.\"\n )\n\n return templates.TemplateResponse(\n \"tile_preview.html\",\n context={\"sources\": sources, \"layers\": layers, \"request\": request},\n )",
"def data_source_set_info(self) -> Optional['outputs.DatasourceSetResponse']:\n return pulumi.get(self, \"data_source_set_info\")",
"def data_source_info(self) -> 'outputs.DatasourceResponse':\n return pulumi.get(self, \"data_source_info\")",
"def viewSource(self):\n\t\treturn self.driver.page_source",
"def data_source_set_info(self) -> Optional[pulumi.Input['DatasourceSetArgs']]:\n return pulumi.get(self, \"data_source_set_info\")",
"def __handle_view_tile(self, gamestate_component):",
"def tile_read_utm(source, bounds, tilesize, indexes=[1], nodata=None, alpha=None, dst_crs='EPSG:3857', \n verbose=False,\n boundless=False):\n w, s, e, n = bounds\n\n if alpha is not None and nodata is not None:\n raise RioTilerError('cannot pass alpha and nodata option')\n\n if isinstance(indexes, int):\n indexes = [indexes]\n (e - w) / tilesize\n out_shape = (len(indexes), tilesize, tilesize)\n if verbose:\n print(dst_crs)\n vrt_params = dict(\n crs=dst_crs,\n resampling=Resampling.bilinear,\n src_nodata=nodata,\n dst_nodata=nodata)\n\n if isinstance(source, DatasetReader):\n with WarpedVRT(source, **vrt_params) as vrt:\n window = vrt.window(w, s, e, n, precision=21)\n if verbose:\n print(window)\n #window_transform = windows.transform(window, vrt.transform)\n window_transform = transform.from_bounds(w,s,e,n, tilesize, tilesize)\n \n data = vrt.read(window=window,\n resampling=Resampling.bilinear,\n out_shape=out_shape,\n indexes=indexes,\n boundless=boundless)\n if False: #except:\n print(bounds)\n print(window)\n print(out_shape)\n print(indexes)\n print(boundless)\n print(window_transform)\n\n if nodata is not None:\n mask = np.all(data != nodata, axis=0).astype(np.uint8) * 255\n elif alpha is not None:\n mask = vrt.read(alpha, window=window,\n out_shape=(tilesize, tilesize),\n boundless=boundless,\n resampling=Resampling.bilinear)\n else:\n mask = vrt.read_masks(1, window=window,\n out_shape=(tilesize, tilesize),\n boundless=boundless,\n resampling=Resampling.bilinear)\n else:\n with rasterio.open(source) as src:\n with WarpedVRT(src, **vrt_params) as vrt:\n window = vrt.window(w, s, e, n, precision=21)\n window_transform = windows.transform(window, vrt.transform)\n window_transform = transform.from_bounds(w, s, e, n, tilesize, tilesize)\n\n data = vrt.read(window=window,\n boundless=boundless,\n resampling=Resampling.bilinear,\n out_shape=out_shape,\n indexes=indexes)\n\n if nodata is not None:\n mask = np.all(data != nodata, axis=0).astype(np.uint8) * 255\n elif alpha is not None:\n mask = vrt.read(alpha, window=window,\n out_shape=(tilesize, tilesize),\n boundless=boundless,\n resampling=Resampling.bilinear)\n else:\n mask = vrt.read_masks(1, window=window,\n out_shape=(tilesize, tilesize),\n boundless=boundless,\n resampling=Resampling.bilinear)\n\n return data, mask, window, window_transform",
"def get_source(self):",
"def generate_overview_tiles(self):\n\n gdal.SetConfigOption(\"GDAL_PAM_ENABLED\", \"NO\")\n\n print \"Generating Overview Tiles:\"\n\n if self.options.profile == 'garmin': # no overview tiles for 'garmin'\n return\n # Usage of existing tiles: from 4 underlying tiles generate one as overview.\n\n tcount = 0\n zcount = 0\n for tz in range(self.tmaxz-1, self.tminz-1, -1):\n tminx, tminy, tmaxx, tmaxy = self.tminmax[tz]\n tcount += (1+abs(tmaxx-tminx)) * (1+abs(tmaxy-tminy))\n zcount+=1\n if self.options.resume:\n count_tiles=tcount\n zcount+=1\n tminx, tminy, tmaxx, tmaxy = self.tminmax[self.tmaxz]\n count_tiles += (1+abs(tmaxx-tminx)) * (1+abs(tmaxy-tminy))\n i_count = self.tile_exists(0, 0, 0,1)\n if i_count == count_tiles:\n if self.options.verbose:\n print \"\\tTile generation skipped because of --resume ; all-tiles [\",zcount,\"] zoom-levels with tiles[\",count_tiles,\"]\"\n return\n ti = 0\n\n # querysize = tilesize * 2\n\n for tz in range(self.tmaxz-1, self.tminz-1, -1):\n tminx, tminy, tmaxx, tmaxy = self.tminmax[tz]\n i_x_column_count=((tmaxx-tminx)+1)\n i_y_column_count=((tmaxy-tminy)+1)\n if self.options.verbose:\n # tx in range(tminx, tmaxx+1) tminx[ 140798 ] tmaxx[ 140872 ] ; ((tmaxx-tmaxy)+1) x_tiles[ -35331 ]\n print \"\\ttz=[\",tz,\"] : tx in range(tminx, tmaxx+1) tminx[\",tminx,\"] tmaxx[\",tmaxx,\"] ; ((tmaxx-tminx)+1) x_tiles[\",i_x_column_count,\"]\"\n # ty_tms in range(tmaxy, tminy-1, -1) tmaxy[ 176204 ] tminy[ 176126 ] ; ((tmaxy-tminy)) y_tiles[ 78 ]\n print \"\\ttz=[\",tz,\"] :ty_tms in range(tmaxy, tminy-1, -1) tmaxy[\",tmaxy,\"] tminy[\",tminy,\"] ; ((tmaxy-tminy)) y_tiles[\",i_y_column_count,\"]\"\n if self.options.resume:\n i_count = self.tile_exists(0, 0, tz,2)\n print \"\\tTile generation skipped because of --??? ; x/y-tiles of z[\",tz,\"] x/y_tiles[\",tcount,\"] i_count[\",i_count,\"]\"\n if i_count == tcount:\n if self.options.verbose:\n print \"\\tTile generation skipped because of --resume ; x/y-tiles of z[\",tz,\"] x/y_tiles[\",tcount,\"]\"\n break\n for tx in range(tminx, tmaxx+1):\n tmaxy_work=tmaxy\n if self.options.resume:\n i_count = self.tile_exists(tx, 0, tz,3)\n print \"\\tTile generation skipped because of --??? 
; z =\",tz,\" ; y-tiles of x[\",tx,\"] y_tiles[\",i_y_column_count,\"] i_count[\",i_count,\"]\"\n if i_count == i_y_column_count:\n if self.options.verbose:\n print \"\\tTile generation skipped because of --resume ; z =\",tz,\" ; y-tiles of x[\",tx,\"] y_tiles[\",i_y_column_count,\"]\"\n break\n else:\n if i_count > 0:\n # this assums the rows are compleate, which may NOT be true 18-140798-176204.jpg\n tmaxy_work-=i_count\n if self.options.verbose:\n print \"\\tTile generation skipped to tmaxy[\",tmaxy_work,\"] because of --resume ; z =\",tz,\" ; y-tiles of x[\",tx,\"] y_tiles[\",i_y_column_count,\"]\"\n for ty_tms in range(tmaxy_work, tminy-1, -1): #range(tminy, tmaxy+1):\n ty_osm=self.flip_y(tz,ty_tms)\n ty=ty_tms\n if self.options.tms_osm:\n ty=ty_osm\n if self.stopped:\n if self.options.mbtiles:\n if self.mbtiles_db:\n self.mbtiles_db.close_db()\n self.mbtiles_db=None\n break\n\n ti += 1\n\n if self.options.resume:\n exists = self.tile_exists(tx, ty, tz,0)\n if exists and self.options.verbose:\n print \"\\tTile generation skipped because of --resume\"\n else:\n exists = False\n\n if not exists:\n if self.options.verbose:\n print ti, '/', tcount, self.get_verbose_tile_name(tx, ty, tz)\n try:\n self.write_overview_tile(tx, ty, tz,self.options.tms_osm)\n except ImageOutputException, e:\n self.error(\"'%d/%d/%d': %s\" % (tz, tx, ty, e.message))\n\n if not self.options.verbose or self.is_subprocess:\n self.progressbar( ti / float(tcount) )\n if self.options.mbtiles:\n if self.mbtiles_db:\n self.mbtiles_db.close_db()\n self.mbtiles_db=None",
"def view(self):",
"def show(source):\n print subarrayControl.s.info(source)",
"def layers(self): # -> LayerView:\n ...",
"def getViews(self):\n raise NotImplementedError()",
"def vtk_viewer(request):\n try:\n data = _refresh(request)\n except Exception:\n data = {}\n data['main'] = 'main'\n data['error'] = 'error'\n data['search'] = {\n 'help': ''\n }\n options = {\n 'resizable': True\n }\n data['options'] = mark_safe(json.dumps(options))\n return render(\n request,\n 'vtk_view/cdat_viewer.html',\n data\n )",
"def get_views(self):\n query = mssqlqueries.get_views()\n logger.info(u'Views query: %s', query)\n for tabular_result in self.execute_query(query):\n for row in tabular_result[0]:\n yield (row[0], row[1])",
"def show_source_page(sourceid=None):\n uuid = request.args.get(\"uuid\", sourceid)\n if not uuid:\n return redirect(url_for(\"virhesivu\", code=1, text=\"Missing Source key\"))\n u_context = UserContext(user_session, current_user, request)\n try:\n with SourceReader(\"read\", u_context) as service:\n # reader = SourceReader(readservice, u_context)\n res = service.get_source_with_references(uuid, u_context)\n\n if res[\"status\"] == Status.NOT_FOUND:\n msg = res.get(\"statustext\", _(\"No objects found\"))\n flash(msg, \"error\")\n if res[\"status\"] != Status.OK:\n flash(f'{res.get(\"statustext\", _(\"error\"))}', \"error\")\n\n stk_logger(\n u_context, f\"-> bp.scene.routes.show_source_page n={len(res['citations'])}\"\n )\n\n except KeyError as e:\n msg = f\"bp.scene.routes.show_source_page: {e.__class__.__name__} {e}\"\n flash(f'{ _(\"Program error\")}', \"error\")\n logger.error(msg)\n\n # for c in res.citations:\n # for i in c.citators:\n # if i.id[0] == \"F\": print(f'{c} – family {i} {i.clearname}')\n # else: print(f'{c} – person {i} {i.sortname}')\n return render_template(\n \"/scene/source_events.html\",\n source=res[\"item\"],\n citations=res[\"citations\"],\n user_context=u_context,\n )",
"def __show_source(self):\n pcd = o3d.io.read_point_cloud(\n self.source_cloud\n )\n if np.asarray(pcd.points).shape[0] != 0:\n pcd.paint_uniform_color([0, 1, 0])\n pcd.estimate_normals()\n self.source_point_cloud_view.load_cloud(pcd)\n try:\n self.source_point_cloud_view.show_window()\n except RuntimeError:\n pass\n else:\n QtWidgets.QMessageBox.warning(self, \"Error\",\n f\"Source point cloud is no longer available\"\n )\n self.source_cloud = \"\"\n self.__update_clickability()\n self.__save_context()",
"def __repr__(self):\n return '{} (source layer)'.format(self.name)",
"def drought_veg_index_map(request):\n \n view_center = [-105.2, 39.0]\n view_options = MVView(\n projection='EPSG:4326',\n center=view_center,\n zoom=7.0,\n maxZoom=12,\n minZoom=5\n )\n\n # TIGER state/county mapserver\n tiger_boundaries = MVLayer(\n source='TileArcGISRest',\n options={'url': 'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/State_County/MapServer'},\n legend_title='States & Counties',\n layer_options={'visible':True,'opacity':0.8},\n legend_extent=[-112, 36.3, -98.5, 41.66]) \n\n # NCDC Climate Divisions\n climo_divs = MVLayer(\n source='TileArcGISRest',\n options={'url': 'https://gis.ncdc.noaa.gov/arcgis/rest/services/backgrounds/MapServer',\n 'params': {'LAYERS': 'show:1'}},\n legend_title='Climate Divisions',\n layer_options={'visible':False,'opacity':0.8},\n legend_extent=[-112, 36.3, -98.5, 41.66]) \n \n # USGS Rest server for HUC watersheds \n watersheds = MVLayer(\n source='TileArcGISRest',\n options={'url': 'https://hydro.nationalmap.gov/arcgis/rest/services/wbd/MapServer'},\n legend_title='HUC Watersheds',\n layer_options={'visible':False,'opacity':0.4},\n legend_extent=[-112, 36.3, -98.5, 41.66])\n \n ##### WMS Layers - Ryan\n vdri_legend = MVLegendImageClass(value='VegDRI Cat',\n image_url='https://vegdri.cr.usgs.gov/wms.php?service=WMS&request=GetLegendGraphic&format=image%2Fpng&width=20&height=20&LAYER=DROUGHT_VDRI_EMODIS_1') \n vegdri = MVLayer(\n source='ImageWMS',\n options={'url': 'https://vegdri.cr.usgs.gov/wms.php?',\n 'params': {'LAYERS': 'DROUGHT_VDRI_EMODIS_1'},\n 'serverType': 'geoserver'},\n layer_options={'visible':True,'opacity':0.5},\n legend_title='VegDRI',\n legend_classes=[vdri_legend],\n legend_extent=[-126, 24.5, -66.2, 49])\n # historical layers https://edcintl.cr.usgs.gov/geoserver/qdrivegdriemodis/wms?', 'params': {'LAYERS': 'qdrivegdriemodis_pd_1-sevenday-53-2017_mm_data'\n\n qdri_legend = MVLegendImageClass(value='QuickDRI Cat',\n image_url='https://vegdri.cr.usgs.gov/wms.php?service=WMS&request=GetLegendGraphic&format=image%2Fpng&width=20&height=20&LAYER=DROUGHT_QDRI_EMODIS_1') \n quickdri = MVLayer(\n source='ImageWMS',\n options={'url': 'https://vegdri.cr.usgs.gov/wms.php?',\n 'params': {'LAYERS': 'DROUGHT_QDRI_EMODIS_1'},\n 'serverType': 'geoserver'},\n layer_options={'visible':False,'opacity':0.5},\n legend_title='QuickDRI',\n legend_classes=[qdri_legend],\n legend_extent=[-126, 24.5, -66.2, 49])\n # historical layers: https://edcintl.cr.usgs.gov/geoserver/qdriquickdriraster/wms?', 'params': {'LAYERS': 'qdriquickdriraster_pd_1-sevenday-53-2017_mm_data' \n \n # Land Cover REST layer\n #https://www.mrlc.gov/arcgis/rest/services/LandCover/USGS_EROS_LandCover_NLCD/MapServer\n NLCD = MVLayer(\n source='TileArcGISRest',\n options={'url': 'https://www.mrlc.gov/arcgis/rest/services/LandCover/USGS_EROS_LandCover_NLCD/MapServer',\n 'params': {'LAYERS': 'show6'}},\n layer_options={'visible':False,'opacity':0.5},\n legend_title='NLCD',\n legend_extent=[-126, 24.5, -66.2, 49])\n \n # Define map view options\n drought_veg_index_map_view_options = MapView(\n height='100%',\n width='100%',\n controls=['ZoomSlider', 'Rotate', 'ScaleLine', 'FullScreen',\n {'MousePosition': {'projection': 'EPSG:4326'}},\n {'ZoomToExtent': {'projection': 'EPSG:4326', 'extent': [-112, 36.3, -98.5, 41.66]}}],\n layers=[tiger_boundaries,climo_divs,vegdri,quickdri,NLCD,watersheds],\n view=view_options,\n basemap='OpenStreetMap',\n legend=True\n )\n\n context = {\n 'drought_veg_index_map_view_options':drought_veg_index_map_view_options,\n }\n\n return 
render(request, 'co_drought/drought_veg_index.html', context)",
"def getOLAPSource():",
"def render_sources(self, src_dict):\n pass",
"def call_skyview_simple(survey, source_name, fov=1):\n coords = coords_from_name(source_name)\n outname = f'{source_name}_{survey}_{fov}d.fits'\n images = SkyView.get_images(coords, survey,\n coordinates='J2000',\n projection='Car', pixels=500,\n height=fov*u.deg, width=fov*u.deg)\n fitsname = f'images/{source_name}_{survey}_{fov}d.fits'\n try:\n images[0][0].writeto(fitsname, overwrite=True)\n except astropy.io.fits.verify.VerifyError:\n print('Data not available')\n pass\n return fitsname",
"def view(self):\n raise NotImplementedError",
"def set_view(self, s):\n #s.scene.reset_zoom()\n s.scene.z_plus_view()\n c = s.scene.camera\n c.azimuth(30)\n c.elevation(30)\n s.render()",
"def write_overview_tile(self, tx, ty, tz,tms_osm):\n\n image_format = self.get_overview_tile_format(tx, ty, tz)\n\n if image_format is None:\n return\n else:\n num_bands = self.get_num_bands(image_format)\n\n dsquery = self.mem_drv.Create('', 2*self.tile_size, 2*self.tile_size, num_bands)\n self.fill_init_dest(dsquery)\n # tms: z=19: 281626\n # -z=18-140813 176168*2=352336; 176168*2+1=352337\n # -- 352336,352337\n y_from=2*ty\n y_to=2*ty + 1\n ty_tms=ty;\n s_y_type=\"tms\"\n if tms_osm:\n # osm: z=19: 281626\n # -z=18-140813 85975*2+1=171951; 85975*2=171950\n # -- 171951,171950 [in range: last/end not used]\n y_from=2*ty + 1\n y_to=2*ty\n ty_tms=(2**tz-1) - ty\n s_y_type=\"osm\"\n s_tile_id=\"{0}-{1}-{2}.{3}\".format(str(tz), str(tx),str(ty),s_y_type)\n if self.verbose:\n # Build from zoom 19 tiles: (281626, 171951) (281627, 171951) (281626, 171950) (281627, 171950)\n print \"\\tBuild [\",s_tile_id,\"] from [\",self.output_dir,\"] zoom\", tz+1,\" tiles [\",s_y_type,\"]: \", (2*tx, y_from), (2*tx+1, y_from),(2*tx, y_to), (2*tx+1, y_to)\n\n for cx, cy, child_image_format in self.iter_children(tx, ty, tz):\n if (ty_tms==0 and cy==1) or (ty_tms!=0 and (cy % (y_from)) != 0):\n tileposy = 0\n else:\n tileposy = self.tile_size\n if tx:\n tileposx = cx % (2*tx) * self.tile_size\n elif tx==0 and cx==1:\n tileposx = self.tile_size\n else:\n tileposx = 0\n\n path = self.get_full_path(cx, cy, tz+1, format_extension[child_image_format])\n\n dsquerytile = gdal.Open(path, gdal.GA_ReadOnly)\n\n dsquery.WriteRaster(tileposx, tileposy, self.tile_size, self.tile_size,\n dsquerytile.ReadRaster(0, 0, self.tile_size, self.tile_size),\n band_list=range(1, dsquerytile.RasterCount+1))\n\n if image_format == \"PNG\" and dsquerytile.RasterCount != num_bands:\n dsquery.WriteRaster(tileposx, tileposy, self.tile_size, self.tile_size,\n self.get_alpha_filler(), band_list=[num_bands])\n\n dstile = self.mem_drv.Create('', self.tile_size, self.tile_size, num_bands)\n path = self.get_full_path(tx, ty, tz, format_extension[image_format])\n self.resampler(path, dsquery, dstile, image_format)",
"def getSites(dataSource):\n pointsLayer = dataSource.GetLayer()\n pointsLayer.SetAttributeFilter(\"id >= 0\")\n return pointsLayer"
] | [
"0.57861507",
"0.5623747",
"0.54790926",
"0.5461085",
"0.544482",
"0.53834367",
"0.5382121",
"0.5329366",
"0.53023386",
"0.5296684",
"0.5284189",
"0.5280753",
"0.5255383",
"0.52418125",
"0.52131957",
"0.5167571",
"0.51532954",
"0.51469177",
"0.51326746",
"0.5106873",
"0.5103623",
"0.5101725",
"0.5098789",
"0.50795686",
"0.5045662",
"0.50311005",
"0.50178635",
"0.5013141",
"0.500739",
"0.500019"
] | 0.7161197 | 0 |
Delete a Tileset Source + all of its files. tilesets deletesource | def delete_source(username, id, force, token=None):
if not force:
click.confirm(
"Are you sure you want to delete {0} {1}?".format(username, id), abort=True
)
mapbox_api = _get_api()
mapbox_token = _get_token(token)
url = "{0}/tilesets/v1/sources/{1}/{2}?access_token={3}".format(
mapbox_api, username, id, mapbox_token
)
r = requests.delete(url)
if r.status_code == 204:
click.echo("Source deleted.")
else:
raise errors.TilesetsError(r.text) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def delete(self, source):\n _source = self._source_prefix+source\n assert _source in self.cache.keys()\n del self.cache[_source]",
"def __del__(self):\r\n train_data_sources = list(self._train_data.values())\r\n test_data_sources = list(self._test_data.values())\r\n all_data_sources = train_data_sources + test_data_sources\r\n for data_source in all_data_sources:\r\n data_source.cleanup()\r\n self._tester.__del__()",
"def src_delete(state):\n _lib.src_delete(state)",
"def delete_set(set_name):\n\n flg = logging.getLogger(\"lettuce.xgenSetup.delete_set\")\n\n flg.info(\"Set to delete: {}\".format(set_name))\n\n if mc.objExists(set_name):\n mc.select(set_name)\n old_objects = mc.ls(selection=True)\n flg.debug(\"Old Objects:\")\n for o in old_objects:\n flg.debug(o)\n ref_objects = mc.ls(selection=True, referencedNodes=True)\n\n ref_del_queue = []\n if len(ref_objects) > 0:\n flg.debug(\"Old Reference Nodes:\")\n for o in ref_objects:\n flg.debug(o)\n for o in ref_objects:\n flg.debug(\"Queuing {} for reference removal\".format(o))\n top = mc.referenceQuery(o, referenceNode=True)\n ref_del_queue.append(top)\n if len(ref_del_queue):\n for o in ref_del_queue:\n flg.debug(\"Removing reference: {}\".format(o))\n ref_file = mc.referenceQuery(o, filename=True)\n mc.file(ref_file, removeReference=True)\n for o in old_objects:\n try:\n flg.debug(\"Deleting {}\".format(o))\n mc.delete(o)\n except ValueError as e:\n flg.debug(\"Unable to delete {0}. Error: {1}\".format(o, e))\n flg.debug(\"Deleting set: {}\".format(set_name))\n mc.delete(set_name)",
"def rmtree(self, name, source):\n self.m.path.assert_absolute(source)\n self._run(name, ['rmtree', source])\n self.m.path.mock_remove_paths(str(source))",
"def remove(self, name, source):\n self.m.path.assert_absolute(source)\n self._run(name, ['remove', source])\n self.m.path.mock_remove_paths(source)",
"def delete(log, session, args):\n log('imageset id: {highlight}{id}{reset}',\n highlight=Fore.GREEN,\n id=args.id,\n reset=Style.RESET_ALL)\n log.warn('delete imageset command coming soon.')",
"def rmcontents(self, name, source):\n self.m.path.assert_absolute(source)\n self._run(name, ['rmcontents', source])\n self.m.path.mock_remove_paths(str(source)+self.m.path.sep)",
"def delete_source(self, src_name: SourceName) -> None:\n while True:\n try:\n response = self.genes.query(\n IndexName=\"src_index\",\n KeyConditionExpression=Key(\"src_name\").eq(src_name.value),\n )\n except ClientError as e:\n raise DatabaseReadException(e)\n records = response[\"Items\"]\n if not records:\n break\n with self.genes.batch_writer(\n overwrite_by_pkeys=[\"label_and_type\", \"concept_id\"]\n ) as batch:\n for record in records:\n try:\n batch.delete_item(\n Key={\n \"label_and_type\": record[\"label_and_type\"],\n \"concept_id\": record[\"concept_id\"],\n }\n )\n except ClientError as e:\n raise DatabaseWriteException(e)\n\n try:\n self.metadata.delete_item(Key={\"src_name\": src_name.value})\n except ClientError as e:\n raise DatabaseWriteException(e)",
"def delete_sample_set(namespace, workspace, sample_set_id):\n body = [{\"entityType\": \"sample_set\", \"entityName\": sample_set_id}]\n res = firecloud_api.delete_entities(namespace, workspace, body)\n return res",
"def deleteRig(self):\n\n allNodes = cmds.ls(\"*\")\n for node in allNodes:\n if cmds.objExists(node + \".sourceModule\"):\n cmds.lockNode(node, lock=False)\n source = cmds.getAttr(node + \".sourceModule\")\n if source == self.name:\n try:\n cmds.delete(node)\n except:\n pass",
"def _delete_tcs(self, testcases):\n\n delete_q = []\n\n # Find all the metadata files associated with all the testcases\n for testcase in testcases:\n metadata_files = nh.get_metadata_files(testcase)\n delete_q += metadata_files.values()\n\n # Write the placeholder file to indicate that this file is deleted\n placeholder_f \\\n = nh.get_metadata_files(testcase, deleted=True)['deleted']\n with open(placeholder_f, 'w') as obj:\n obj.write('Deleted at epoch=%d' % int(time.time()))\n\n remove_files(delete_q, self.verbose, warn=True, force=True)",
"def delete_presets(self, preset_ids=[], REQUEST=None):\r\n\r\n raise NotImplementedError",
"def delete(self):\n\t\tif self.hasUdim:\n\t\t\tfor a in self.udimPaths:\n\t\t\t\ta.delete()\n\t\telse:\n\t\t\tsuper( textureFile, self ).delete()",
"def RemoveSource(self,source):\n self._sources.RemoveSource(source)",
"def teardown():\n for filename in files_to_delete:\n delete_file(filename)",
"def sorl_delete(**kwargs):\n from sorl.thumbnail import delete\n delete(kwargs['file'])",
"def remove_empty_sources(self):\n for source in [\"dxf\", \"edilizia\", \"easyroom\", \"merged\"]:\n if source in self and not self[source]:\n del self[source]",
"def delete(self):\n\n del self.parent_mirror_dir[self.cvs_path]",
"def clearTargetShips(self):\n self.targets = []\n self.currentTarget = None",
"def clear_data():\n for i in range(_MAX_NUM_TESTS):\n rand, ref = filename(i)\n if os.path.exists(rand):\n os.remove(rand)\n if os.path.exists(ref):\n os.remove(ref)",
"def __del__(self):\n for filename in self.files:\n unlink(filename)",
"def RemoveSource(self, source):\n self._sources.remove(source)",
"def delete(configsetname):\n cnfset = configsetPath(configsetname)\n files = os.listdir(cnfset)\n for f in files: os.remove(os.path.join(cnfset, f))\n os.rmdir(cnfset)\n return None",
"def delSource(A,bSize,comp):\n sA,comp = delSink(A.T,bSize,comp)\n return sA.T,comp",
"def rmGt(self):\n gtfiles = [self.outselect, self.outmktime, self.outltcube,\n self.outbincub, self.outbinmap, self.outbinexp, \n self.outexpmap, self.outsrcmap, \n os.path.join(self.workpath, 'SrcList_cntspec'+self.suffix+'.fits'),\n os.path.join(self.workpath, 'SrcList_cntspec'+self.suffix+'.log')]\n for f in gtfiles:\n if os.path.isfile(f):\n os.remove(f)\n return",
"def teardown():\n os.remove('green-dot.tif')\n os.remove('green-dot.jpg')\n os.remove('green-dot.png')",
"def delete(self, dest, source=None):\n raise NotImplementedYet()",
"def tearDown(self):\n os.rmdir(self.cur_source)\n super().tearDown()",
"def delete_datasets(self, base_url):\n response = requests.get(base_url + '/testdata')\n for index in range(len(response.json()['testdata'])):\n self.delete_dataset(base_url, response.json()['testdata'][index]['dataset'])"
] | [
"0.636035",
"0.6173071",
"0.60790503",
"0.5987599",
"0.5917638",
"0.5863623",
"0.58427405",
"0.58380115",
"0.5836744",
"0.58044493",
"0.5800107",
"0.5758255",
"0.5698515",
"0.56531304",
"0.56498647",
"0.56251085",
"0.55927056",
"0.5576707",
"0.55732995",
"0.55669975",
"0.55491304",
"0.5549052",
"0.5539252",
"0.5513845",
"0.55127645",
"0.54876363",
"0.54452276",
"0.543814",
"0.5436388",
"0.5413152"
] | 0.6908297 | 0 |
List all Tileset Sources for an account. Response is an unordered array of sources. tilesets listsources | def list_sources(username, token=None):
mapbox_api = _get_api()
mapbox_token = _get_token(token)
url = "{0}/tilesets/v1/sources/{1}?access_token={2}".format(
mapbox_api, username, mapbox_token
)
r = requests.get(url)
if r.status_code == 200:
for source in r.json():
click.echo(source["id"])
else:
raise errors.TilesetsError(r.text) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_sources():\n url = base_url + \"sources\"\n params = {\"language\": \"en\"}\n resp = requests.get(url, params=params)\n data = resp.json()\n sources = [src['id'].strip() for src in data['sources']]\n print(\"all the sources:\")\n print(sources)\n return sources",
"def get_sources():\n url = base_url + \"sources\"\n params = {\"language\": \"en\"}\n resp = requests.get(url, params=params)\n data = resp.json()\n sources = [src['id'].strip() for src in data['sources']]\n print(\"all the sources\")\n print(sources)\n return sources",
"def ListSources(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def show_sources_all():\n response = requests.get(SOURCE_URL)\n json = response.json()\n for source in json['sources']:\n print(u\"{0}: <{1}> {2}\".format(\"News Code\", source['id'], source['name']))",
"def view_source(username, id, token=None, indent=None):\n mapbox_api = _get_api()\n mapbox_token = _get_token(token)\n url = \"{0}/tilesets/v1/sources/{1}/{2}?access_token={3}\".format(\n mapbox_api, username, id, mapbox_token\n )\n r = requests.get(url)\n if r.status_code == 200:\n click.echo(json.dumps(r.json(), indent=indent))\n else:\n raise errors.TilesetsError(r.text)",
"def get_sources(**kwargs):\n\n instance = Ceic._get_instance()\n\n get_dictionaries_method = instance._dictionary_facade.get_sources\n result = instance._make_request(get_dictionaries_method, **kwargs)\n\n return result",
"def Sources():\n return _sources",
"def sources(self):\n return self._sources",
"def sources(self) -> Sequence[Any]:\n return pulumi.get(self, \"sources\")",
"def retrieve_sources(self, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.retrieve_sources_with_http_info(**kwargs)\n else:\n (data) = self.retrieve_sources_with_http_info(**kwargs)\n return data",
"def retrieve_sources_with_http_info(self, **kwargs):\n\n all_params = []\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method retrieve_sources\" % key\n )\n params[key] = val\n del params['kwargs']\n\n collection_formats = {}\n\n path_params = {}\n\n query_params = []\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n\n # Authentication setting\n auth_settings = ['Using HTTP Header', 'Using URL Query Parameter']\n\n return self.api_client.call_api('/sources', 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='list[ExistingSource]',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)",
"def get(self):\n\n return self.get_request_handler(request.headers).get_all_sources()",
"def listsources():\n\tmain_url = \" https://newsapi.org/v2/sources?apiKey=5f81b593f35d42a8980313250c03d7e7\"\n\n\t# fetching data in json format \n\topen_source = requests.get(main_url).json() \n\n\t# getting all articles in a string sources\n\tsource = open_source[\"sources\"] \n\n\t# empty list which will \n\t# contain all trending newssources \n\tresults = [] \n\t\n\tfor k in source: \n results.append(k[\"id\"])\n \n \t\n\tfor w in results[0:4]:\n print(w)",
"def sources(self):\n for source_name, source in self._sources.items():\n yield source_name, source",
"def Sources(self):\n return self._sources",
"def list_network_sources(self, compartment_id, **kwargs):\n resource_path = \"/networkSources\"\n method = \"GET\"\n\n # Don't accept unknown kwargs\n expected_kwargs = [\n \"retry_strategy\",\n \"page\",\n \"limit\"\n ]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"list_network_sources got unknown kwargs: {!r}\".format(extra_kwargs))\n\n query_params = {\n \"compartmentId\": compartment_id,\n \"page\": kwargs.get(\"page\", missing),\n \"limit\": kwargs.get(\"limit\", missing)\n }\n query_params = {k: v for (k, v) in six.iteritems(query_params) if v is not missing and v is not None}\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\"\n }\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n query_params=query_params,\n header_params=header_params,\n response_type=\"list[NetworkSourcesSummary]\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n query_params=query_params,\n header_params=header_params,\n response_type=\"list[NetworkSourcesSummary]\")",
"def get_all_feed_sources(request):\n feed_sources = FeedSource.objects.all().order_by('-id')\n return get_feed_sources_list(feed_sources)",
"def findsources(self, *args, **kwargs):\n return _image.image_findsources(self, *args, **kwargs)",
"def sources(self):\n return self._sources.keys()",
"def sources(self, video_display_name=None):\r\n sources_selector = self.get_element_selector(video_display_name, CSS_CLASS_NAMES['video_sources'])\r\n return self.q(css=sources_selector).map(lambda el: el.get_attribute('src').split('?')[0]).results",
"def source_list(self):\n return self._source_list",
"def source_list(self):\n return self._source_list",
"def sources(self) -> Optional[Sequence['outputs.AddressPrefixItemResponse']]:\n return pulumi.get(self, \"sources\")",
"def copy_sources(self) -> Set[str]:\n return self._sources.copy()",
"def source(self) -> list:\n sources = self.source_control.list_sources()\n sources_list = [source['label'] for source in sources]\n return sources_list",
"def source_list(self):\n return list(self._client.group.streams_by_name().keys())",
"def solr_sources(self):\n # conn = pysolr.Solr(settings.SOLR['SERVER'])\n q = {\n \"fq\": ['type:source', f'archive_i:{self.pk}'],\n \"fl\": [\"pk\",\n \"public_images_b\",\n 'display_name_s',\n 'cover_image_i',\n 'source_type_s',\n 'date_statement_s',\n 'surface_type_s'],\n \"rows\": 10000,\n \"sort\": [\"shelfmark_ans asc\"]\n }\n\n res = SolrConnection.search(\"*:*\", **q)\n if res.hits > 0:\n return res.docs\n else:\n return []",
"def source_name_list(self):\n return list(self._sources.keys())",
"def _load_sources(self):\n ss_dir = SteelScriptDir('AppResponse', 'files')\n\n for svc in [PACKETS_REPORT_SERVICE_NAME,\n GENERAL_REPORT_SERVICE_NAME]:\n svc_version = self.appresponse.versions[svc]\n sw_version = (self.appresponse.get_info()['sw_version']\n .replace(' ', ''))\n sources_filename = ('{}-sources-{}-{}.pcl'\n .format(svc, svc_version, sw_version))\n sources_file = ss_dir.get_data(sources_filename)\n\n sources_file.read()\n\n if not sources_file.data:\n svcdef = self.appresponse.find_service(svc)\n\n # sources is a list of dictionaries\n sources = svcdef.bind('sources').execute('get').data['items']\n\n # the whole set of sources for current service\n all_sources = {}\n\n for source in sources:\n cols = source['columns']\n source['columns'] = \\\n OrderedDict(sorted(zip(map(lambda x: x['id'], cols),\n cols)))\n source['filters_on_metrics'] = \\\n source['capabilities']['filters_on_metrics']\n if 'granularities' not in source:\n source['granularities'] = None\n\n all_sources[source['name']] = source\n\n if source['name'] in report_source_to_groups:\n self._sources[source['name']] = source\n\n # source_file writes the whole set of sources to disk\n sources_file.data = all_sources\n sources_file.write()\n logger.debug(\"Wrote sources data into {}\"\n .format(sources_filename))\n else:\n logger.debug(\"Loading sources data from {}\"\n .format(sources_filename))\n # Only load valid sources based on settings\n for k, v in sources_file.data.iteritems():\n if k in report_source_to_groups:\n self._sources[k] = v\n\n return",
"def list(username, verbose, token=None, indent=None):\n mapbox_api = _get_api()\n mapbox_token = _get_token(token)\n url = \"{0}/tilesets/v1/{1}?access_token={2}\".format(\n mapbox_api, username, mapbox_token\n )\n r = requests.get(url)\n if r.status_code == 200:\n if verbose:\n for tileset in r.json():\n click.echo(json.dumps(tileset, indent=indent))\n else:\n for tileset in r.json():\n click.echo(tileset[\"id\"])\n else:\n raise errors.TilesetsError(r.text)"
] | [
"0.7102343",
"0.70275754",
"0.6670911",
"0.66414875",
"0.6598273",
"0.6418739",
"0.63086843",
"0.624363",
"0.61829776",
"0.61549807",
"0.6057943",
"0.60390824",
"0.60325825",
"0.6032255",
"0.6019853",
"0.600825",
"0.59725916",
"0.5939919",
"0.5932034",
"0.5928629",
"0.59208006",
"0.59208006",
"0.58937025",
"0.58883935",
"0.58721495",
"0.58129084",
"0.57543063",
"0.5752833",
"0.5746667",
"0.5714252"
] | 0.7821861 | 0 |
Mutes everyone that you are following | def auto_mute_following():
following = set(t.friends.ids(screen_name=TWITTER_HANDLE)["ids"])
muted = set(t.mutes.users.ids(screen_name=TWITTER_HANDLE)["ids"])
not_muted = following - muted
# put user IDs of people you do not want to mute here
users_keep_unmuted = set([])
# mute all
for user_id in not_muted:
if user_id not in users_keep_unmuted:
t.mutes.users.create(user_id=user_id)
print("muted %d" % (user_id)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def follow_reciprocated(self, target):\n if random.randint(1, 1000) == 1: # 1 in 20 are public @replies\n self.tweet_user(target)\n else:\n try:\n self.dm_user(target)\n except:\n pass",
"def author_following(self):\n\t\tpass",
"def follow_user(cls, user, following):\r\n pass",
"def follow_user(cls, user, following):\n pass",
"def follow_user(self, target):\n try:\n if self.api.me().friends_count > 1990:\n return\n except Exception, e:\n print e\n\n \"Rate limit exceeded. Clients may not make more than 350 requests per hour.\"\n if \"Clients\" in str(e):\n continue\n # import pdb; pdb.set_trace()\n return\n\n try:\n self.api.create_friendship(target.hunted.screen_name)\n self.log.debug(\"Followed: %s\" % target.hunted.screen_name)\n except Exception, e:\n self.log.exception(\"Could not follow %s\" %\n target.hunted.screen_name)\n else:\n # Write record of new follow to db\n target.status = Target.PURGATORY\n target.save()",
"def follow(self, follower, followee):\n pass",
"def follow(self, user_index, following_index):\n if user_index >= self.num_users or following_index >= self.num_users:\n raise ValueError(\n f\"Number of users is {self.num_users}, but indices \"\n f\"{user_index} and {following_index} were requested.\"\n )\n if self.users_hat[following_index, user_index] == 0:\n self.users_hat[following_index, user_index] = 1\n elif self.is_verbose():\n self.log(f\"User {following_index} was already following user {user_index}\")",
"def follow(request, usertofollow):\n to_follow = Member.objects.get(user__username=usertofollow)\n user = Member.objects.get(user=request.user)\n user.following.add(to_follow)\n user.save()\n return redirect(request.META['HTTP_REFERER'])",
"async def love(ctx, user: discord.Member):\r\n author = ctx.message.author\r\n if user.id == ctx.bot.user.id:\r\n await ctx.send(\"I am not capable of loving like you can. I'm sorry.\" )\r\n else:\r\n await ctx.send(author.mention + \" is capable of loving \" + user.mention + \" a whopping \" +\r\n str(randint(0, 100)) + \"%!\")\r\n ctx.counter(n)",
"def following_changed(sender, action, instance, *args, **kwargs):\n\n # m2mchanged.connect specified in apps.py\n\n following = instance.following.all()\n creator = instance.user\n\n if creator in following:\n raise ValidationError (\"can't like own post\")",
"def get_everyone_granted(self):",
"def follow(self, other):\n\t\tif not self.follows(other):\n\t\t\tself.followed.append(other)",
"def expireme(message):\n users = hf.get_users()\n requester = message._get_user_id()\n for user in users:\n if user[\"id\"] == requester:\n name = user[\"name\"]\n break\n\n hf.expire_user(name)",
"async def selfmute(ctx, *args):\n user = ctx.message.author\n if await is_staff(ctx):\n return await ctx.send(\"Staff members can't self mute.\")\n time = \" \".join(args)\n await _mute(ctx, user, time, self=True)",
"async def stealthtorment(self, ctx, *, member = None, times : int = None):\r\n\r\n\t\tchannel = ctx.message.channel\r\n\t\tauthor = ctx.message.author\r\n\t\tserver = ctx.message.guild\r\n\t\tmessage = ctx.message\r\n\r\n\t\t# Only allow owner\r\n\t\tisOwner = self.settings.isOwner(ctx.author)\r\n\t\tif isOwner == None:\r\n\t\t\treturn\r\n\t\telif isOwner == False:\r\n\t\t\treturn\r\n\t\t\t\t\r\n\t\tusage = 'Usage: `{}torment [role/member] [times]`'.format(ctx.prefix)\r\n\r\n\t\tisRole = False\r\n\r\n\t\tif member == None:\r\n\t\t\tawait ctx.channel.send(usage)\r\n\t\t\treturn\r\n\t\t\t\t\r\n\t\t# Check for formatting issues\r\n\t\tif times == None:\r\n\t\t\t# Either xp wasn't set - or it's the last section\r\n\t\t\tif type(member) is str:\r\n\t\t\t\t# It' a string - the hope continues\r\n\t\t\t\troleCheck = DisplayName.checkRoleForInt(member, server)\r\n\t\t\t\tif roleCheck and roleCheck[\"Role\"]:\r\n\t\t\t\t\tisRole = True\r\n\t\t\t\t\tmember = roleCheck[\"Role\"]\r\n\t\t\t\t\ttimes = roleCheck[\"Int\"]\r\n\t\t\t\telse:\r\n\t\t\t\t\t# Role is invalid - check for member instead\r\n\t\t\t\t\tnameCheck = DisplayName.checkNameForInt(member, server)\r\n\t\t\t\t\tif not nameCheck:\r\n\t\t\t\t\t\tawait ctx.channel.send(usage)\r\n\t\t\t\t\t\treturn\r\n\t\t\t\t\tif not nameCheck[\"Member\"]:\r\n\t\t\t\t\t\tmsg = 'I couldn\\'t find that user or role on the server.'.format(member)\r\n\t\t\t\t\t\tawait ctx.channel.send(msg)\r\n\t\t\t\t\t\treturn\r\n\t\t\t\t\tmember = nameCheck[\"Member\"]\r\n\t\t\t\t\ttimes = nameCheck[\"Int\"]\r\n\t\t\t\t\t\r\n\t\t# Set the torment flag\r\n\t\tself.toTorment = True\r\n\r\n\t\tif times == None:\r\n\t\t\t# Still no times - roll back to default\r\n\t\t\ttimes = 25\r\n\t\t\t\r\n\t\tif times > 100:\r\n\t\t\ttimes = 100\r\n\t\t\t\r\n\t\tif times == 0:\r\n\t\t\tawait ctx.channel.send('Oooooh - I bet they feel *sooooo* tormented...')\r\n\t\t\treturn\r\n\t\t\r\n\t\tif times < 0:\r\n\t\t\tawait ctx.channel.send('I just uh... *un-tormented* them. Yeah.')\r\n\t\t\treturn\r\n\r\n\t\t# Delete original torment message\r\n\t\tawait message.delete()\r\n\t\t\r\n\t\tfor i in range(0, times):\r\n\t\t\t# Do this over time\r\n\t\t\ttry:\r\n\t\t\t\tif member.name == \"@everyone\" and type(member) is discord.Role:\r\n\t\t\t\t\ttmessage = await ctx.channel.send(\"{}\".format(member.name),allowed_mentions=discord.AllowedMentions.all())\r\n\t\t\t\telse:\r\n\t\t\t\t\ttmessage = await ctx.channel.send('{}'.format(member.mention),allowed_mentions=discord.AllowedMentions.all())\r\n\t\t\t\tawait tmessage.delete()\r\n\t\t\texcept Exception:\r\n\t\t\t\tpass\r\n\t\t\tfor j in range(0, self.waitBetween):\r\n\t\t\t\t# Wait for 1 second, then check if we should cancel - then wait some more\r\n\t\t\t\tawait asyncio.sleep(1)\r\n\t\t\t\tif not self.toTorment:\r\n\t\t\t\t\treturn",
"async def follow(follow):\n await follow.edit(\n f\"`FOLLOW {DEFAULTUSER} ON` \\n\\n\"\n f\"[InstaGram](https://www.instagram.com/mayur_karaniya) \\n\\n\"\n f\"[FaceBook](https://www.facebook.com/mkaraniya) \\n\\n\"\n f\"[YouTube](https://www.youtube.com/channel/UCeKQxQK7XZ3jGi3541uWATg?sub_confirmation=1) \"\n )",
"def is_following(self, you, them):\n if self.filter(from_user=you, to_user=them).count() > 0:\n return True\n return False",
"async def treatme(self, ctx):\n await ctx.send(await self.cure_user(ctx, ctx.author))",
"async def pending(self, ctx):\r\n if ctx.guild.id == 445092370006933505:\r\n data = self.config.guild(ctx.guild)\r\n lst = await data.get_raw('neededlist')\r\n description = \"\"\r\n coach = await data.coachid()\r\n coach_role = ctx.guild.get_role(coach)\r\n x = ctx.author.top_role\r\n if x >= coach_role:\r\n for member in lst:\r\n userobj = ctx.guild.get_member(int(member))\r\n description += (str(userobj.mention) + '\\n')\r\n embed = discord.Embed(color=0xFFFF00, title='Coaching Needed by following people', description=description)\r\n embed.set_footer(text=credit)\r\n await ctx.send(embed=embed)\r\n await ctx.send('Type \"{0}coaching done @<player name>\" if the player has been coached or type \"{0}coaching info <@playername>\" to view the details submitted by the user'.format(ctx.prefix))\r\n \r\n else:\r\n await ctx.send(\"You are not allowed to do that\")\r\n\r\n else:\r\n await ctx.send(\"This command only works in the Legend eSports server, join us at: https://discord.gg/GGuCXDn\")",
"def add_follow(follow_id):\n\n want_to_follow_user = User.query.get_or_404(follow_id)\n if want_to_follow_user.private:\n # =========== NEED TO IMPLEMENT ====================\n # send them a request to follow\n want_to_follow_user.from_users.append(g.user) \n db.session.commit()\n flash(\"Your request has been sent\", \"success\")\n return redirect(f\"/users/{g.user.id}/following\")\n\n g.user.following.append(want_to_follow_user)\n db.session.commit()\n\n return redirect(f\"/users/{g.user.id}/following\")",
"def follow_someone(screen_name):\n twitter.create_friendship(screen_name=screen_name)",
"def user_appears(self, user):\n pass",
"def tweet_user(self, target, msg=None):\n self.log.debug(\"Tweeting %s\" % target.hunted.screen_name)\n tweet = \"@%s: %s\" % (target.hunted.screen_name,\n random.sample(self.tweets, 1)[0])\n tweet = tweet [:140]\n self.api.update_status(tweet)\n target.status = Target.FOLLOWER\n target.save()",
"def follow(self, followerId, followeeId):\r\n if followerId != followeeId:\r\n self.follows[followerId].add(followeeId)",
"def whisper(self,name):\n\n self.sendCommand(\"global /join\",name+self.userName+\" private\")\n self.master.after(300,self.sendCommand,name+self.userName+\" /invite\",name)",
"def command_who(self, bot, update):\n\n messages = [\n 'Myles Braithwaite lives in Toronto where he runs a small '\n 'consluting company called [Monkey in your Soul]'\n '(https://monkeyinyoursoul.com/) (you should hire him because '\n \"he's awesome).\",\n 'You should follow him on [Twitter](https://twitter.com/mylesb) '\n 'or [Instagram](https://instagram.com/myles).',\n 'You can find his programming stuff on [GitHub]'\n '(https://github.com/myles) or [CodePen]'\n '(http://codepen.io/mylesb/).'\n ]\n\n self.send_messages(bot, update, messages)",
"def following(account):\n stm = shared_morphene_instance()\n if mph.rpc is not None:\n mph.rpc.rpcconnect()\n if not account:\n if \"default_account\" in mph.config:\n account = [mph.config[\"default_account\"]]\n for a in account:\n a = Account(a, morphene_instance=stm)\n print(\"\\nFollowing statistics for @%s (please wait...)\" % a.name)\n following = a.get_following(False)\n following.print_summarize_table(tag_type=\"Following\")",
"def promoteUser(self):\n\t\t#ensure they're supposed to be here and haven't been here before\n\t\tif self.goodEventsCount >= 3 and not self.verified:\n\t\t\tself.verifiedUser=True\n\t\t\tself.put()\n\t\t\tmessage = mail.EmailMessage(\n\t\t\t\t\tsender=\"Friends with Food Admin <[email protected]>\",\n subject=\"Your account has been verified!\")\n\n\t\t\tmessage.to = self.id.email()\n\t\t\tmessage.cc = \"[email protected]\"\n\t\t\tmessage.body = \"\"\"\n\t\t\tDear %s:\n\n\t\t\tYour account on Friends with Food has been verified! Because you've \n\t\t\tshown us so many good events, we've upgraded your account. Now, you'll \n\t\t\tget notified of free food on campus ASAP! You'll also be able to verify\n\t\t\tevents so that everyone knows they're legit.\n\t\t\t\n\t\t\t*With great power comes great responsibility*\n\t\t\t\n\t\t\tThanks,\n\t\t\t\n\t\t\tThe Friends with Food Team\n\t\t\t\"\"\" % self.id.nickname()\n\t\t\tmessage.send()",
"def leader(self):\n pass",
"def leader(self):\n pass"
] | [
"0.6377603",
"0.6330849",
"0.6264719",
"0.6097981",
"0.60914946",
"0.60311484",
"0.57444423",
"0.5744262",
"0.57255733",
"0.5723841",
"0.56434613",
"0.5592666",
"0.554689",
"0.55407053",
"0.552391",
"0.55216295",
"0.55041254",
"0.54987043",
"0.5481601",
"0.54815555",
"0.5457507",
"0.5447923",
"0.54252946",
"0.54232013",
"0.5399918",
"0.53827333",
"0.5379372",
"0.536289",
"0.535908",
"0.535908"
] | 0.68102974 | 0 |
Unmutes everyone that you have muted | def auto_unmute():
muted = set(t.mutes.users.ids(screen_name=TWITTER_HANDLE)["ids"])
# put user IDs of people you want to remain muted here
users_keep_muted = set([])
    # unmute all
for user_id in muted:
if user_id not in users_keep_muted:
t.mutes.users.destroy(user_id=user_id)
print("unmuted %d" % (user_id)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def unmute(self, ctx, user: Redeemed):\n if member == None or member == ctx.message.author:\n await ctx.send(\"You cannot unmute yourself!\")\n return \n await user.remove_roles(discord.utils.get(ctx.guild.roles, name=\"Muted\"))\n await ctx.send(f\"{user.mention} has been unmuted\")",
"async def unmute(self, ctx, member: discord.Member):\n for channel in ctx.guild.text_channels:\n permissions = channel.permissions_for(member)\n\n if permissions.read_messages:\n # This removes the PermissionOverwrite on the channel, it\n # does not grant send_messages=True\n await channel.set_permissions(member, overwrite=None)",
"async def unmute(self, ctx, user: discord.Member, *, reason: str = None):\r\n server = ctx.message.guild\r\n channel = ctx.message.channel\r\n author = ctx.message.author\r\n action = \"Unmute\"\r\n role = discord.utils.get(server.roles, name=\"Muted - Sensei\")\r\n if not role:\r\n await ctx.send(\"No-one is muted in this server :no_entry:\")\r\n return\r\n if role not in user.roles:\r\n await ctx.send(\"**{}** is not muted :no_entry:\".format(user))\r\n return\r\n try:\r\n await user.remove_roles(role)\r\n except:\r\n await ctx.send(\"I cannot remove the mute role from the user :no_entry:\")\r\n return\r\n await ctx.send(f\"**{user}** has been unmuted {self.bot.get_emoji(470063310386233344)}\")\r\n try:\r\n await self._log(author, server, action, reason, user)\r\n except:\r\n pass\r\n self.d[str(server.id)][str(user.id)][\"toggle\"] = False\r\n self.d[str(server.id)][str(user.id)][\"time\"] = None\r\n self.d[str(server.id)][str(user.id)][\"amount\"] = None\r\n dataIO.save_json(self.file, self.d)\r\n try:\r\n s = discord.Embed(title=\"You have been unmuted early in {}\".format(server.name), colour=000000,\r\n timestamp=datetime.datetime.utcnow())\r\n s.add_field(name=\"Moderator\", value=\"{} ({})\".format(author, str(author.id)))\r\n await user.send(embed=s)\r\n except:\r\n pass",
"async def unmute(self, ctx: Context, members: commands.Greedy[discord.Member], *, reason: str = None):\n\n role = discord.utils.get(ctx.guild.roles, name=\"Muted\")\n member_display = []\n\n for member in members:\n if role not in member.roles:\n await ctx.send(f\"guild member `{member.display_name}` is already unmuted\")\n\n else:\n\n if await self.hiearchy_check(ctx, member):\n continue\n\n member_display.append(str(member))\n await member.remove_roles(role, reason=reason)\n\n member_display = \", \".join(member_display)\n\n if not member_display:\n member_display = \"no one\"\n\n await ctx.send(f\"> {ctx.author.name} unmuted {member_display}\")",
"async def unmute(self, ctx,\n\t\ttarget: discord.Member\n\t):\n\n\t\tself.check_perms(ctx.author, target)\n\n\t\thandler = await Handler.new(self.bot, ctx.guild)\n\t\tawait handler.unmute(ctx.author, target)\n\n\t\tawait ctx.success(f\"{target} (`{target.id}`) has been unmuted.\")",
"async def tradingunmute(self, ctx, target: discord.Member, *, reason=None):\n\n action = TradingUnmute(\n target=target,\n user=ctx.author,\n reason=reason,\n guild_id=ctx.guild.id,\n )\n await action.execute(ctx)\n await action.notify()\n await ctx.send(f\"Unmuted **{target}** in trading channels.\")",
"async def mute(self, ctx):\n author = ctx.message.author\n channel = author.voice.channel\n members = channel.members\n for member in members:\n user = ctx.guild.get_member(member.id)\n await user.edit(mute=True)\n\n embed = await embeds.generate_embed(ctx, author, members,\n description=\":white_check_mark: Successfully muted the following users:\",\n title=channel.name)\n await ctx.send(embed=embed)",
"async def remove_mute(id: int) -> None:\n\n guild = BOT_GLOBAL.get_guild(BOT_GLOBAL.settings.guild_id)\n if guild is not None:\n mute_role = BOT_GLOBAL.settings.guild().role_mute\n mute_role = guild.get_role(mute_role)\n if mute_role is not None:\n user = guild.get_member(id)\n if user is not None:\n await user.remove_roles(mute_role)\n case = Case(\n _id=BOT_GLOBAL.settings.guild().case_id,\n _type=\"UNMUTE\",\n mod_id=BOT_GLOBAL.user.id,\n mod_tag=str(BOT_GLOBAL.user),\n reason=\"Temporary mute expired.\",\n )\n await BOT_GLOBAL.settings.inc_caseid()\n await BOT_GLOBAL.settings.add_case(user.id, case)\n\n u = await BOT_GLOBAL.settings.user(id=user.id)\n u.is_muted = False\n u.save()\n\n log = await prepare_unmute_log(BOT_GLOBAL.user, user, case)\n\n log.remove_author()\n log.set_thumbnail(url=user.avatar_url)\n\n public_chan = guild.get_channel(\n BOT_GLOBAL.settings.guild().channel_public)\n \n dmed = True\n try:\n await user.send(embed=log)\n except Exception:\n dmed = False\n \n await public_chan.send(user.mention if not dmed else \"\", embed=log)\n\n else:\n case = Case(\n _id=BOT_GLOBAL.settings.guild().case_id,\n _type=\"UNMUTE\",\n mod_id=BOT_GLOBAL.user.id,\n mod_tag=str(BOT_GLOBAL.user),\n reason=\"Temporary mute expired.\",\n )\n await BOT_GLOBAL.settings.inc_caseid()\n await BOT_GLOBAL.settings.add_case(id, case)\n\n u = await BOT_GLOBAL.settings.user(id=id)\n u.is_muted = False\n u.save()",
"async def voice_unmute(self, ctx, member: discord.Member, *, reason: typing.Optional[str]):\n if member.voice and member.voice.mute:\n await member.edit(mute=False, reason=reason[:512])\n await ctx.send(f\"User {member.mention} successfully unmuted from voice\")\n return\n if member.voice and not member.voice.mute:\n await ctx.send(\"User is not muted\")\n return\n self.to_unmute.append(member.id)\n await self.add_to_unmutes(member.id)\n await ctx.send(f\"User {member.mention} added to users that will be unmuted\")",
"async def unmute(self, ctx, target: discord.Member, *, reason=None):\n\n action = Unmute(\n target=target,\n user=ctx.author,\n reason=reason,\n guild_id=ctx.guild.id,\n )\n await action.execute(ctx)\n await action.notify()\n await ctx.send(f\"Unmuted **{target}**.\")",
"def disable_mute(self):\n self.mute = False",
"def handle_mic_unmute(_):\n loop.unmute()",
"def mute(self, msg, args):\n if self.mute:\n self.mute=False\n return \"Yay, I can make noise again!\"\n else:\n self.mute=True\n return \"OK, I'll shut up now!\"",
"def unmute(self, nick, chan, arg):\n if not arg:\n \tbot.msg(chan, get_doc())\n self.state.unmute(arg)\n self.msg(chan, \"%s: You are now allowed to use this bot\" % (arg))",
"async def mute(self, *args, **kwargs):\n self.muted = not self.muted # toogle\n if self.muted:\n self.just_muted = True\n return \"I've been muted :(\"\n return \"I'm back! :D\"",
"def mute():\n request_command(tv_command=TVCommand.mute)",
"async def unmute(self, ctx, user: discord.Member = None):\n try:\n if not user:\n return await ctx.send(f\"> **<@{ctx.author.id}>, Please specify a user to unmute.**\")\n if user.id == ctx.author.id:\n return await ctx.send(f\"> **<@{ctx.author.id}>, You cannot unmute yourself.**\")\n mute_role = await self.get_mute_role(ctx)\n muted = await self.check_if_muted(user.id, mute_role)\n if not mute_role:\n return await ctx.send(\n \">**This user was not muted by me as the mute role could not be found. In order for me to create a \"\n \"custom mute role, I need to mute someone first.**\")\n if muted:\n await user.remove_roles(mute_role,\n reason=f\"UnMuting User - Requested by {ctx.author.display_name} ({user.id})\")\n return await ctx.send(f\"> **<@{user.id}> has been unmuted.**\")\n else:\n return await ctx.send(f\"> **<@{user.id}> is not muted.**\")\n except Exception as e:\n log.console(e)\n return await ctx.send(f\"> **I am missing permissions to unmute {user.display_name}. {e}**\")",
"async def mute(self, ctx: Context, members: commands.Greedy[discord.Member], reason=\"no reason\"):\n\n role = discord.utils.get(ctx.guild.roles, name=\"Muted\")\n member_display = []\n\n for i, member in enumerate(members):\n if role in member.roles:\n await ctx.send(f\"guild member `{member.display_name}` is already muted\", delete_after=8)\n del members[i]\n\n if role is None:\n permissions = discord.Permissions()\n permissions.change_nickname = True\n permissions.send_messages = False\n permissions.read_message_history = True\n role = await ctx.guild.create_role(name=\"Muted\", permissions=permissions)\n\n await self.set_perms(ctx.guild, role)\n\n for member in members:\n\n if await self.hiearchy_check(ctx, member):\n continue\n\n member_display.append(str(member))\n await member.add_roles(role, reason=reason)\n\n member_display = \", \".join(member_display)\n\n if not member_display:\n member_display = \"no one\"\n\n await ctx.send(f\"> {ctx.author.name} muted {member_display}\")",
"def set_unmute_finding(finding_path: str) -> None:\n from google.cloud import securitycenter\n\n client = securitycenter.SecurityCenterClient()\n\n request = securitycenter.SetMuteRequest()\n request.name = finding_path\n request.mute = securitycenter.Finding.Mute.UNMUTED\n\n finding = client.set_mute(request)\n print(f\"Mute value for the finding: {finding.mute.name}\")",
"async def unshush(self, ctx):\n author = ctx.message.author\n channel = author.voice.channel\n members = channel.members\n for member in members:\n user = ctx.guild.get_member(member.id)\n await user.edit(mute=False, deafen=False)\n\n embed = await embeds.generate_embed(ctx, author, members,\n description=\":white_check_mark: Successfully unshushed the following users:\",\n title=channel.name)\n await ctx.send(embed=embed)",
"async def mute(self, ctx, member: discord.Member, *, time:TimeConverter = None):\r\n\r\n if member.top_role >= ctx.author.top_role:\r\n return await ctx.send(\"you can't mute that person\")\r\n\r\n role = discord.utils.get(ctx.guild.roles, name=\"Muted\")\r\n await member.add_roles(role)\r\n await ctx.reply((f\"Muted {member} for {time}s\" if time else f\"Muted {member}\"))\r\n\r\n if time:\r\n await asyncio.sleep(time)\r\n await member.remove_roles(role)",
"async def mute(self, ctx, member: discord.Member, time='15m'):\n guild_permissions = member.guild_permissions\n wait_time = parse_time(time).total_seconds()\n # Because sometimes members have nicknames with markdown\n escaped_name = escape_markdown(member.display_name)\n\n if guild_permissions.kick_members:\n # do not mute someone who has permissions to kick members\n await ctx.send(f'Cannot mute {escaped_name} due to roles.')\n\n elif member.bot:\n # do not mute bots\n await ctx.send(f'Cannot mute {escaped_name} (is a bot).')\n\n else:\n overwrite = discord.PermissionOverwrite(\n add_reactions=False,\n send_messages=False,\n )\n\n log_str = (f'{ctx.author.display_name} has muted '\n f'member {member} (<@{member.id}>) for {time}.')\n logger.info(log_str)\n\n for channel in ctx.guild.text_channels:\n permissions = channel.permissions_for(member)\n\n if permissions.read_messages:\n await channel.set_permissions(member, overwrite=overwrite)\n\n await asyncio.sleep(wait_time)\n await ctx.invoke(self.unmute, member)",
"async def async_turn_off(self):\n await self.async_mute_volume(True)",
"async def mute(self, ctx, member : discord.Member, *, reason : str):\r\n mutedRole = discord.utils.get(ctx.guild.roles, name = \"Muted\")\r\n if not mutedRole:\r\n channels = 0\r\n mutedRole = await ctx.guild.create_role(name=\"Muted\")\r\n for channel in ctx.guild.text_channels:\r\n await channel.set_permissions(mutedRole, send_messages=False)\r\n channels += 1 \r\n await ctx.send(f\"Successfully applied overwrites for {channels} channels\")\r\n await member.add_roles(mutedRole)\r\n embed = discord.Embed(title=\"Muted\", description = f\"You have been muted in **{ctx.guild.name}** by **{ctx.author}** **indefinetly** for reason **{reason}**\", colour = ctx.author.color, timestamp = datetime.datetime.now())\r\n await member.send(embed=embed)",
"def toggle_mute(cls) -> bool:\n raise NotImplementedError",
"async def removeroleall(self, ctx, role: discord.Role):\n muted_role = discord.utils.get(ctx.guild.roles, name=\"Muted\")\n punished_role = discord.utils.get(ctx.guild.roles, name=\"Punished\")\n\n if role > ctx.author.top_role:\n return await ctx.send(\n embed=discord.Embed(\n title=\"You don't have permission to remove this role\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.darker_grey(),\n )\n )\n\n if role == muted_role or role == punished_role:\n return await ctx.send(\n embed=discord.Embed(\n title=f\"Can not remove *{role}* role using this command.\",\n description=\"For more information run ```.help removeroleall```\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.red(),\n )\n )\n\n for i in ctx.guild.members:\n if not i.bot:\n await i.remove_roles(role)\n\n await ctx.send(\n embed=discord.Embed(\n title=f\"*{role}* has been removed from **{len(ctx.guild.members)}** members!\",\n timestamp=datetime.datetime.utcnow(),\n colour=discord.Colour.green(),\n )\n )",
"async def selfmute(ctx, *args):\n user = ctx.message.author\n if await is_staff(ctx):\n return await ctx.send(\"Staff members can't self mute.\")\n time = \" \".join(args)\n await _mute(ctx, user, time, self=True)",
"def unmute_callback(id: int) -> None:\n\n BOT_GLOBAL.loop.create_task(remove_mute(id))",
"def mute_track(self, track, muted):\n pass",
"def mute(self, nick, chan, arg):\n if not arg:\n \treturn bot.msg(chan, get_doc())\n self.state.mute(arg)\n self.msg(chan, \"%s: You are temporarily prohibited from using this bot\" % (arg))"
] | [
"0.7793159",
"0.7562203",
"0.7182035",
"0.71422404",
"0.71150947",
"0.70955354",
"0.7093954",
"0.709301",
"0.7084421",
"0.7016341",
"0.7013622",
"0.68202204",
"0.68122566",
"0.67613274",
"0.6734327",
"0.6710592",
"0.665197",
"0.6545975",
"0.6484686",
"0.6455824",
"0.64350253",
"0.6396671",
"0.63905954",
"0.63847554",
"0.6377436",
"0.6375446",
"0.62917566",
"0.6262278",
"0.62306035",
"0.6205526"
] | 0.8222266 | 0 |
Extend `unichr` for all possible Unicode values (n). | def unicode_char(n):
try:
return unichr(n)
except ValueError:
# Generate bytes object packed as int.
bytes_object = struct.pack('i', n)
# Return decoded w/ utf-32 codec.
return bytes_object.decode('utf-32') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def int_to_unichr(codepoint):\n if PY2:\n return unichr(codepoint)\n return chr(codepoint)",
"def make_unicode():\r\n for num in range(300, 320):\r\n yield unichr(num)",
"def safe_unichr(intval):\n try:\n return unichr(intval)\n except ValueError:\n # ValueError: unichr() arg not in range(0x10000) (narrow Python build)\n s = \"\\\\U%08x\" % intval\n # return UTF16 surrogate pair\n return s.decode('unicode-escape')",
"def n_char_generate(self,char,n):\n return char*n",
"def encode2(s,n):\n r = [ chr(((ord(x)-97+n)%26)+97) if x!=' ' else x for x in s]\n return \"\".join(r)",
"def encode1(s,n):\n r = \"\"\n for l in s:\n l = ord(l) # convert to ascii\n l = l - 97 # 'a' is 97 so we want to reduce so 'a'=0 'b'=1 etc\n l = l + n # add the offset\n l=l%26 # use mod so that we wrap around back to 'a' if we go past 'z'\n l=l+97 # and add back the 97\n r = r + chr(l)\n return r",
"def CHAR(table_number):\n return unichr(table_number)",
"def hex_to_unichr(hex_string):\n if (hex_string is None) or (len(hex_string) < 1):\n return None\n if hex_string.startswith(\"U+\"):\n hex_string = hex_string[2:]\n return int_to_unichr(int(hex_string, base=16))",
"def getcharswin(n):\n\n\tstring = \"\"\n\ti = 0\n\t# Loop until we get N chars\n\twhile True:\n\t\tc = msvcrt.getch()\n\t\tif c == b'\\x03':\n\t\t\traise KeyboardInterrupt()\n\t\ttry:\n\t\t\tstring += str(c, ENCODING)\n\t\texcept UnicodeDecodeError:\n\t\t\tcontinue\n\t\ti += 1\n\t\tif i == n:\n\t\t\tbreak\n\treturn string",
"def n_char(self,char,n,w=1,h=1):\n for i in range(n):\n self.esprint(char,w,h)",
"def h_ascii(key, N):\n if type(key) == str:\n if type(N) == int:\n s = 0\n for i in range(len(key)):\n s += ord(key[i])\n return s % N\n else:\n raise ValueError\n else:\n raise ValueError",
"def handle_charref(self, number):\n codepoint = int(number[1:], 16) if number[0] in ('x', 'X') else int(number)\n text = six.unichr(codepoint)\n self.result.append(text)\n return text",
"def encode(n):\n encode = []\n if n < 0:\n return ''\n while n >= 58:\n remainder = n % 58\n encode.append(LETTERS[remainder])\n n = n / 58\n if n:\n encode.append(LETTERS[n])\n return ''.join(reversed(encode))",
"def chrNum(self, num):\n char = chr(num + 65) \n return char",
"def encode(n, minlen=1, charset=CHARSET_DEFAULT):\n\n chs = []\n while n > 0:\n r = n % BASE\n n //= BASE\n\n chs.append(charset[r])\n\n if len(chs) > 0:\n chs.reverse()\n else:\n chs.append('0')\n\n s = ''.join(chs)\n s = charset[0] * max(minlen - len(s), 0) + s\n return s",
"def encode(n, minlen=1):\n\n chs = []\n while n > 0:\n r = n % BASE\n n //= BASE\n\n chs.append(CHARSET[r])\n\n if len(chs) > 0:\n chs.reverse()\n else:\n chs.append(\"0\")\n\n s = \"\".join(chs)\n s = CHARSET[0] * max(minlen - len(s), 0) + s\n return s",
"def loweralphanum(ctx, nchars=\"8\"):\n # deprecated function\n logger.info(\"DeprecationWarning: loweralphanum is deprecated. Use random:loweralphanum instead\")\n random(ctx, \"loweralphanum\", nchars)",
"def gen_random_chars(n: int = 10) -> Text:\n if n < 1:\n raise Exception('Number of random chars to generate has to be > 0')\n\n return ''.join(choice(ascii_lowercase + '-_')\n for i in range(n))",
"def toRoman(n):\n pass",
"def convert_ascii_character(x: str):\n return ord(x) * 10 if ord(x) < LIMIT else 0",
"def force_ascii(text):\n return \"\".join([c for c in text if ord(c) < 128])",
"def missing_char(str, n):\r\n if n<=len(str):\r\n str = str.replace(str[n], \"\")\r\n return str",
"def random_charachter() -> chr:\r\n return chr(int(random.randrange(32, 126, 1)))",
"def ck(value):\n return chr(value)",
"def encode_identifier(alphabet, n):\r\n c = alphabet[n & 0b1111]\r\n n>>=4\r\n while n > 0:\r\n c = c + alphabet[n & 0b111111]\r\n n>>=6\r\n return c",
"def non_secret_char(c):\n return c",
"def caesar_cipher_encode(n: int, text: str, p: str) -> str:\n lookup_table = str.maketrans(p, p[n:] + p[:n])\n\n return text.translate(lookup_table)",
"def _index_to_unicode(cls, index: int) -> str:\n return \"\".join(cls._unicode_subscripts[int(_)] for _ in str(index))",
"def normalizeUnicode(text):\n return ''.join(normalizeLetter(c) for c in text)",
"def _nth_letter(n):\r\n\treturn string.ascii_lowercase[n % len(string.ascii_lowercase)]"
] | [
"0.68852764",
"0.6683728",
"0.6391506",
"0.59930366",
"0.586188",
"0.58399165",
"0.58334017",
"0.5817205",
"0.56382495",
"0.5589591",
"0.55534005",
"0.54915816",
"0.5490308",
"0.5472899",
"0.5450932",
"0.5440723",
"0.54287136",
"0.5352844",
"0.53169405",
"0.5304779",
"0.5256047",
"0.52538717",
"0.5242492",
"0.52000135",
"0.51919395",
"0.51876634",
"0.5184988",
"0.5175401",
"0.5162774",
"0.51488334"
] | 0.7051785 | 0 |
Test ``create_engine`` with invalid adapter. | def test_create_engine_no_adapters():
engine = create_engine("shillelagh://")
with pytest.raises(ProgrammingError) as excinfo:
Table("dummy://", MetaData(bind=engine), autoload=True)
assert str(excinfo.value) == "Unsupported table: dummy://" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def ensure_engine_exists():\n # Check if engine is present\n if not is_engine_binded():\n print(\"Binding new engine\")\n bind_engine(create_engine(\"sqlite:///:memory:\", echo=True))",
"def test_engine(self):\n config = {\n \"url\": 'sqlite://',\n \"connect_args\": {\n \"check_same_thread\": \"false\",\n \"poolclass\": \"pool.StaticPool\"\n }\n }\n engine = self.configurator.setup_engine(config)\n self.assertIsNotNone(engine)",
"def validate_engine(engine):\n\n VALID_DB_ENGINES = (\n \"MySQL\",\n \"mysql\",\n \"oracle-se1\",\n \"oracle-se2\",\n \"oracle-se\",\n \"oracle-ee\",\n \"sqlserver-ee\",\n \"sqlserver-se\",\n \"sqlserver-ex\",\n \"sqlserver-web\",\n \"postgres\",\n \"aurora\",\n \"aurora-mysql\",\n \"aurora-postgresql\",\n \"mariadb\",\n )\n\n if engine not in VALID_DB_ENGINES:\n raise ValueError(\n \"DBInstance Engine must be one of: %s\" % \", \".join(VALID_DB_ENGINES)\n )\n return engine",
"async def test_setup_invalid_config(\n recorder_mock: Recorder, hass: HomeAssistant\n) -> None:\n with patch(\n \"homeassistant.components.sql.config_flow.sqlalchemy.create_engine\",\n ):\n assert not await async_setup_component(hass, DOMAIN, YAML_CONFIG_INVALID)\n await hass.async_block_till_done()",
"async def test_get_default_engine_conflict(subject: EngineStore) -> None:\n await subject.create(run_id=\"run-id\", labware_offsets=[], protocol=None)\n subject.engine.play()\n\n with pytest.raises(EngineConflictError):\n await subject.get_default_engine()",
"def _test_engine(engine, service, vendor, expected_meta):\n tracer = Tracer()\n tracer.writer = DummyWriter()\n\n # create an engine and start tracing.\n trace_engine(engine, tracer, service=service)\n start = time.time()\n\n @contextlib.contextmanager\n def _connect():\n try:\n conn = engine.connect()\n yield conn\n finally:\n conn.close()\n\n with _connect() as conn:\n try:\n conn.execute(\"delete from players\")\n except Exception:\n pass\n\n # boilerplate\n Base.metadata.create_all(engine)\n Session = sessionmaker(bind=engine)\n session = Session()\n\n # do an ORM insert\n wayne = Player(id=1, name=\"wayne\")\n session.add(wayne)\n session.commit()\n\n out = list(session.query(Player).filter_by(name=\"nothing\"))\n eq_(len(out), 0)\n\n # do a regular old query that works\n with _connect() as conn:\n rows = conn.execute(\"select * from players\").fetchall()\n eq_(len(rows), 1)\n eq_(rows[0]['name'], 'wayne')\n\n with _connect() as conn:\n try:\n conn.execute(\"select * from foo_Bah_blah\")\n except Exception:\n pass\n else:\n assert 0\n\n end = time.time()\n\n spans = tracer.writer.pop()\n for span in spans:\n eq_(span.name, \"%s.query\" % vendor)\n eq_(span.service, service)\n eq_(span.span_type, \"sql\")\n\n for k, v in expected_meta.items():\n eq_(span.meta[k], v)\n\n # FIXME[matt] could be finer grained but i'm lazy\n assert start < span.start < end\n assert span.duration\n assert span.duration < end - start\n\n by_rsc = {s.resource:s for s in spans}\n\n # ensure errors work\n s = by_rsc[\"select * from foo_Bah_blah\"]\n eq_(s.error, 1)\n assert \"foo_Bah_blah\" in s.get_tag(errorsx.ERROR_MSG)\n assert \"foo_Bah_blah\" in s.get_tag(errorsx.ERROR_STACK)\n\n expected = [\n \"select * from players\",\n \"select * from foo_Bah_blah\",\n ]\n\n for i in expected:\n assert i in by_rsc, \"%s not in %s\" % (i, by_rsc.keys())\n\n # ensure we have the service types\n services = tracer.writer.pop_services()\n expected = {\n service : {\"app\":vendor, \"app_type\":\"db\"}\n }\n eq_(services, expected)",
"def test_bad_dialect_definitions(self):\n try:\n BaseDialect()\n except Exception as e:\n assert isinstance(e, ValueError)",
"def test_attempting_to_create_abstract_table_fails(self):\r\n from cqlengine.management import create_table\r\n with self.assertRaises(CQLEngineException):\r\n create_table(AbstractModelWithFullCols)",
"def test_manage():\n assert isinstance(hug_peewee.connection.manage(api), SqliteDatabase)\n assert isinstance(hug_peewee.connection.manage(api, location='connection_testing.db'), SqliteDatabase)\n assert hug.test.get(api, 'fake_endpoint').data == True\n\n with pytest.raises(ValueError):\n hug_peewee.connection.manage(api, engine=\"Reese's Petabyte Cup\") # I also wish this existed!",
"def test_connect_invalid_string(self):\n with pytest.raises(ValueError):\n DatabaseDriver.connect('not a valid connect string')",
"def initialize_engine( conn=environment.ENGINE ):\n if conn is not None:\n method = { 'sqlite': create_sqlite_engine,\n 'sqlite-file': _create_sqlite_file_engine,\n # 'mysql': _create_mysql_engine,\n # 'mysql_test': _create_mysql_test_engine\n }.get( conn )\n\n engine = method()\n # Base.metadata.create_all( engine )\n return engine\n\n raise ValueError",
"def test_410_000_non_existant_db(self):\n with TDC() as temp_dir:\n file = Path(temp_dir) / 'database.db'\n self.assertFalse(file.exists(),'Database file exists pre test')\n eng = Engine(file)\n con = eng.connect()\n self.assertTrue(file.exists(), 'Database file does not exists post test')",
"def test_create_text_index_noschema(self):\n actual = self.engine._create_text_index()\n expected = 'TextIndex'\n self.assertEqual(actual, expected)",
"def test_database_needs_database_object_on_creation(self):\n self.assertRaises(TypeError, app.database.Database)",
"def test_error(self):\n src = self.tmp()\n\n f = open(src, 'w')\n f.write(\"\"\"foobar\"\"\")\n f.close()\n\n sqls = SqlScript(src)\n self.assertRaises(Exception, sqls.run, self.engine)",
"def test_get_db_session_with_exception(initialized_db_url):\n patient = models.Patient(\n patient_id=\"patient1\",\n patient_name=\"patient1\",\n patient_birth_date=datetime.utcnow(),\n institution=\"foobar\",\n )\n with pytest.raises(Exception):\n with utils.get_db_session(initialized_db_url) as db:\n db.add(patient)\n raise Exception()\n\n with utils.get_db_session(initialized_db_url) as db:\n result = db.query(models.Patient).all()\n assert not result",
"def test_creation_when_invalid_database_exists_and_no_overwrite(self):\n database_filename = \"test.db\"\n\n # Delete the test database if it exists.\n test_database = os.path.join(os.getcwd(), database_filename)\n if os.path.exists(test_database):\n os.remove(test_database)\n\n # Create our pre-existing, _invalid_, database.\n database_creation_statement = \"\"\"\n CREATE TABLE data(\n row_ID INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,\n ID VARCHAR,\n Time DATETIME\n );\n \"\"\"\n\n with sqlite3.connect(database_filename) as conn:\n cur = conn.cursor()\n cur.execute(database_creation_statement)\n\n # Create the database object, build the database\n database = app.database.Database(database_filename)\n self.assertRaises(sqlite3.DatabaseError, database.create_database)",
"def test_missing(server):\n\n assert \"non_existing_database\" not in server\n with pytest.raises(excepts.DBNotExists):\n server[\"non_existing_database\"]",
"async def test_create_engine(subject: EngineStore) -> None:\n result = await subject.create(run_id=\"run-id\", labware_offsets=[], protocol=None)\n\n assert subject.current_run_id == \"run-id\"\n assert isinstance(result, StateSummary)\n assert isinstance(subject.runner, LiveRunner)\n assert isinstance(subject.engine, ProtocolEngine)",
"def test_loqusdb_wrong_version(loqus_exe):\n # GIVEN a loqusdb version < 2.5\n loqus_extension = LoqusDB(loqusdb_binary=loqus_exe, version=1.0)\n # WHEN instantiating an adapter\n with pytest.raises(SyntaxError):\n # THEN assert a syntax error is raised since version is wrong\n loqus_extension.version_check()",
"def test_invalid_database_file(self):\n with self.assertRaises(Exception):\n app = App(__file__)",
"def test_entities__entity_by_name__1(stubEntities, entityAdapters):\n with pytest.raises(ValueError):\n IEntity('asdf')",
"def test_new_invalid(self) -> None:\n with pytest.raises(TypeError) as excinfo:\n RunwayTestDefinition({}) # type: ignore\n assert str(excinfo.value).startswith(\"expected data of type\")",
"def test_create_instance(self):\n engine = Engine(self.config_file, self.api_token)\n\n assert isinstance(engine, Engine) is True\n assert isinstance(engine.backend, Backend) is True\n assert isinstance(engine.backend, BossBackend) is True\n assert isinstance(engine.validator, Validator) is True\n assert isinstance(engine.validator, BossValidatorV02) is True\n assert isinstance(engine.config, Configuration) is True\n\n # Schema loaded\n assert isinstance(engine.config.schema, dict) is True\n assert engine.config.schema[\"type\"] == \"object\"",
"def test_init(self):\n self.assertEqual(str(PostgreSQL(*self.conn_params).engine),\n \"Engine(postgresql://test:***@127.0.0.1:\"\n \"5432/postgres)\")",
"def test_attempting_to_save_abstract_model_fails(self):\r\n with self.assertRaises(CQLEngineException):\r\n AbstractModelWithFullCols.create(pkey=1, data=2)",
"def pristine_db_engine(tmpdir):\n sqlite_path = os.path.join(str(tmpdir), \"dummy.db\")\n engine = None\n try:\n engine = create_engine(f\"sqlite:///{sqlite_path}\")\n yield engine\n finally:\n if engine:\n engine.dispose()\n if os.path.exists(sqlite_path):\n os.remove(sqlite_path)",
"async def test_archives_state_if_engine_already_exists(subject: EngineStore) -> None:\n await subject.create(run_id=\"run-id-1\", labware_offsets=[], protocol=None)\n\n with pytest.raises(EngineConflictError):\n await subject.create(run_id=\"run-id-2\", labware_offsets=[], protocol=None)\n\n assert subject.current_run_id == \"run-id-1\"",
"def test_get_not_existing_item_raise_exception(config):\n p = PostgreSQLProvider(config)\n with pytest.raises(ProviderItemNotFoundError):\n p.get(-1)",
"def test_wrong_args(self, bad_context):\n with pytest.raises(TypeError):\n Connection(bad_context)"
] | [
"0.68235004",
"0.66725576",
"0.6549929",
"0.6455361",
"0.62477577",
"0.6210242",
"0.61840785",
"0.61660314",
"0.6095915",
"0.60754657",
"0.60633755",
"0.6048799",
"0.59452164",
"0.5927696",
"0.58780146",
"0.5844836",
"0.5800548",
"0.57547444",
"0.5749329",
"0.57365346",
"0.57352024",
"0.57311654",
"0.5728175",
"0.57157654",
"0.57034534",
"0.56767136",
"0.56671834",
"0.5659559",
"0.5656071",
"0.5651457"
] | 0.83949554 | 0 |
Init the class object and simply pass either a Twitter object or a tweet's plain text to this method. The analysis will return a list indicating the polarity and subjectivity of the tweet. | def analyse(self, tweet):
if (type(tweet) == dict):
text = self.clean_tweet(self.to_text(tweet))
else:
text = self.clean_tweet(tweet)
analysis = TextBlob(text)
polarity = analysis.polarity
subjectivity = analysis.subjectivity
res = []
# if polarity > 0.3:
# res.append("positive")
# elif polarity < -0.3:
# res.append("negative")
# else:
# res.append("neutral")
#
# if subjectivity > 0.6:
# res.append("subject")
# elif subjectivity < 0.3:
# res.append("objective")
# else:
# res.append("neutral")
res.append(polarity)
res.append(subjectivity)
return res | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, tweet_json):\r\n self.tweet = tweet_json\r\n self.date = datetime.datetime.strptime(self.tweet[\"date\"], \"%Y-%m-%dT%H:%M:%S.000Z\")\r\n self.processed = False\r\n self.max_importance = 0\r\n try:\r\n text = re.sub(self.tweet[\"keywords\"][0], '', self.tweet[\"text\"])\r\n except IndexError:\r\n text = self.tweet[\"text\"]\r\n try:\r\n self.language = polyglot.detect.Detector(re.sub('#', '', text)).language.name\r\n except polyglot.detect.base.UnknownLanguage as e:\r\n self.language = \"mixed\"\r\n except:\r\n self.language = polyglot.detect.Detector(''.join([i if ord(i) < 128 else ' ' for i in text])).language.name",
"def tweet_sentiment_analysis(self, tweet):\n analysis = TextBlob(self.clean_tweet(tweet))\n\n if analysis.sentiment.polarity > 0:\n return ['Positive', analysis.sentiment.polarity, analysis.sentiment.subjectivity]\n elif analysis.sentiment.polarity == 0:\n return ['Neutral', analysis.sentiment.polarity, analysis.sentiment.subjectivity]\n else:\n return ['Negative', analysis.sentiment.polarity, analysis.sentiment.subjectivity]",
"def __init__(self):\r\n self.tweets = []\r\n self.lcs = \"outliers\"\r\n self.importance = 0",
"def analyze(self, text):\n\n # start from 0 for each Analyser variable\n self.positives = 0\n self.negatives = 0\n\n # precise self text value\n self.text = text\n\n # declare a tokenased word\n tokenizer = nltk.tokenize.TweetTokenizer()\n tokens = tokenizer.tokenize(text)\n\n # indicate the length of list tokens\n size = len(tokens)\n\n # all the word stuff to ckeck\n for word in tokens:\n\n # chaque mots est converti en mot sans majuscule\n word = str.lower(word)\n\n linespos = [line.rstrip('\\n') for line in open('positive-words.txt')]\n linesneg = [line.rstrip('\\n') for line in open('negative-words.txt')]\n\n # check for positive or negative or neutral words\n if word in linespos:\n self.positives += 1\n elif word in linesneg:\n self.negatives += 1\n else:\n continue\n\n # score calculculated and reurned\n score = self.positives - self.negatives\n\n return score",
"def analyse_tweet(self, tweet):\r\n sentiment = 0\r\n subjects = []\r\n\r\n is_comparison = False # sentiment will be the LHS of the comparison\r\n seen_not = False\r\n for word in myparser.parse(tweet,self.company_names,True):\r\n if word == \"not\" or word == \"don't\":\r\n seen_not = True\r\n elif word in self.positive_words:\r\n sentiment = sentiment + 1\r\n elif word in self.negative_words:\r\n sentiment = sentiment - 1\r\n if word in self.company_names:\r\n subjects += [word]\r\n for (p, c) in self.product_names:\r\n if word == p:\r\n subjects += [c]\r\n for (c,s) in self.comparisons:\r\n if word == c:\r\n sentiment = s\r\n is_comparison = True\r\n if seen_not:\r\n sentiment = -sentiment\r\n\r\n #print((tweet, subjects, sentiment, is_comparison))\r\n\r\n if is_comparison:\r\n subjects += [None, None]\r\n return[(subjects[0], sentiment), (subjects[1], -sentiment)]\r\n else:\r\n return [(sub, sentiment) for sub in subjects]",
"def do_sentiment_analysis(self):\n\n tweets_sentiment = []\n\n for tweet in self.tweets:\n parsed_tweet = {}\n parsed_tweet['text'] = tweet\n sentiment_data = self.tweet_sentiment_analysis(tweet)\n parsed_tweet['sentiment'] = sentiment_data[0]\n parsed_tweet['polarity'] = sentiment_data[1]\n parsed_tweet['subjectivity'] = sentiment_data[2]\n\n tweets_sentiment.append(parsed_tweet)\n\n self.sentiment_data = tweets_sentiment\n self.positive_tweets = [tweet for tweet in self.sentiment_data if tweet['sentiment'] == 'Positive']\n self.negative_tweets = [tweet for tweet in self.sentiment_data if tweet['sentiment'] == 'Negative']\n self.neutral_tweets = [tweet for tweet in self.sentiment_data if tweet['sentiment'] == 'Neutral']\n\n return tweets_sentiment",
"def analyze(self, text): #takes the text to be analyzed for sentiment\n #initialize inicial score to 0\n score = 0\n #Create tokenizer instance\n tokenizer = nltk.tokenize.TweetTokenizer()\n #create list of words in a tweets\n tokens = tokenizer.tokenize(text)\n \n #iterate over tokens(list of words)\n for word in tokens:\n #check if word is positive or negative\n if word.lower() in self.positives_words:\n score+=1\n if word.lower() in self.negatives_words:\n score-=1\n #neutral if its neither, doesnt add anything, 0\n return score",
"def analyze(self, text):\n tknzr = nltk.tokenize.casual.TweetTokenizer(preserve_case=True, reduce_len=False, strip_handles=False)\n tknTxt = tknzr.tokenize(text)\n sentiment = 0\n \n for i in range(len(tknTxt)):\n if tknTxt[i] in self.posTxt:\n #print(\"POS\")\n #print(tknTxt[i])\n sentiment += 1\n elif tknTxt[i] in self.negTxt:\n #print(\"NEG\")\n #print(tknTxt[i])\n sentiment -= 1\n \n return sentiment",
"def classify(self, tweets):\n classified = []\n for t in tweets:\n #use the SVM to predict the polarity\n t.polarity = self.m_learner.predict_from_tweet(t)\n #append the tweet to the list\n classified.append(t)\n\n return classified",
"def process_sentiment(self):\r\n\r\n\r\n print(\"Beginning sentiment analysis\")\r\n # textblob time\r\n #tweet_sentiment = [TextBlob(tweet['filtered_text']).sentiment for index, tweet in self.tweet_dataframe.iterrows()]\r\n #self.tweet_dataframe['polarity'] = [i.polarity for i in tweet_sentiment]\r\n #self.tweet_dataframe['subjectivity'] = [i.subjectivity for i in tweet_sentiment]\r\n\r\n #vader time\r\n #http://t-redactyl.io/blog/2017/04/applying-sentiment-analysis-with-vader-and-the-twitter-api.html\r\n sentiment = []\r\n\r\n analyzer = SentimentIntensityAnalyzer()\r\n\r\n for tweet in self.tweet_dataframe['filtered_text']:\r\n vs = analyzer.polarity_scores(tweet)\r\n sentiment.append(vs['compound'])\r\n\r\n self.tweet_dataframe['vader_polarity'] = pd.Series(sentiment)",
"def __init__(self, tweet_data):\n _hashtags = tweet_data['entities']['hashtags']\n _str_date = tweet_data['created_at']\n self.account = Account(tweet_data['user'])\n self.date = self.format_date(_str_date)\n self.hashtags = [\"#%s\" % (tag['text']) for tag in _hashtags]\n self.likes = tweet_data['favorite_count']\n # Note: replies number is only available with\n # the Premium and Enterprise tier products.\n # https://developer.twitter.com/en/docs/tweets/data-dictionary/overview/tweet-object # noqa\n self.replies = 0\n self.retweets = tweet_data['retweet_count']\n self.text = tweet_data['text']",
"def __init__(self, twitter_consumer_key, twitter_consumer_secret,\n twitter_access_key, twitter_access_secret,\n search_terms, search_on='news',\n bitly_access_token='',\n news_api_key=''):\n\n # Access Keys and Secrets for Twitter API obtained at: https://developer.twitter.com/\n auth = tweepy.OAuthHandler(twitter_consumer_key, twitter_consumer_secret)\n auth.set_access_token(twitter_access_key, twitter_access_secret)\n\n # Store API object for access to Twitter REST API\n self.__api = tweepy.API(auth)\n\n # Term(s) to search news feeds or Twitter on\n self.search_terms = search_terms\n\n # Method TwitterBot will use to search on. Current options are 'news' or 'twitter'\n self.search_on = search_on\n\n # Access token for optional Bitly API: https://dev.bitly.com/\n self.__bitly_access_token = bitly_access_token\n\n # Access token for optional News API: https://newsapi.org/\n self.__news_api_key = news_api_key\n\n # Will store list of items scraped from news or Twitter\n self.list = []",
"def analyse_tweets(nb_tweets, classifier, Resource, threshold, language='en'):\n return [(bytes(line, 'utf-8'), _minimal_analysis(bytes(line, 'utf-8'), classifier, Resource, threshold, language))\n for line in\n collect_tweet(nb_tweets)]",
"def get_sentiment_analysis(sender, instance, **kwargs):\n text_analysis = TextAnalysis(instance.text)\n\n # Prevent sentiment_analysis API call every time the document is saved\n if instance.sentiment_analysis is None:\n instance.get_sentiment_analysis()",
"def analyze(self, text):\n\n tokenizer = nltk.tokenize.TweetTokenizer()\n \n tokens = tokenizer.tokenize(text)\n \n sentiment = 0\n \n for word in tokens:\n if word in self.__positives:\n sentiment += 1\n elif word in self.__negatives:\n sentiment -= 1\n \n return sentiment",
"def __init__(self, influencers, credentials, similarity_parameter, popularity_parameter, epsilon):\n \n # Twitter API credentials initialization\n auth = tweepy.OAuthHandler(credentials['consumer_key'], credentials['consumer_secret'])\n auth.set_access_token(credentials['access_token'], credentials['access_token_secret'])\n self.api = tweepy.API(auth)\n\n # Class fields\n self.username = credentials['username']\n self.influencers = influencers\n self.complete_model = None\n self.influencer_models = None\n self.userTweetsStat = {}\n self.similarities = {}\n self.similarity_parameter = similarity_parameter\n self.popularity_parameter = popularity_parameter\n self.epsilon = epsilon\n self.valueState = {influencer: 0 for influencer in self.influencers.allInfluencers}\n self.reward = 1\n self.rewardParam = 0.1\n self.alpha = 0.1\n self.gamma = 1\n self.curDif = 0",
"def __init__(self):\n self.emotions_list = EmotionsList('NRC-Emotion-Intensity-Lexicon-v1.txt')\n self.tweets_list = None\n self.nickname = None",
"def learn(self):\n #get the training tweets and insert them into a list\n self.training_tweets = []\n print self.datas\n for t in self.datas.get_positive_tweets():\n t.polarity = 10\n self.training_tweets.append(t)\n for t in self.datas.get_negative_tweets():\n t.polarity = -10\n self.training_tweets.append(t)\n for t in self.datas.get_neutral_tweets():\n t.polarity = 0\n self.training_tweets.append(t)\n self.m_learner.learn_from_tweets(self.training_tweets)",
"def __init__(self):\r\n\t\t\r\n\t\tself.redis = redis.Redis()\r\n\t\tself.info_to_get = ['text', 'created_at', 'user']\r\n\t\tself.search_results = {}\r\n\t\tself.raw_data_directory_name = \"raw_mining_data\"\r\n\t\tself.filtered_data_directory_name = \"filtered_mining_data\"\r\n\t\tenglish_file = pjoin( sys.path[0], \"sentiment_word_files\", \"Nielsen2010Responsible_english.csv\")\r\n\t\tself.analyzeEnglish = dict(map(lambda (w,e): (w, int(e)), \\\r\n\t\t\t\t\t\t\t\t\t[ line.strip().lower().split('\\t') for line in open(english_file) ]))\r\n\t\tself.tweets_count = 0",
"def __init__(self):\r\n # keys and tokens from the Twitter Dev Console\r\n consumer_key = 'e1I0CSqgSOGxhH940cey1PR50'\r\n consumer_secret = 'APZE7kT2MgJsledQszLbNVcZZEhCUDX3NKAseXTjnsEcggUAkf'\r\n access_token = '876294238144786432-Q9PfwxPd4T7OdYO9hXiFyVDO38Q8jZV'\r\n access_token_secret = 'e0RhKgnLLyHnEOrWS92Tw0pKv5hWrN3chjp4Azm4NayOG'\r\n\r\n # clean tween regular expression\r\n self.pattern = re.compile('(@[A-Za-z0-9]+)|([^0-9A-Za-z \\t])|(\\w+://\\S+)')\r\n\r\n # attempt authentication\r\n try:\r\n # create OAuthHandler object\r\n self.auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\r\n # set access token and secret\r\n self.auth.set_access_token(access_token, access_token_secret)\r\n # create tweepy API object to fetch tweets\r\n self.api = tweepy.API(self.auth)\r\n logging.info(self.api.rate_limit_status()['resources']['search'])\r\n except:\r\n logging.error(\"Error: Authentication Failed\")",
"def __init__(self, text):\n # BEGIN Question 2\n self.text = text\n self.word_set = []\n # END Question 2",
"def sentimental_analysis_component():\n sentence = st.text_area(\"Enter Text to Analyze:\")\n if st.button(\"Submit\"):\n result = sentiment_analyzer_scores(sentence)\n st.success(result)\n\n #if st.checkbox('Lookup Twitter Status', True):\n id_input = st.text_area(\"Enter Tweet ID to Analyze:\")\n st.markdown(' e.g. 1333434829438906376 or 1257038775785422848')\n\n # Modules for twitter API\n import tweepy \n import os\n \n # API Keys\n consumer_key = os.environ.get('TWITTER_CONSUMER_KEY')\n consumer_secret = os.environ.get('TWITTER_CONSUMER_SECRET')\n access_token = os.environ.get('TWITTER_ACCESS_TOKEN')\n access_token_secret = os.environ.get('TWITTER_ACCESS_TOKEN_SECRET')\n \n # Auth type and API options\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n api = tweepy.API(auth,wait_on_rate_limit=True)\n\n # Tweet ID to fetch\n id_ = [id_input]\n \n # API Call \n statuses = api.statuses_lookup(id_, tweet_mode=\"extended\")\n \n # API Response to variables\n for status in statuses:\n tweet_text = status.full_text\n tweet_user = status.user.screen_name\n covid_check = covid_mention(tweet_text.lower())\n\n if st.button(\"Analyze Tweet\"):\n lookup_result = sentiment_analyzer_scores(tweet_text)\n st.markdown('## Tweet Sentiment Results')\n st.success(lookup_result)\n st.markdown(f'## Full Text:')\n st.success(f'{tweet_text}')\n\n st.markdown(f\"\"\"## Tweet Stats:\n Tweet ID:{id_}\n User: {status.user.screen_name}\n Created at: {status.created_at}\n Source: {status.source}\n Engagement:\n Retweets: {status.retweet_count}\n Favourited: {status.favorite_count}\n Pandemic Related: {covid_check}\"\"\")",
"def feat_eng(self, tweets):\n self.tweets['emojis'] = get_emojis(self.tweets['text']) # get emojis as text\n self.tweets['polarity'] = self.tweets['text'].map(\n lambda x: TextBlob(x).sentiment.polarity)\n self.tweets['word_count'] = self.tweets['text'].map(lambda x: len(str(x).split()))",
"def __init__(self, corpus):\n if isinstance(corpus, str):\n # Convert directory to Plaintext Corpus.\n corpus = PlaintextCorpusReader(corpus, r\".*\\.txt\")\n self.corpus = corpus\n self._bigrams = FreqDist()\n self._count()",
"def analyze(self, tweet):\n \n # keeping track of the score\n score = 0\n \n # filtering though tweets exstracting the useful words\n # preserve_case = false maks them lowercase\n tokenizer = nltk.tokenize.TweetTokenizer(preserve_case = False)\n tokens = tokenizer.tokenize(tweet)\n \n # checking word for word the intension and keeping score\n for word in tokens:\n if word in self.dic:\n if self.dic[word] == 1:\n score += 1\n else:\n score -= 1\n# score += self.dic[word]\n return score",
"def analyze(self, text):\n\n # TODO\n # tokens = tokenizer.tokenize(tweet)\n tokenizer = nltk.tokenize.TweetTokenizer()\n tokens = tokenizer.tokenize(text)\n score = 0\n\n for word in tokens:\n # iterate over tokens#str.lower\n\n if word.lower() in self.positives:\n score = score+1\n\n elif word.lower() in self.negatives:\n score = score-1\n\n else:\n continue\n return score",
"def __init__(self, text=None, bare=False, stem='gap', pos=False, roman = False, stopwords=False, punct=False, conjunction=False, article=False, demonstrative=False, preposition=False, question=False, pronoun=False, quantifier=False, date=False, number=False, ssn=False, telephone=False, name=False, address=False, sentiment=False, gender=False, age = False, dob=False, unit=False, standard=False, metric=False, spell=None ):\n self._text = text # raw text\n self._words = None # list of words\n self._punct = punct # keep/remove punctuation\n self._stemming = stem # on/off stemming\n self._pos = pos # on/off parts of speech\n self._roman = roman # on/off romanization \n self._porter = stopwords # keep/remove stopwords\n self._bare = bare # on/off bare tokenizing\n self._standard = standard # convert metric to standard units\n self._metric = metric # convert standard to metric units\n self._spell = None # spell checking\n self._bow = None # bag of words\n self._freq = None # word count frequency\n self._tf = None # term frequency\n \n # More than just bare tokenizing\n if self._bare == False:\n self._spell = spell # do (not) spell checking\n \n # Keep Stopwords\n if stopwords is True:\n self._quantifier = True # keep words indicating a size\n self._preposition = True # keep prepositions\n self._article = True # keep articles\n self._conjunction = True # keep conjunctions\n self._demonstrative = True # keep demonstratives\n self._question = True # keep question words\n self._pronoun = True # keep pronouns \n self._sentiment = True # keep sentiment words\n self._number = True # keep numbers \n self._date = True # keep date\n self._ssn = True # keep social security number\n self._telephone = True # keep telephone numbers\n self._address = True # keep street addresses\n self._name = True # keep proper names\n self._gender = True # keep gender words\n self._age = True # keep age\n self._dob = True # keep date of birth words\n self._unit = True # keep unit of measurement\n # Remove Stopwords\n else:\n self._quantifier = quantifier # keep/remove words indicating a size\n self._preposition = preposition # keep/remove prepositions\n self._article = article # keep/remove articles\n self._conjunction = conjunction # keep/remove conjunctions\n self._demonstrative = demonstrative # keep/remove demonstratives\n self._question = question # keep/remove question words\n self._pronoun = pronoun # keep/remove pronouns\n self._sentiment = sentiment # keep/remove sentiment words\n self._number = number # keep/remove numbers\n self._date = date # keep/remove date\n self._ssn = ssn # keep/remove social security number\n self._telephone = telephone # keep/remove telephone numbers\n self._address = address # keep/remove street addresses\n self._name = name # keep/remove proper names\n self._gender = gender # keep/remove gender words\n self._age = age # keep/remove age\n self._dob = dob # keep/remove date of birth words\n self._unit = unit # keep/remove unit of measurement words\n \n if isinstance(stopwords, bool) is False:\n raise TypeError(\"Stopwords must be a boolean\")\n if isinstance(bare, bool) is False:\n raise TypeError(\"Bare must be a boolean\")\n if isinstance(quantifier, bool) is False:\n raise TypeError(\"Quantifier must be a boolean\")\n if isinstance(preposition, bool) is False:\n raise TypeError(\"Preposition must be a boolean\")\n if isinstance(conjunction, bool) is False:\n raise TypeError(\"Conjunction must be a boolean\")\n if isinstance(article, bool) is False:\n raise TypeError(\"Article must be a 
boolean\")\n if isinstance(demonstrative, bool) is False:\n raise TypeError(\"Demonstrative must be a boolean\")\n if isinstance(question, bool) is False:\n raise TypeError(\"Question must be a boolean\")\n if isinstance(pronoun, bool) is False:\n raise TypeError(\"Pronoun must be a boolean\")\n if isinstance(number, bool) is False:\n raise TypeError(\"Number must be a boolean\")\n if isinstance(date, bool) is False:\n raise TypeError(\"Date must be a boolean\")\n if isinstance(ssn, bool) is False:\n raise TypeError(\"SSN must be a boolean\")\n if isinstance(telephone, bool) is False:\n raise TypeError(\"Telephone must be a boolean\")\n if isinstance(name, bool) is False:\n raise TypeError(\"Name must be a boolean\")\n if isinstance(address, bool) is False:\n raise TypeError(\"Address must be a boolean\")\n if isinstance(sentiment, bool) is False:\n raise TypeError(\"Sentiment must be a boolean\")\n if isinstance(gender, bool) is False:\n raise TypeError(\"Gender must be a boolean\")\n if isinstance(dob, bool) is False:\n raise TypeError(\"Gender must be a boolean\")\n if isinstance(age, bool) is False:\n raise TypeError(\"Age must be a boolean\")\n if isinstance(punct, bool) is False:\n raise TypeError(\"Punct must be a boolean\")\n if isinstance(unit, bool) is False:\n raise TypeError(\"Unit must be a boolean\")\n if isinstance(standard, bool) is False:\n raise TypeError(\"Standard must be a boolean\")\n if isinstance(metric, bool) is False:\n raise TypeError(\"Metric must be a boolean\")\n if text is not None:\n if isinstance(text, str) is False:\n raise TypeError(\"String expected for text\")\n if spell is not None:\n if spell not in ['en', 'fr', 'es', 'it', 'de']:\n raise ValueError(\"Wrong value for spell: en, es, fr, it or de\")\n \n if text is not None:\n self._split()\n if self._bare == False:\n # preprocess the tokens\n self._preprocess()\n # word stemming\n if self._stemming == 'gap':\n self._stem()\n elif self._stemming == 'porter':\n self._nltkStemmer('porter')\n elif self._stemming == 'snowball':\n self._nltkStemmer('snowball')\n elif self._stemming == 'lancaster':\n self._nltkStemmer('lancaster')\n elif self._stemming == 'lemma':\n self._lemma()\n # remove stop words\n self._stopwords()\n # Do unit conversions\n self._conversion()\n # Do POS tagging\n if self._pos == True:\n self._partsofspeech()",
"def analyze(self, text):\n #analize every word in the text a value -1, 1 or 0 and calculate total score\n #tokens allow us to split words in single tokens we can initialize tokens like this:\n\n tokenizer = nltk.tokenize.TweetTokenizer()\n tokens = tokenizer.tokenize(text.lower())\n\n score = 0\n\n if tokens[0] in self.negatives:\n score =- 1\n elif tokens[0] in self.positives:\n score =+ 1\n else:\n score = 0\n\n #print('', text)\n\n return score",
"def process(self, tweet):\n\n #identify the applicable event keywords for this text\n text = self.cleanup_data(tweet.text)\n tokens = [str(t.lower()).translate(None, string.punctuation) for t in tweet.text.split()]\n applicable_tokens = []\n for phrase in self.match_event_tree.root.keywords:\n if phrase in \" \".join(tokens):\n applicable_tokens.append(phrase)\n\n self.match_event_tree.propogate_tweet(applicable_tokens, tweet)",
"def __init__(self, data: List):\n self.data: List = data\n self.texts: List = self.list_texts()\n self.post_nlp: List = False"
] | [
"0.6772969",
"0.66310155",
"0.6570511",
"0.6479291",
"0.64774686",
"0.6471122",
"0.63829994",
"0.63479525",
"0.6342602",
"0.6246067",
"0.6185657",
"0.6146315",
"0.61160105",
"0.6048045",
"0.6047929",
"0.6042262",
"0.6041226",
"0.6035858",
"0.60260266",
"0.60242325",
"0.6005878",
"0.5968961",
"0.59406316",
"0.5939317",
"0.5925296",
"0.59222686",
"0.59214926",
"0.5896973",
"0.58498555",
"0.58485323"
] | 0.74163216 | 0 |
Discrete Variable to Feature Convertor. var = value of variable. varname = name of variable. lims = range of discretization. collapse = list with two binary vals: collapse all below lims[0] to lims[0] & collapse all above lims[1] to lims[1]. e.g., fdict = discVar2Feature(8, 'positive adjective', lims = [1,5], collapse = [True, True]) Has 1 positive adjective False Has 2 positive adjective False Has 4 positive adjective False Has 3 positive adjective False Has 5 positive adjective True | def discVar2Feature( var, varname, lims = [1,3], collapse = [False, False], ctxt = 'Has'):
vals = xrange(lims[0], lims[1]+1)
keystr = ctxt + ' %s ' + varname
fdict = {keystr % val:False for val in vals}
if collapse[0] == True:
if lims[0] > var:
var = lims[0]
#var = max([var, lims[0]])
if collapse[1] == True:
if lims[1] < var:
var = lims[1]
#var = min([var, lims[1]])
if var >= lims[0] and var <= lims[1]: #if collapse = False, ignore vals outside lims
fdict[(keystr) % (var)] = True
return fdict | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def discVar2FeatureOld( var, varname, lims = [1,5], collapse = [False, False], ctxt = 'contains'):\n nums = ['zero','one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine', 'ten']\n \n vals = range(lims[0], lims[1]+1) \n \n #init fdict\n fdict = dict() \n for k, val in enumerate(vals):\n fdict[(ctxt + ' %s ' + varname) % (nums[val])] = False\n \n if collapse[0] == True: var = max([var, lims[0]])\n if collapse[1] == True: var = min([var, lims[1]])\n \n if var >= lims[0] and var <= lims[1]: #if collapse = False, ignore vals outside lims\n fdict[(ctxt + ' %s ' + varname) % (nums[var])] = True \n \n return fdict",
"def convertToDiscreteFunction(boolean: bool) -> cern.japc.value.DiscreteFunction:\n ...",
"def FE_discretize_numeric_variables(train, bin_dict, test='', strategy='kmeans',verbose=0):\r\n df = copy.deepcopy(train)\r\n test = copy.deepcopy(test)\r\n num_cols = len(bin_dict)\r\n nrows = int((num_cols/2)+0.5)\r\n #print('nrows',nrows)\r\n if verbose:\r\n fig = plt.figure(figsize=(10,3*num_cols))\r\n for i, (col, binvalue) in enumerate(bin_dict.items()):\r\n new_col = col+'_discrete'\r\n if strategy == 'gaussian':\r\n kbd = GaussianMixture(n_components=binvalue, random_state=99)\r\n df[new_col] = kbd.fit_predict(df[[col]]).astype(int)\r\n if not isinstance(test, str):\r\n test[new_col] = kbd.predict(test[[col]]).astype(int)\r\n else:\r\n kbd = KBinsDiscretizer(n_bins=binvalue, encode='ordinal', strategy=strategy)\r\n df[new_col] = kbd.fit_transform(df[[col]]).astype(int)\r\n if not isinstance(test, str):\r\n test[new_col] = kbd.transform(test[[col]]).astype(int)\r\n if verbose:\r\n ax1 = plt.subplot(nrows,2,i+1)\r\n ax1.scatter(df[col],df[new_col])\r\n ax1.set_title(new_col)\r\n if not isinstance(test, str):\r\n return df, test\r\n else:\r\n return df",
"def denseFeature(self, feat):\n return {'feat': feat}",
"def convertToDiscreteFunctionList(boolean: bool) -> cern.japc.value.DiscreteFunctionList:\n ...",
"def preprocess_var(bd, var):\n filepath_sv = f\"team67-ptp/data/{var}.csv\"\n filepath = bd\n data = feather.read_dataframe(filepath)\n df = data.copy()\n df2 = df[var]\n df2 = df2.to_frame()\n if df2[var].dtype is \"category\":\n df2[var] = df2[var].astype(\"category\").cat.codes\n filename = filepath_sv\n df2.to_csv(filename, index=False)\n print(\"Succesfully exported to csv\")\n else:\n filename = filepath_sv\n df2.to_csv(filename, index=False)\n print(\"Succesfully exported to csv\")",
"def addFeature(self, strName, lstDomain):\n # create a new variable CSPVariable object\n newFeature = CSPFeature(strName, lstDomain)\n # put the new variable in the graph's list of variables\n self.features.append(newFeature)",
"def select_var_feature(adata, min_score=0.5, nb_features=None, show=True, copy=False):\n if copy:\n inplace=False\n else:\n inplace=True\n\n adata = adata.copy() if not inplace else adata\n \n # calculate variability score\n cal_var(adata, show=show) # adds variability score for each feature \n # adata.var['variablility_score'] = abs(adata.var['prop_shared_cells']-0.5)\n var_annot = adata.var.sort_values(ascending=True, by ='variability_score')\n\n # calculate the min score to get a specific number of feature \n if nb_features != None and nb_features < len(adata.var_names): \n min_score = var_annot['variability_score'][nb_features] \n \n \n adata_tmp = adata[:,adata.var['variability_score']<=min_score].copy()\n \n ## return the filtered AnnData objet.\n if not inplace:\n adata_tmp = adata[:,adata.var['variability_score']<=min_score]\n return(adata_tmp)\n else:\n adata._inplace_subset_var(adata.var['variability_score']<=min_score)",
"def feature_discretion(self, X, y):\n temp, X_interval = [], []\n if self._DISCRETION == \"percentile_discrete\":\n for i in range(0, X.shape[-1]):\n x = X[:, i]\n x_type = type_of_target(x)\n # logging.info(\"before: \"+\" \".join([str(i), str(set(X[:, i])), str(x_type)]))\n if 0:\n if x_type == 'continuous':\n x1, interval = self.percentile_discrete(x, self._WOE_N)\n X_interval.append(interval)\n temp.append(x1)\n # logging.info(\"continue_after: \" + \" \".join([str(i), str(set(x1)), str(x1)]))\n else:\n temp.append(x)\n # logging.info(\"after: \" + \" \".join([str(i), str(set(x)), str(x)]))\n else:\n x1, interval = self.percentile_discrete(x, self._WOE_N)\n X_interval.append(interval)\n temp.append(x1)\n # logging.info(\"continue_after: \" + \" \".join([str(i), str(set(x1)), str(x1)]))\n elif self._DISCRETION == \"interval_discrete\":\n for i in range(0, X.shape[-1]):\n x = X[:, i]\n # logging.info(\"before: \"+\" \".join([str(i), str(set(X[:, i]))]))\n x1, interval = self.interval_discrete(x, self._WOE_N)\n X_interval.append(interval)\n temp.append(x1)\n # logging.info(\"interval_after: \" + \" \".join([str(i), str(set(x1)), str(x1)]))\n elif self._DISCRETION == \"rf_discrete\":\n for i in range(0, X.shape[-1]):\n x = X[:, i]\n # logging.info(\"before: \"+\" \".join([str(i), str(set(X[:, i]))]))\n x1, interval = self.rf_discrete(x, y)\n X_interval.append(interval)\n temp.append(x1)\n # logging.info(\"rf_after: \" + \" \".join([str(i), str(set(x1)), str(x1)]))\n return np.array(temp).T, X_interval",
"def dense2cvxopt(value):\n import cvxopt\n return cvxopt.matrix(value, tc='d')",
"def dict_to_feature(d):\n f = ee.Feature(None,ee.Dictionary(d))\n return f",
"def my_featurize(apartment):\n col =np.array([1, 2, 0, 0, 0, 0, 0, 0 ])\n a= pd.DataFrame(apartment[col])\n if(apartment.get('condition')== 'good'):\n col[1] =1\n else:\n if(apartment.get('condition')== 'zero condition'):\n col[1] = 0\n col[2] =apartment.get('num_rooms')\n col[3] =apartment.get('area')\n col[4] =apartment.get('num_bathrooms')\n col[5] =apartment.get('floor')\n col[6] =apartment.get('ceiling_height')\n col[7] =apartment.get('max_floor')\n\n return col, apartment['price']",
"def get_feature_set_SC2(tweet, sentimentvalues):\n pos_tag_freq = {}\n additional_freq = {}\n for phrase in tweet.tagged_words:\n for word in phrase:\n try:\n tag = word['pos']\n pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n if tag in ADJECTIVES:\n additional_freq['adjectives'] = additional_freq.get(tag, 0) + 1\n elif tag in ADVERBS: \n additional_freq['adverbs'] = additional_freq.get(tag, 0) + 1\n elif tag in PRONOUNS:\n additional_freq['pronoun'] = 1\n except KeyError:\n continue\n for key in pos_tag_freq.keys():\n pos_tag_freq[key] = pos_tag_freq[key]*1.0\n #number of adjectives in sentence, number of adverbs in sentence(except ikke), pronoun in sentence(binary) \n #Number of exclamation marks, number of emoticons,\n emoticons = tweet.nrof_happyemoticons+tweet.nrof_sademoticons\n if emoticons>0:\n additional_freq['emoticons'] = emoticons*1.0\n if tweet.nrof_exclamations>0:\n additional_freq['exclamations'] = tweet.nrof_exclamations*1.0\n \n #Add lexicon values\n #total subjectivity score from word polarities, total objectivity score, number of subjective words, number of objective words, e\n sub_score = sentimentvalues[0]+sentimentvalues[1]\n obj_score = sentimentvalues[2]\n if sub_score>0:\n additional_freq[\"sub_score\"] = sub_score+1.0\n if obj_score>0:\n additional_freq[\"obj_score\"] = obj_score+1.0\n \n #Concatenate the dicts\n features= dict(pos_tag_freq.items() + additional_freq.items())\n \n return features",
"def _make_features(self, x):\n\t\tx = x.unsqueeze(1)\n\t\treturn torch.cat([x ** i for i in range(1, self._degree+1)], 1)",
"def preprocess_feature(df):",
"def classify_columns(df_preds, verbose=0):\r\n train = copy.deepcopy(df_preds)\r\n #### If there are 30 chars are more in a discrete_string_var, it is then considered an NLP variable\r\n max_nlp_char_size = 30\r\n max_cols_to_print = 30\r\n print('############## C L A S S I F Y I N G V A R I A B L E S ####################')\r\n print('Classifying variables in data set...')\r\n #### Cat_Limit defines the max number of categories a column can have to be called a categorical colum\r\n cat_limit = 35\r\n float_limit = 15 #### Make this limit low so that float variables below this limit become cat vars ###\r\n def add(a,b):\r\n return a+b\r\n sum_all_cols = dict()\r\n orig_cols_total = train.shape[1]\r\n #Types of columns\r\n cols_delete = [col for col in list(train) if (len(train[col].value_counts()) == 1\r\n ) | (train[col].isnull().sum()/len(train) >= 0.90)]\r\n train = train[left_subtract(list(train),cols_delete)]\r\n var_df = pd.Series(dict(train.dtypes)).reset_index(drop=False).rename(\r\n columns={0:'type_of_column'})\r\n sum_all_cols['cols_delete'] = cols_delete\r\n var_df['bool'] = var_df.apply(lambda x: 1 if x['type_of_column'] in ['bool','object']\r\n and len(train[x['index']].value_counts()) == 2 else 0, axis=1)\r\n string_bool_vars = list(var_df[(var_df['bool'] ==1)]['index'])\r\n sum_all_cols['string_bool_vars'] = string_bool_vars\r\n var_df['num_bool'] = var_df.apply(lambda x: 1 if x['type_of_column'] in [np.uint8,\r\n np.uint16, np.uint32, np.uint64,\r\n 'int8','int16','int32','int64',\r\n 'float16','float32','float64'] and len(\r\n train[x['index']].value_counts()) == 2 else 0, axis=1)\r\n num_bool_vars = list(var_df[(var_df['num_bool'] ==1)]['index'])\r\n sum_all_cols['num_bool_vars'] = num_bool_vars\r\n ###### This is where we take all Object vars and split them into diff kinds ###\r\n discrete_or_nlp = var_df.apply(lambda x: 1 if x['type_of_column'] in ['object'] and x[\r\n 'index'] not in string_bool_vars+cols_delete else 0,axis=1)\r\n ######### This is where we figure out whether a string var is nlp or discrete_string var ###\r\n var_df['nlp_strings'] = 0\r\n var_df['discrete_strings'] = 0\r\n var_df['cat'] = 0\r\n var_df['id_col'] = 0\r\n discrete_or_nlp_vars = var_df.loc[discrete_or_nlp==1]['index'].values.tolist()\r\n if len(var_df.loc[discrete_or_nlp==1]) != 0:\r\n for col in discrete_or_nlp_vars:\r\n #### first fill empty or missing vals since it will blowup ###\r\n train[col] = train[col].fillna(' ')\r\n if train[col].map(lambda x: len(x) if type(x)==str else 0).mean(\r\n ) >= max_nlp_char_size and len(train[col].value_counts()\r\n ) <= int(0.9*len(train)) and col not in string_bool_vars:\r\n var_df.loc[var_df['index']==col,'nlp_strings'] = 1\r\n elif len(train[col].value_counts()) > cat_limit and len(train[col].value_counts()\r\n ) <= int(0.9*len(train)) and col not in string_bool_vars:\r\n var_df.loc[var_df['index']==col,'discrete_strings'] = 1\r\n elif len(train[col].value_counts()) > cat_limit and len(train[col].value_counts()\r\n ) == len(train) and col not in string_bool_vars:\r\n var_df.loc[var_df['index']==col,'id_col'] = 1\r\n else:\r\n var_df.loc[var_df['index']==col,'cat'] = 1\r\n nlp_vars = list(var_df[(var_df['nlp_strings'] ==1)]['index'])\r\n sum_all_cols['nlp_vars'] = nlp_vars\r\n discrete_string_vars = list(var_df[(var_df['discrete_strings'] ==1) ]['index'])\r\n sum_all_cols['discrete_string_vars'] = discrete_string_vars\r\n ###### This happens only if a string column happens to be an ID column #######\r\n #### DO NOT Add this to ID_VARS yet. 
It will be done later.. Dont change it easily...\r\n #### Category DTYPE vars are very special = they can be left as is and not disturbed in Python. ###\r\n var_df['dcat'] = var_df.apply(lambda x: 1 if str(x['type_of_column'])=='category' else 0,\r\n axis=1)\r\n factor_vars = list(var_df[(var_df['dcat'] ==1)]['index'])\r\n sum_all_cols['factor_vars'] = factor_vars\r\n ########################################################################\r\n date_or_id = var_df.apply(lambda x: 1 if x['type_of_column'] in [np.uint8,\r\n np.uint16, np.uint32, np.uint64,\r\n 'int8','int16',\r\n 'int32','int64'] and x[\r\n 'index'] not in string_bool_vars+num_bool_vars+discrete_string_vars+nlp_vars else 0,\r\n axis=1)\r\n ######### This is where we figure out whether a numeric col is date or id variable ###\r\n var_df['int'] = 0\r\n var_df['date_time'] = 0\r\n ### if a particular column is date-time type, now set it as a date time variable ##\r\n var_df['date_time'] = var_df.apply(lambda x: 1 if x['type_of_column'] in ['<M8[ns]','datetime64[ns]'] and x[\r\n 'index'] not in string_bool_vars+num_bool_vars+discrete_string_vars+nlp_vars else 0,\r\n axis=1)\r\n ### this is where we save them as date time variables ###\r\n if len(var_df.loc[date_or_id==1]) != 0:\r\n for col in var_df.loc[date_or_id==1]['index'].values.tolist():\r\n if len(train[col].value_counts()) == len(train):\r\n if train[col].min() < 1900 or train[col].max() > 2050:\r\n var_df.loc[var_df['index']==col,'id_col'] = 1\r\n else:\r\n try:\r\n pd.to_datetime(train[col],infer_datetime_format=True)\r\n var_df.loc[var_df['index']==col,'date_time'] = 1\r\n except:\r\n var_df.loc[var_df['index']==col,'id_col'] = 1\r\n else:\r\n if train[col].min() < 1900 or train[col].max() > 2050:\r\n if col not in num_bool_vars:\r\n var_df.loc[var_df['index']==col,'int'] = 1\r\n else:\r\n try:\r\n pd.to_datetime(train[col],infer_datetime_format=True)\r\n var_df.loc[var_df['index']==col,'date_time'] = 1\r\n except:\r\n if col not in num_bool_vars:\r\n var_df.loc[var_df['index']==col,'int'] = 1\r\n else:\r\n pass\r\n int_vars = list(var_df[(var_df['int'] ==1)]['index'])\r\n date_vars = list(var_df[(var_df['date_time'] == 1)]['index'])\r\n id_vars = list(var_df[(var_df['id_col'] == 1)]['index'])\r\n sum_all_cols['int_vars'] = int_vars\r\n copy_date_vars = copy.deepcopy(date_vars)\r\n for date_var in copy_date_vars:\r\n #### This test is to make sure sure date vars are actually date vars\r\n try:\r\n pd.to_datetime(train[date_var],infer_datetime_format=True)\r\n except:\r\n ##### if not a date var, then just add it to delete it from processing\r\n cols_delete.append(date_var)\r\n date_vars.remove(date_var)\r\n sum_all_cols['date_vars'] = date_vars\r\n sum_all_cols['id_vars'] = id_vars\r\n sum_all_cols['cols_delete'] = cols_delete\r\n ## This is an EXTREMELY complicated logic for cat vars. 
Don't change it unless you test it many times!\r\n var_df['numeric'] = 0\r\n float_or_cat = var_df.apply(lambda x: 1 if x['type_of_column'] in ['float16',\r\n 'float32','float64'] else 0,\r\n axis=1)\r\n if len(var_df.loc[float_or_cat == 1]) > 0:\r\n for col in var_df.loc[float_or_cat == 1]['index'].values.tolist():\r\n if len(train[col].value_counts()) > 2 and len(train[col].value_counts()\r\n ) <= float_limit and len(train[col].value_counts()) <= len(train):\r\n var_df.loc[var_df['index']==col,'cat'] = 1\r\n else:\r\n if col not in num_bool_vars:\r\n var_df.loc[var_df['index']==col,'numeric'] = 1\r\n cat_vars = list(var_df[(var_df['cat'] ==1)]['index'])\r\n continuous_vars = list(var_df[(var_df['numeric'] ==1)]['index'])\r\n ######## V E R Y I M P O R T A N T ###################################################\r\n ##### There are a couple of extra tests you need to do to remove abberations in cat_vars ###\r\n cat_vars_copy = copy.deepcopy(cat_vars)\r\n for cat in cat_vars_copy:\r\n if df_preds[cat].dtype==float:\r\n continuous_vars.append(cat)\r\n cat_vars.remove(cat)\r\n var_df.loc[var_df['index']==cat,'cat'] = 0\r\n var_df.loc[var_df['index']==cat,'numeric'] = 1\r\n elif len(df_preds[cat].value_counts()) == df_preds.shape[0]:\r\n id_vars.append(cat)\r\n cat_vars.remove(cat)\r\n var_df.loc[var_df['index']==cat,'cat'] = 0\r\n var_df.loc[var_df['index']==cat,'id_col'] = 1\r\n sum_all_cols['cat_vars'] = cat_vars\r\n sum_all_cols['continuous_vars'] = continuous_vars\r\n sum_all_cols['id_vars'] = id_vars\r\n ###### This is where you consoldate the numbers ###########\r\n var_dict_sum = dict(zip(var_df.values[:,0], var_df.values[:,2:].sum(1)))\r\n for col, sumval in var_dict_sum.items():\r\n if sumval == 0:\r\n print('%s of type=%s is not classified' %(col,train[col].dtype))\r\n elif sumval > 1:\r\n print('%s of type=%s is classified into more then one type' %(col,train[col].dtype))\r\n else:\r\n pass\r\n ############### This is where you print all the types of variables ##############\r\n ####### Returns 8 vars in the following order: continuous_vars,int_vars,cat_vars,\r\n ### string_bool_vars,discrete_string_vars,nlp_vars,date_or_id_vars,cols_delete\r\n if verbose == 1:\r\n print(\" Number of Numeric Columns = \", len(continuous_vars))\r\n print(\" Number of Integer-Categorical Columns = \", len(int_vars))\r\n print(\" Number of String-Categorical Columns = \", len(cat_vars))\r\n print(\" Number of Factor-Categorical Columns = \", len(factor_vars))\r\n print(\" Number of String-Boolean Columns = \", len(string_bool_vars))\r\n print(\" Number of Numeric-Boolean Columns = \", len(num_bool_vars))\r\n print(\" Number of Discrete String Columns = \", len(discrete_string_vars))\r\n print(\" Number of NLP String Columns = \", len(nlp_vars))\r\n print(\" Number of Date Time Columns = \", len(date_vars))\r\n print(\" Number of ID Columns = \", len(id_vars))\r\n print(\" Number of Columns to Delete = \", len(cols_delete))\r\n if verbose == 2:\r\n marthas_columns(df_preds,verbose=1)\r\n print(\" Numeric Columns: %s\" %continuous_vars[:max_cols_to_print])\r\n print(\" Integer-Categorical Columns: %s\" %int_vars[:max_cols_to_print])\r\n print(\" String-Categorical Columns: %s\" %cat_vars[:max_cols_to_print])\r\n print(\" Factor-Categorical Columns: %s\" %factor_vars[:max_cols_to_print])\r\n print(\" String-Boolean Columns: %s\" %string_bool_vars[:max_cols_to_print])\r\n print(\" Numeric-Boolean Columns: %s\" %num_bool_vars[:max_cols_to_print])\r\n print(\" Discrete String Columns: %s\" 
%discrete_string_vars[:max_cols_to_print])\r\n print(\" NLP text Columns: %s\" %nlp_vars[:max_cols_to_print])\r\n print(\" Date Time Columns: %s\" %date_vars[:max_cols_to_print])\r\n print(\" ID Columns: %s\" %id_vars[:max_cols_to_print])\r\n print(\" Columns that will not be considered in modeling: %s\" %cols_delete[:max_cols_to_print])\r\n ##### now collect all the column types and column names into a single dictionary to return!\r\n len_sum_all_cols = reduce(add,[len(v) for v in sum_all_cols.values()])\r\n if len_sum_all_cols == orig_cols_total:\r\n print(' %d Predictors classified...' %orig_cols_total)\r\n #print(' This does not include the Target column(s)')\r\n else:\r\n print('No of columns classified %d does not match %d total cols. Continuing...' %(\r\n len_sum_all_cols, orig_cols_total))\r\n ls = sum_all_cols.values()\r\n flat_list = [item for sublist in ls for item in sublist]\r\n if len(left_subtract(list(train),flat_list)) == 0:\r\n print(' Missing columns = None')\r\n else:\r\n print(' Missing columns = %s' %left_subtract(list(train),flat_list))\r\n return sum_all_cols",
"def topdia(x):\r\n return Feature(x, \"TopDia\")",
"def my_featurize(apartment):\n return x, y",
"def cat2onehot_var(snt_ids, vocab_size, batch_size):\n\n targets = np.array([snt_ids]).reshape(-1)\n one_hot_targets = np.eye(vocab_size)[targets]\n result = Variable(torch.FloatTensor(one_hot_targets).view(-1, batch_size, vocab_size)) #\n\n return result",
"def restrict(self, variable, value):\n # 新因子变量列表为原因子变量列表减去被求和的变量\n restricted_variable = self.var_list.index(variable)\n new_var_list = self.var_list[:restricted_variable] + self.var_list[restricted_variable + 1:]\n\n # 对于新因子CPT中的表项,其值为原因子中与新因子变量取值相同且限制变量取对应值的的单个表项的值\n new_cpt = {}\n if restricted_variable == 0:\n for j in range(pow(2, len(new_var_list) - restricted_variable)):\n postfix = Util.to_binary(j, len(new_var_list) - restricted_variable)\n if len(self.var_list) == 1:\n postfix = ''\n new_cpt[postfix] = self.cpt[str(value) + postfix]\n elif restricted_variable == len(self.var_list) - 1:\n for i in range(pow(2, restricted_variable)):\n prefix = Util.to_binary(i, restricted_variable)\n new_cpt[prefix] = self.cpt[prefix + str(value)]\n else:\n for i in range(pow(2, restricted_variable)):\n prefix = Util.to_binary(i, restricted_variable)\n for j in range(pow(2, len(new_var_list) - restricted_variable)):\n postfix = Util.to_binary(j, len(new_var_list) - restricted_variable)\n new_cpt[prefix + postfix] = self.cpt[prefix + str(value) + postfix]\n new_node = Node('f' + str(new_var_list), new_var_list)\n new_node.set_cpt(new_cpt)\n return new_node",
"def make_features(x):\n x = x.unsqueeze(1)\n # torch.cat 实现tensor拼接\n return torch.cat([x ** i for i in range(1, POLY_DEGREE + 1)], 1)",
"def categorical(df):\n\n # variables which need to be transformed to categorical\n categorical = [\"prop_country_id\", \"visitor_location_country_id\"]\n\n for var in categorical:\n df = pd.concat([df, pd.get_dummies(df[var], prefix=var)], axis=1)\n del df[var]\n\n return df",
"def get_categorical_features(self, x: pd.DataFrame) -> pd.DataFrame:\n return x[self.categorical_features]",
"def collapse_var(nc, out, name, direction):\n var1 = nc.variables[name]\n N = (len(nc.dimensions[direction]) - 1) / 2\n\n print(\"Processing %s...\" % name)\n dims = var1.dimensions\n if len(dims) > 1: # only collapse spatial fields\n dims = [x for x in dims if x != direction]\n\n try:\n fill_value = var1._FillValue\n var2 = out.createVariable(name, var1.dtype,\n dimensions=dims, fill_value=fill_value)\n except:\n var2 = out.createVariable(name, var1.dtype,\n dimensions=dims)\n\n copy_attributes(var1, var2)\n\n if direction == 'x':\n var2[:] = var1[get_slice(var1.dimensions, x=N)]\n elif direction == 'y':\n var2[:] = var1[get_slice(var1.dimensions, y=N)]",
"def apply_randomization(features, label, randomize_prob):\n rnd_tok = lambda: tf.as_string(tf.random.uniform([], 0, 99999999, tf.int32))\n\n for idx in CAT_FEATURE_INDICES:\n key = feature_name(idx)\n # Ignore lint since tf.cond should evaluate lambda immediately.\n features[key] = tf.cond(tf.random.uniform([]) < randomize_prob,\n rnd_tok,\n lambda: features[key]) # pylint: disable=cell-var-from-loop\n return features, label",
"def dict_to_feature(feature_dict, keys, max_value=None):\n feature = []\n for key, val in feature_dict.items(): # First level\n if key not in keys:\n continue\n if val is None or val == \"auto\" or key == \"autotuning\" or val == \"\":\n continue\n if isinstance(val, dict):\n feature.append(dict_to_feature(val, max_value))\n else:\n feature.append(float(val))\n\n # normalization, should not matter in tree models\n if max_value is not None:\n norm_feature = []\n for f, mv in zip(feature, max_value):\n norm_feature.append(f / mv)\n feature = norm_feature\n\n return feature",
"def get_optimal_discrete_feature_split(\n self, X: np.ndarray, y: np.ndarray, feature_col: int\n ):\n return BaseTree.get_discrete_split_value(\n X[:, feature_col], y, eval_func=self.eval_func\n )",
"def preprocess_sf(bd, var):\n filepath_svf = f\"team67-ptp/data/{var}.ftr\"\n filepath = bd\n data = feather.read_dataframe(filepath)\n df = data.copy()\n df2 = df[var]\n df2 = df2.to_frame()\n if df2[var].dtype is \"category\":\n df2[var] = df2[var].astype(\"category\").cat.codes\n filename = filepath_svf\n df2.to_feather(filename)\n print(\"Succesfully exported to feather\")\n else:\n filename = filepath_svf\n df2.to_feather(filename)\n print(\"Succesfully exported to feather\")",
"def cvxopt2dense(value):\n return np.array(value)",
"def xx(x):\r\n return Feature(x, \"XX\")"
] | [
"0.7855145",
"0.5295992",
"0.5193816",
"0.51229674",
"0.5071455",
"0.50275296",
"0.4994465",
"0.49909624",
"0.48956412",
"0.4888899",
"0.48470324",
"0.47712082",
"0.4754599",
"0.4749255",
"0.47370207",
"0.47250566",
"0.46894085",
"0.46833327",
"0.46548498",
"0.4644874",
"0.46404266",
"0.4620488",
"0.46150672",
"0.4604797",
"0.4603583",
"0.45971718",
"0.4595794",
"0.45830557",
"0.45722824",
"0.4536436"
] | 0.8257702 | 0 |
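The positive document in the record above is Python 2 code (note the xrange call). Here is a minimal, self-contained Python 3 sketch of the same clamp-and-one-hot pattern the query describes; the snake_case name is a placeholder, not part of the dataset.

    def disc_var_to_feature(var, varname, lims=(1, 3), collapse=(False, False), ctxt='Has'):
        # One boolean feature per integer bucket in [lims[0], lims[1]].
        keystr = ctxt + ' %s ' + varname
        fdict = {keystr % v: False for v in range(lims[0], lims[1] + 1)}
        if collapse[0]:
            var = max(var, lims[0])  # values below the range collapse onto the lower limit
        if collapse[1]:
            var = min(var, lims[1])  # values above the range collapse onto the upper limit
        if lims[0] <= var <= lims[1]:  # otherwise every feature stays False
            fdict[keystr % var] = True
        return fdict

    print(disc_var_to_feature(8, 'positive adjective', lims=(1, 5), collapse=(True, True)))
    # {'Has 1 positive adjective': False, ..., 'Has 5 positive adjective': True}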
Discrete Variable to Feature Convertor. var = value of variable. varname = name of variable. lims = range of discretization. collapse = list with two binary vals: collapse all below lims[0] to lims[0] & collapse all above lims[1] to lims[1]. e.g., fdict = discVar2FeatureOld(8, 'positive adjective', lims = [1,5], collapse = [True, True]) contains one positive adjective False contains two positive adjective False contains four positive adjective False contains three positive adjective False contains five positive adjective True | def discVar2FeatureOld( var, varname, lims = [1,5], collapse = [False, False], ctxt = 'contains'):
nums = ['zero','one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine', 'ten']
vals = range(lims[0], lims[1]+1)
#init fdict
fdict = dict()
for k, val in enumerate(vals):
fdict[(ctxt + ' %s ' + varname) % (nums[val])] = False
if collapse[0] == True: var = max([var, lims[0]])
if collapse[1] == True: var = min([var, lims[1]])
if var >= lims[0] and var <= lims[1]: #if collapse = False, ignore vals outside lims
fdict[(ctxt + ' %s ' + varname) % (nums[var])] = True
return fdict | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def discVar2Feature( var, varname, lims = [1,3], collapse = [False, False], ctxt = 'Has'):\n \n vals = xrange(lims[0], lims[1]+1) \n \n keystr = ctxt + ' %s ' + varname\n fdict = {keystr % val:False for val in vals} \n\n if collapse[0] == True:\n if lims[0] > var:\n var = lims[0]\n #var = max([var, lims[0]])\n if collapse[1] == True:\n if lims[1] < var:\n var = lims[1]\n #var = min([var, lims[1]])\n \n if var >= lims[0] and var <= lims[1]: #if collapse = False, ignore vals outside lims\n fdict[(keystr) % (var)] = True \n \n return fdict",
"def convertToDiscreteFunction(boolean: bool) -> cern.japc.value.DiscreteFunction:\n ...",
"def FE_discretize_numeric_variables(train, bin_dict, test='', strategy='kmeans',verbose=0):\r\n df = copy.deepcopy(train)\r\n test = copy.deepcopy(test)\r\n num_cols = len(bin_dict)\r\n nrows = int((num_cols/2)+0.5)\r\n #print('nrows',nrows)\r\n if verbose:\r\n fig = plt.figure(figsize=(10,3*num_cols))\r\n for i, (col, binvalue) in enumerate(bin_dict.items()):\r\n new_col = col+'_discrete'\r\n if strategy == 'gaussian':\r\n kbd = GaussianMixture(n_components=binvalue, random_state=99)\r\n df[new_col] = kbd.fit_predict(df[[col]]).astype(int)\r\n if not isinstance(test, str):\r\n test[new_col] = kbd.predict(test[[col]]).astype(int)\r\n else:\r\n kbd = KBinsDiscretizer(n_bins=binvalue, encode='ordinal', strategy=strategy)\r\n df[new_col] = kbd.fit_transform(df[[col]]).astype(int)\r\n if not isinstance(test, str):\r\n test[new_col] = kbd.transform(test[[col]]).astype(int)\r\n if verbose:\r\n ax1 = plt.subplot(nrows,2,i+1)\r\n ax1.scatter(df[col],df[new_col])\r\n ax1.set_title(new_col)\r\n if not isinstance(test, str):\r\n return df, test\r\n else:\r\n return df",
"def denseFeature(self, feat):\n return {'feat': feat}",
"def convertToDiscreteFunctionList(boolean: bool) -> cern.japc.value.DiscreteFunctionList:\n ...",
"def preprocess_var(bd, var):\n filepath_sv = f\"team67-ptp/data/{var}.csv\"\n filepath = bd\n data = feather.read_dataframe(filepath)\n df = data.copy()\n df2 = df[var]\n df2 = df2.to_frame()\n if df2[var].dtype is \"category\":\n df2[var] = df2[var].astype(\"category\").cat.codes\n filename = filepath_sv\n df2.to_csv(filename, index=False)\n print(\"Succesfully exported to csv\")\n else:\n filename = filepath_sv\n df2.to_csv(filename, index=False)\n print(\"Succesfully exported to csv\")",
"def addFeature(self, strName, lstDomain):\n # create a new variable CSPVariable object\n newFeature = CSPFeature(strName, lstDomain)\n # put the new variable in the graph's list of variables\n self.features.append(newFeature)",
"def select_var_feature(adata, min_score=0.5, nb_features=None, show=True, copy=False):\n if copy:\n inplace=False\n else:\n inplace=True\n\n adata = adata.copy() if not inplace else adata\n \n # calculate variability score\n cal_var(adata, show=show) # adds variability score for each feature \n # adata.var['variablility_score'] = abs(adata.var['prop_shared_cells']-0.5)\n var_annot = adata.var.sort_values(ascending=True, by ='variability_score')\n\n # calculate the min score to get a specific number of feature \n if nb_features != None and nb_features < len(adata.var_names): \n min_score = var_annot['variability_score'][nb_features] \n \n \n adata_tmp = adata[:,adata.var['variability_score']<=min_score].copy()\n \n ## return the filtered AnnData objet.\n if not inplace:\n adata_tmp = adata[:,adata.var['variability_score']<=min_score]\n return(adata_tmp)\n else:\n adata._inplace_subset_var(adata.var['variability_score']<=min_score)",
"def dense2cvxopt(value):\n import cvxopt\n return cvxopt.matrix(value, tc='d')",
"def feature_discretion(self, X, y):\n temp, X_interval = [], []\n if self._DISCRETION == \"percentile_discrete\":\n for i in range(0, X.shape[-1]):\n x = X[:, i]\n x_type = type_of_target(x)\n # logging.info(\"before: \"+\" \".join([str(i), str(set(X[:, i])), str(x_type)]))\n if 0:\n if x_type == 'continuous':\n x1, interval = self.percentile_discrete(x, self._WOE_N)\n X_interval.append(interval)\n temp.append(x1)\n # logging.info(\"continue_after: \" + \" \".join([str(i), str(set(x1)), str(x1)]))\n else:\n temp.append(x)\n # logging.info(\"after: \" + \" \".join([str(i), str(set(x)), str(x)]))\n else:\n x1, interval = self.percentile_discrete(x, self._WOE_N)\n X_interval.append(interval)\n temp.append(x1)\n # logging.info(\"continue_after: \" + \" \".join([str(i), str(set(x1)), str(x1)]))\n elif self._DISCRETION == \"interval_discrete\":\n for i in range(0, X.shape[-1]):\n x = X[:, i]\n # logging.info(\"before: \"+\" \".join([str(i), str(set(X[:, i]))]))\n x1, interval = self.interval_discrete(x, self._WOE_N)\n X_interval.append(interval)\n temp.append(x1)\n # logging.info(\"interval_after: \" + \" \".join([str(i), str(set(x1)), str(x1)]))\n elif self._DISCRETION == \"rf_discrete\":\n for i in range(0, X.shape[-1]):\n x = X[:, i]\n # logging.info(\"before: \"+\" \".join([str(i), str(set(X[:, i]))]))\n x1, interval = self.rf_discrete(x, y)\n X_interval.append(interval)\n temp.append(x1)\n # logging.info(\"rf_after: \" + \" \".join([str(i), str(set(x1)), str(x1)]))\n return np.array(temp).T, X_interval",
"def dict_to_feature(d):\n f = ee.Feature(None,ee.Dictionary(d))\n return f",
"def my_featurize(apartment):\n col =np.array([1, 2, 0, 0, 0, 0, 0, 0 ])\n a= pd.DataFrame(apartment[col])\n if(apartment.get('condition')== 'good'):\n col[1] =1\n else:\n if(apartment.get('condition')== 'zero condition'):\n col[1] = 0\n col[2] =apartment.get('num_rooms')\n col[3] =apartment.get('area')\n col[4] =apartment.get('num_bathrooms')\n col[5] =apartment.get('floor')\n col[6] =apartment.get('ceiling_height')\n col[7] =apartment.get('max_floor')\n\n return col, apartment['price']",
"def get_feature_set_SC2(tweet, sentimentvalues):\n pos_tag_freq = {}\n additional_freq = {}\n for phrase in tweet.tagged_words:\n for word in phrase:\n try:\n tag = word['pos']\n pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n if tag in ADJECTIVES:\n additional_freq['adjectives'] = additional_freq.get(tag, 0) + 1\n elif tag in ADVERBS: \n additional_freq['adverbs'] = additional_freq.get(tag, 0) + 1\n elif tag in PRONOUNS:\n additional_freq['pronoun'] = 1\n except KeyError:\n continue\n for key in pos_tag_freq.keys():\n pos_tag_freq[key] = pos_tag_freq[key]*1.0\n #number of adjectives in sentence, number of adverbs in sentence(except ikke), pronoun in sentence(binary) \n #Number of exclamation marks, number of emoticons,\n emoticons = tweet.nrof_happyemoticons+tweet.nrof_sademoticons\n if emoticons>0:\n additional_freq['emoticons'] = emoticons*1.0\n if tweet.nrof_exclamations>0:\n additional_freq['exclamations'] = tweet.nrof_exclamations*1.0\n \n #Add lexicon values\n #total subjectivity score from word polarities, total objectivity score, number of subjective words, number of objective words, e\n sub_score = sentimentvalues[0]+sentimentvalues[1]\n obj_score = sentimentvalues[2]\n if sub_score>0:\n additional_freq[\"sub_score\"] = sub_score+1.0\n if obj_score>0:\n additional_freq[\"obj_score\"] = obj_score+1.0\n \n #Concatenate the dicts\n features= dict(pos_tag_freq.items() + additional_freq.items())\n \n return features",
"def _make_features(self, x):\n\t\tx = x.unsqueeze(1)\n\t\treturn torch.cat([x ** i for i in range(1, self._degree+1)], 1)",
"def preprocess_feature(df):",
"def classify_columns(df_preds, verbose=0):\r\n train = copy.deepcopy(df_preds)\r\n #### If there are 30 chars are more in a discrete_string_var, it is then considered an NLP variable\r\n max_nlp_char_size = 30\r\n max_cols_to_print = 30\r\n print('############## C L A S S I F Y I N G V A R I A B L E S ####################')\r\n print('Classifying variables in data set...')\r\n #### Cat_Limit defines the max number of categories a column can have to be called a categorical colum\r\n cat_limit = 35\r\n float_limit = 15 #### Make this limit low so that float variables below this limit become cat vars ###\r\n def add(a,b):\r\n return a+b\r\n sum_all_cols = dict()\r\n orig_cols_total = train.shape[1]\r\n #Types of columns\r\n cols_delete = [col for col in list(train) if (len(train[col].value_counts()) == 1\r\n ) | (train[col].isnull().sum()/len(train) >= 0.90)]\r\n train = train[left_subtract(list(train),cols_delete)]\r\n var_df = pd.Series(dict(train.dtypes)).reset_index(drop=False).rename(\r\n columns={0:'type_of_column'})\r\n sum_all_cols['cols_delete'] = cols_delete\r\n var_df['bool'] = var_df.apply(lambda x: 1 if x['type_of_column'] in ['bool','object']\r\n and len(train[x['index']].value_counts()) == 2 else 0, axis=1)\r\n string_bool_vars = list(var_df[(var_df['bool'] ==1)]['index'])\r\n sum_all_cols['string_bool_vars'] = string_bool_vars\r\n var_df['num_bool'] = var_df.apply(lambda x: 1 if x['type_of_column'] in [np.uint8,\r\n np.uint16, np.uint32, np.uint64,\r\n 'int8','int16','int32','int64',\r\n 'float16','float32','float64'] and len(\r\n train[x['index']].value_counts()) == 2 else 0, axis=1)\r\n num_bool_vars = list(var_df[(var_df['num_bool'] ==1)]['index'])\r\n sum_all_cols['num_bool_vars'] = num_bool_vars\r\n ###### This is where we take all Object vars and split them into diff kinds ###\r\n discrete_or_nlp = var_df.apply(lambda x: 1 if x['type_of_column'] in ['object'] and x[\r\n 'index'] not in string_bool_vars+cols_delete else 0,axis=1)\r\n ######### This is where we figure out whether a string var is nlp or discrete_string var ###\r\n var_df['nlp_strings'] = 0\r\n var_df['discrete_strings'] = 0\r\n var_df['cat'] = 0\r\n var_df['id_col'] = 0\r\n discrete_or_nlp_vars = var_df.loc[discrete_or_nlp==1]['index'].values.tolist()\r\n if len(var_df.loc[discrete_or_nlp==1]) != 0:\r\n for col in discrete_or_nlp_vars:\r\n #### first fill empty or missing vals since it will blowup ###\r\n train[col] = train[col].fillna(' ')\r\n if train[col].map(lambda x: len(x) if type(x)==str else 0).mean(\r\n ) >= max_nlp_char_size and len(train[col].value_counts()\r\n ) <= int(0.9*len(train)) and col not in string_bool_vars:\r\n var_df.loc[var_df['index']==col,'nlp_strings'] = 1\r\n elif len(train[col].value_counts()) > cat_limit and len(train[col].value_counts()\r\n ) <= int(0.9*len(train)) and col not in string_bool_vars:\r\n var_df.loc[var_df['index']==col,'discrete_strings'] = 1\r\n elif len(train[col].value_counts()) > cat_limit and len(train[col].value_counts()\r\n ) == len(train) and col not in string_bool_vars:\r\n var_df.loc[var_df['index']==col,'id_col'] = 1\r\n else:\r\n var_df.loc[var_df['index']==col,'cat'] = 1\r\n nlp_vars = list(var_df[(var_df['nlp_strings'] ==1)]['index'])\r\n sum_all_cols['nlp_vars'] = nlp_vars\r\n discrete_string_vars = list(var_df[(var_df['discrete_strings'] ==1) ]['index'])\r\n sum_all_cols['discrete_string_vars'] = discrete_string_vars\r\n ###### This happens only if a string column happens to be an ID column #######\r\n #### DO NOT Add this to ID_VARS yet. 
It will be done later.. Dont change it easily...\r\n #### Category DTYPE vars are very special = they can be left as is and not disturbed in Python. ###\r\n var_df['dcat'] = var_df.apply(lambda x: 1 if str(x['type_of_column'])=='category' else 0,\r\n axis=1)\r\n factor_vars = list(var_df[(var_df['dcat'] ==1)]['index'])\r\n sum_all_cols['factor_vars'] = factor_vars\r\n ########################################################################\r\n date_or_id = var_df.apply(lambda x: 1 if x['type_of_column'] in [np.uint8,\r\n np.uint16, np.uint32, np.uint64,\r\n 'int8','int16',\r\n 'int32','int64'] and x[\r\n 'index'] not in string_bool_vars+num_bool_vars+discrete_string_vars+nlp_vars else 0,\r\n axis=1)\r\n ######### This is where we figure out whether a numeric col is date or id variable ###\r\n var_df['int'] = 0\r\n var_df['date_time'] = 0\r\n ### if a particular column is date-time type, now set it as a date time variable ##\r\n var_df['date_time'] = var_df.apply(lambda x: 1 if x['type_of_column'] in ['<M8[ns]','datetime64[ns]'] and x[\r\n 'index'] not in string_bool_vars+num_bool_vars+discrete_string_vars+nlp_vars else 0,\r\n axis=1)\r\n ### this is where we save them as date time variables ###\r\n if len(var_df.loc[date_or_id==1]) != 0:\r\n for col in var_df.loc[date_or_id==1]['index'].values.tolist():\r\n if len(train[col].value_counts()) == len(train):\r\n if train[col].min() < 1900 or train[col].max() > 2050:\r\n var_df.loc[var_df['index']==col,'id_col'] = 1\r\n else:\r\n try:\r\n pd.to_datetime(train[col],infer_datetime_format=True)\r\n var_df.loc[var_df['index']==col,'date_time'] = 1\r\n except:\r\n var_df.loc[var_df['index']==col,'id_col'] = 1\r\n else:\r\n if train[col].min() < 1900 or train[col].max() > 2050:\r\n if col not in num_bool_vars:\r\n var_df.loc[var_df['index']==col,'int'] = 1\r\n else:\r\n try:\r\n pd.to_datetime(train[col],infer_datetime_format=True)\r\n var_df.loc[var_df['index']==col,'date_time'] = 1\r\n except:\r\n if col not in num_bool_vars:\r\n var_df.loc[var_df['index']==col,'int'] = 1\r\n else:\r\n pass\r\n int_vars = list(var_df[(var_df['int'] ==1)]['index'])\r\n date_vars = list(var_df[(var_df['date_time'] == 1)]['index'])\r\n id_vars = list(var_df[(var_df['id_col'] == 1)]['index'])\r\n sum_all_cols['int_vars'] = int_vars\r\n copy_date_vars = copy.deepcopy(date_vars)\r\n for date_var in copy_date_vars:\r\n #### This test is to make sure sure date vars are actually date vars\r\n try:\r\n pd.to_datetime(train[date_var],infer_datetime_format=True)\r\n except:\r\n ##### if not a date var, then just add it to delete it from processing\r\n cols_delete.append(date_var)\r\n date_vars.remove(date_var)\r\n sum_all_cols['date_vars'] = date_vars\r\n sum_all_cols['id_vars'] = id_vars\r\n sum_all_cols['cols_delete'] = cols_delete\r\n ## This is an EXTREMELY complicated logic for cat vars. 
Don't change it unless you test it many times!\r\n var_df['numeric'] = 0\r\n float_or_cat = var_df.apply(lambda x: 1 if x['type_of_column'] in ['float16',\r\n 'float32','float64'] else 0,\r\n axis=1)\r\n if len(var_df.loc[float_or_cat == 1]) > 0:\r\n for col in var_df.loc[float_or_cat == 1]['index'].values.tolist():\r\n if len(train[col].value_counts()) > 2 and len(train[col].value_counts()\r\n ) <= float_limit and len(train[col].value_counts()) <= len(train):\r\n var_df.loc[var_df['index']==col,'cat'] = 1\r\n else:\r\n if col not in num_bool_vars:\r\n var_df.loc[var_df['index']==col,'numeric'] = 1\r\n cat_vars = list(var_df[(var_df['cat'] ==1)]['index'])\r\n continuous_vars = list(var_df[(var_df['numeric'] ==1)]['index'])\r\n ######## V E R Y I M P O R T A N T ###################################################\r\n ##### There are a couple of extra tests you need to do to remove abberations in cat_vars ###\r\n cat_vars_copy = copy.deepcopy(cat_vars)\r\n for cat in cat_vars_copy:\r\n if df_preds[cat].dtype==float:\r\n continuous_vars.append(cat)\r\n cat_vars.remove(cat)\r\n var_df.loc[var_df['index']==cat,'cat'] = 0\r\n var_df.loc[var_df['index']==cat,'numeric'] = 1\r\n elif len(df_preds[cat].value_counts()) == df_preds.shape[0]:\r\n id_vars.append(cat)\r\n cat_vars.remove(cat)\r\n var_df.loc[var_df['index']==cat,'cat'] = 0\r\n var_df.loc[var_df['index']==cat,'id_col'] = 1\r\n sum_all_cols['cat_vars'] = cat_vars\r\n sum_all_cols['continuous_vars'] = continuous_vars\r\n sum_all_cols['id_vars'] = id_vars\r\n ###### This is where you consoldate the numbers ###########\r\n var_dict_sum = dict(zip(var_df.values[:,0], var_df.values[:,2:].sum(1)))\r\n for col, sumval in var_dict_sum.items():\r\n if sumval == 0:\r\n print('%s of type=%s is not classified' %(col,train[col].dtype))\r\n elif sumval > 1:\r\n print('%s of type=%s is classified into more then one type' %(col,train[col].dtype))\r\n else:\r\n pass\r\n ############### This is where you print all the types of variables ##############\r\n ####### Returns 8 vars in the following order: continuous_vars,int_vars,cat_vars,\r\n ### string_bool_vars,discrete_string_vars,nlp_vars,date_or_id_vars,cols_delete\r\n if verbose == 1:\r\n print(\" Number of Numeric Columns = \", len(continuous_vars))\r\n print(\" Number of Integer-Categorical Columns = \", len(int_vars))\r\n print(\" Number of String-Categorical Columns = \", len(cat_vars))\r\n print(\" Number of Factor-Categorical Columns = \", len(factor_vars))\r\n print(\" Number of String-Boolean Columns = \", len(string_bool_vars))\r\n print(\" Number of Numeric-Boolean Columns = \", len(num_bool_vars))\r\n print(\" Number of Discrete String Columns = \", len(discrete_string_vars))\r\n print(\" Number of NLP String Columns = \", len(nlp_vars))\r\n print(\" Number of Date Time Columns = \", len(date_vars))\r\n print(\" Number of ID Columns = \", len(id_vars))\r\n print(\" Number of Columns to Delete = \", len(cols_delete))\r\n if verbose == 2:\r\n marthas_columns(df_preds,verbose=1)\r\n print(\" Numeric Columns: %s\" %continuous_vars[:max_cols_to_print])\r\n print(\" Integer-Categorical Columns: %s\" %int_vars[:max_cols_to_print])\r\n print(\" String-Categorical Columns: %s\" %cat_vars[:max_cols_to_print])\r\n print(\" Factor-Categorical Columns: %s\" %factor_vars[:max_cols_to_print])\r\n print(\" String-Boolean Columns: %s\" %string_bool_vars[:max_cols_to_print])\r\n print(\" Numeric-Boolean Columns: %s\" %num_bool_vars[:max_cols_to_print])\r\n print(\" Discrete String Columns: %s\" 
%discrete_string_vars[:max_cols_to_print])\r\n print(\" NLP text Columns: %s\" %nlp_vars[:max_cols_to_print])\r\n print(\" Date Time Columns: %s\" %date_vars[:max_cols_to_print])\r\n print(\" ID Columns: %s\" %id_vars[:max_cols_to_print])\r\n print(\" Columns that will not be considered in modeling: %s\" %cols_delete[:max_cols_to_print])\r\n ##### now collect all the column types and column names into a single dictionary to return!\r\n len_sum_all_cols = reduce(add,[len(v) for v in sum_all_cols.values()])\r\n if len_sum_all_cols == orig_cols_total:\r\n print(' %d Predictors classified...' %orig_cols_total)\r\n #print(' This does not include the Target column(s)')\r\n else:\r\n print('No of columns classified %d does not match %d total cols. Continuing...' %(\r\n len_sum_all_cols, orig_cols_total))\r\n ls = sum_all_cols.values()\r\n flat_list = [item for sublist in ls for item in sublist]\r\n if len(left_subtract(list(train),flat_list)) == 0:\r\n print(' Missing columns = None')\r\n else:\r\n print(' Missing columns = %s' %left_subtract(list(train),flat_list))\r\n return sum_all_cols",
"def topdia(x):\r\n return Feature(x, \"TopDia\")",
"def my_featurize(apartment):\n return x, y",
"def cat2onehot_var(snt_ids, vocab_size, batch_size):\n\n targets = np.array([snt_ids]).reshape(-1)\n one_hot_targets = np.eye(vocab_size)[targets]\n result = Variable(torch.FloatTensor(one_hot_targets).view(-1, batch_size, vocab_size)) #\n\n return result",
"def restrict(self, variable, value):\n # 新因子变量列表为原因子变量列表减去被求和的变量\n restricted_variable = self.var_list.index(variable)\n new_var_list = self.var_list[:restricted_variable] + self.var_list[restricted_variable + 1:]\n\n # 对于新因子CPT中的表项,其值为原因子中与新因子变量取值相同且限制变量取对应值的的单个表项的值\n new_cpt = {}\n if restricted_variable == 0:\n for j in range(pow(2, len(new_var_list) - restricted_variable)):\n postfix = Util.to_binary(j, len(new_var_list) - restricted_variable)\n if len(self.var_list) == 1:\n postfix = ''\n new_cpt[postfix] = self.cpt[str(value) + postfix]\n elif restricted_variable == len(self.var_list) - 1:\n for i in range(pow(2, restricted_variable)):\n prefix = Util.to_binary(i, restricted_variable)\n new_cpt[prefix] = self.cpt[prefix + str(value)]\n else:\n for i in range(pow(2, restricted_variable)):\n prefix = Util.to_binary(i, restricted_variable)\n for j in range(pow(2, len(new_var_list) - restricted_variable)):\n postfix = Util.to_binary(j, len(new_var_list) - restricted_variable)\n new_cpt[prefix + postfix] = self.cpt[prefix + str(value) + postfix]\n new_node = Node('f' + str(new_var_list), new_var_list)\n new_node.set_cpt(new_cpt)\n return new_node",
"def make_features(x):\n x = x.unsqueeze(1)\n # torch.cat 实现tensor拼接\n return torch.cat([x ** i for i in range(1, POLY_DEGREE + 1)], 1)",
"def dict_to_feature(feature_dict, keys, max_value=None):\n feature = []\n for key, val in feature_dict.items(): # First level\n if key not in keys:\n continue\n if val is None or val == \"auto\" or key == \"autotuning\" or val == \"\":\n continue\n if isinstance(val, dict):\n feature.append(dict_to_feature(val, max_value))\n else:\n feature.append(float(val))\n\n # normalization, should not matter in tree models\n if max_value is not None:\n norm_feature = []\n for f, mv in zip(feature, max_value):\n norm_feature.append(f / mv)\n feature = norm_feature\n\n return feature",
"def categorical(df):\n\n # variables which need to be transformed to categorical\n categorical = [\"prop_country_id\", \"visitor_location_country_id\"]\n\n for var in categorical:\n df = pd.concat([df, pd.get_dummies(df[var], prefix=var)], axis=1)\n del df[var]\n\n return df",
"def collapse_var(nc, out, name, direction):\n var1 = nc.variables[name]\n N = (len(nc.dimensions[direction]) - 1) / 2\n\n print(\"Processing %s...\" % name)\n dims = var1.dimensions\n if len(dims) > 1: # only collapse spatial fields\n dims = [x for x in dims if x != direction]\n\n try:\n fill_value = var1._FillValue\n var2 = out.createVariable(name, var1.dtype,\n dimensions=dims, fill_value=fill_value)\n except:\n var2 = out.createVariable(name, var1.dtype,\n dimensions=dims)\n\n copy_attributes(var1, var2)\n\n if direction == 'x':\n var2[:] = var1[get_slice(var1.dimensions, x=N)]\n elif direction == 'y':\n var2[:] = var1[get_slice(var1.dimensions, y=N)]",
"def get_categorical_features(self, x: pd.DataFrame) -> pd.DataFrame:\n return x[self.categorical_features]",
"def preprocess_sf(bd, var):\n filepath_svf = f\"team67-ptp/data/{var}.ftr\"\n filepath = bd\n data = feather.read_dataframe(filepath)\n df = data.copy()\n df2 = df[var]\n df2 = df2.to_frame()\n if df2[var].dtype is \"category\":\n df2[var] = df2[var].astype(\"category\").cat.codes\n filename = filepath_svf\n df2.to_feather(filename)\n print(\"Succesfully exported to feather\")\n else:\n filename = filepath_svf\n df2.to_feather(filename)\n print(\"Succesfully exported to feather\")",
"def cvxopt2dense(value):\n return np.array(value)",
"def apply_randomization(features, label, randomize_prob):\n rnd_tok = lambda: tf.as_string(tf.random.uniform([], 0, 99999999, tf.int32))\n\n for idx in CAT_FEATURE_INDICES:\n key = feature_name(idx)\n # Ignore lint since tf.cond should evaluate lambda immediately.\n features[key] = tf.cond(tf.random.uniform([]) < randomize_prob,\n rnd_tok,\n lambda: features[key]) # pylint: disable=cell-var-from-loop\n return features, label",
"def get_optimal_discrete_feature_split(\n self, X: np.ndarray, y: np.ndarray, feature_col: int\n ):\n return BaseTree.get_discrete_split_value(\n X[:, feature_col], y, eval_func=self.eval_func\n )",
"def convert_study_to_feature(study: List[Types.SeriesObj]) -> List[Dict[str, tf.train.Feature]]:\n return [convert_series_to_feature(s) for s in study]"
] | [
"0.8234406",
"0.52805275",
"0.5167155",
"0.5081879",
"0.50444704",
"0.50157154",
"0.49738747",
"0.49235922",
"0.4897217",
"0.48730886",
"0.48664978",
"0.47522265",
"0.47458404",
"0.47244322",
"0.47193447",
"0.47139582",
"0.47023058",
"0.4691292",
"0.46388435",
"0.46278065",
"0.462432",
"0.4610667",
"0.46097022",
"0.46039566",
"0.45683187",
"0.4559977",
"0.45582384",
"0.45539427",
"0.4552161",
"0.45441282"
] | 0.784609 | 1 |
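The older variant in this record differs from discVar2Feature only in how the keys are spelled: the bucket index is written out as an English word and the default context word is 'contains'. A one-line illustration of that key construction (values chosen for illustration only):

    nums = ['zero', 'one', 'two', 'three', 'four', 'five']
    print('contains %s positive adjective' % nums[5])  # contains five positive adjective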
Check if featureVals contains fkey. This is a check to see if a core feature function has been previously computed. | def haskey(featureVals, fkey):
try:
featureVals[fkey]
except KeyError:
return False
#warn(HASKEYMSG % (fkey))
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _check_feature_by_keys(service_data=None, service_keys=None, ns_data=None, ns_keys=None):\n\n if service_data and not isinstance(service_data, Exception) and service_keys:\n if _is_keyval_greater_than_value(service_data, service_keys):\n return True\n\n if ns_data and ns_keys:\n for ns, nsval in ns_data.iteritems():\n if not nsval or isinstance(nsval, Exception):\n continue\n if _is_keyval_greater_than_value(nsval, ns_keys):\n return True\n\n return False",
"def __check_features(f_list, stopwords):\n ok = True\n for f in f_list:\n if not(__check_feature(f,stopwords)):\n return False\n return True",
"def match_features(phone_feats, other_feats):\n for feat in other_feats.keys():\n if phone_feats[feat] != other_feats[feat] and other_feats[feat] != UNDEF:\n return False\n return True",
"def checkFeatureInSet(self, featureSet, currFeature, idxValue):\n\n found = False\n currFeatureID = currFeature[idxValue]\n\n for feature in sorted(featureSet, key=lambda f: f[idxValue]):\n attr = feature.attributes()\n currValue = attr[idxValue]\n\n if currFeatureID == currValue:\n found = True\n return found\n\n return found",
"def check_featuring(self):\n existing_featuring = pd.read_csv(self.path_checkpoint)\n array_to_check = [float(self.radious), self.type_feature, self.type_filtering, self.h_filterig]\n bool_answer = (existing_featuring == array_to_check).all(1).any()\n # self.file_checkpoint_data.close()\n return bool_answer",
"def __contains__(self, feature):\n return feature == 'cvarsort' or feature in self.features",
"def important_features_(self):\n return self.scores_ > self.score_cutoff_",
"def __contains__(self, feature):\n return feature in self.features",
"def need_feature_generation(self):\n if self.feature_cmd_params:\n return True\n return False",
"def need_feature_generation(self):\n if self.feature_cmd_params:\n return True\n return False",
"def _isFIdx(self, featureName):\n return 1 if (featureName in self.featureNames) else 0",
"def filter_feature(feature, typ, value):\n return value is None or feature.__getattribute__(typ) == value",
"def has_feature(self, feature):\n features = self.features\n if features is None:\n return False\n \n return feature in features",
"def func4(key):\n return key in list(my_test_dict.keys())",
"def has_keys(self) -> bool:\n \n for key, value in self.key_satified.items():\n if value is not True:\n return False\n return True",
"def uses_feature(self, fcname):\n used = False\n if any([fcname.upper() in y for y in [x.upper() for x in self._featureclasses]]):\n used = True\n return used",
"def _get_kmeans_features(self):\n self._validate_kmeans_features()\n if self.kmeans_features == \"auto\":\n if self._get_mode() == \"Explain\":\n return False\n if self._get_mode() == \"Perform\":\n return False\n if self._get_mode() == \"Compete\":\n return True\n if self._get_mode() == \"Optuna\":\n return False\n else:\n return deepcopy(self.kmeans_features)",
"def fcoe_dirty(self):\n return any(c.dirty or c.renames_remaining for c in self.fcoe_confs)",
"def HasFOV(self):\n return _gmat_py.Hardware_HasFOV(self)",
"def func2(key):\n return key in my_test_dict.keys()",
"def __contains__(self, f) :\n if self.__disc is infinity :\n return True\n \n (s, l) = f\n\n (a, _, c) = apply_GL_to_form(self.__p1list[l], s)\n if not c % self.__level == 0 :\n return False\n\n return a + c < self.index()",
"def has(self, *args):\n return _ida_hexrays.qvector_ccase_t_has(self, *args)",
"def any(self):\n for v in self.sects.values():\n if np.any(v):\n return True\n if self.is_full():\n return False\n else:\n return np.any(self.defval)",
"def __call__(self, feature):\n return self.is_enabled(feature)",
"def exposes_features(self):\n return self._features_op is not None",
"def f_exists(self, varname):\r\n return (varname in self.locals_ptr)",
"def is_zero_dict( dict ):\n has_any_features = False\n for key in dict:\n has_any_features = has_any_features or dict[key]\n\n return not has_any_features",
"def feature_flags(self):\r\n return self.env_tokens.get('FEATURES', dict())",
"def has_vectored_fields(self):\r\n return any(ftype.vector for ftype in self._by_number)",
"def isselected(values, feature, parent):\r\n layername=values[0]\r\n fid = feature.id()\r\n layers = QgsMapLayerRegistry.instance().mapLayers()\r\n try:\r\n layer = layers[layername]\r\n except KeyError:\r\n try:\r\n layer = [l for l in layers.iteritems() if l[1].name() == layername][0][1]\r\n except IndexError:\r\n parent.setEvalErrorString( u'No layer with id or name {} found'.format( layername ) )\r\n return False\r\n\r\n return fid in layer.selectedFeaturesIds()"
] | [
"0.6286112",
"0.60849124",
"0.5997217",
"0.58951616",
"0.5878267",
"0.5877975",
"0.58187664",
"0.57946813",
"0.5696939",
"0.5696939",
"0.564731",
"0.56072927",
"0.55438966",
"0.5499882",
"0.5431996",
"0.5373477",
"0.53706175",
"0.53663695",
"0.536493",
"0.53626704",
"0.5316639",
"0.5302235",
"0.5296485",
"0.52895844",
"0.5287636",
"0.5281923",
"0.52775675",
"0.524521",
"0.524083",
"0.52370715"
] | 0.7618906 | 0 |
End detection, described in Eq. (50) of S. Watanabe et al., "Hybrid CTC/Attention Architecture for End-to-End Speech Recognition" | def end_detect(ended_hyps, i, M=3, d_end=np.log(1 * np.exp(-10))):
    if len(ended_hyps) == 0:
        return False
    count = 0
    best_hyp = sorted(ended_hyps, key=lambda x: x["score"], reverse=True)[0]
    for m in six.moves.range(M):
        # get ended_hyps whose length is i - m
        hyp_length = i - m
        hyps_same_length = [x for x in ended_hyps if
                            len(x["yseq"]) == hyp_length]
        if len(hyps_same_length) > 0:
            best_hyp_same_length = sorted(
                hyps_same_length, key=lambda x: x["score"], reverse=True)[0]
            if best_hyp_same_length["score"] - best_hyp["score"] < d_end:
                count += 1
    if count == M:
        return True
    else:
        return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def end_detect(ended_hyps, i, M=3, D_end=np.log(1 * np.exp(-10))):\n if len(ended_hyps) == 0:\n return False\n count = 0\n best_hyp = sorted(ended_hyps, key=lambda x: x[\"score\"], reverse=True)[0]\n for m in range(M):\n # get ended_hyps with their length is i - m\n hyp_length = i - m\n hyps_same_length = [\n x for x in ended_hyps if len(x[\"yseq\"]) == hyp_length\n ]\n if len(hyps_same_length) > 0:\n best_hyp_same_length = sorted(\n hyps_same_length, key=lambda x: x[\"score\"], reverse=True)[0]\n if best_hyp_same_length[\"score\"] - best_hyp[\"score\"] < D_end:\n count += 1\n\n if count == M:\n return True\n else:\n return False",
"def is_end_episode():\n return patterns_pb2.Condition(is_end_episode=True, eq=1)",
"def detect_cuewords():\n\n # cuewords\n\n if t_word[:2] == 'ni':\n create_negation_frame()\n create_target_focus_scope()\n\n if t_word[:4] == 'kein':\n create_negation_frame()\n create_target_focus_scope()\n\n if t_word[:4] == 'nein':\n create_negation_frame()\n create_target_focus_scope()",
"def decide(self, data):\n\n data = struct.unpack('%dh' % (len(data) / 2, ), data)\n self.audio_recorded_in.extend(data)\n\n while len(self.audio_recorded_in) > self.cfg['VAD']['gmm']['framesize']:\n frame = self.audio_recorded_in[:self.cfg['VAD']['gmm']['framesize']]\n self.audio_recorded_in = self.audio_recorded_in[self.cfg['VAD']['gmm']['frameshift']:]\n\n mfcc = self.front_end.param(frame)\n\n log_prob_speech = self.gmm_speech.score(mfcc)\n log_prob_sil = self.gmm_sil.score(mfcc)\n\n self.log_probs_speech.append(log_prob_speech)\n self.log_probs_sil.append(log_prob_sil)\n\n log_prob_speech_avg = 0.0\n for log_prob_speech, log_prob_sil in zip(self.log_probs_speech, self.log_probs_sil):\n log_prob_speech_avg += log_prob_speech - logsumexp([log_prob_speech, log_prob_sil])\n log_prob_speech_avg /= len(self.log_probs_speech)\n\n prob_speech_avg = np.exp(log_prob_speech_avg)\n\n# print 'prob_speech_avg: %5.3f' % prob_speech_avg\n\n self.last_decision = prob_speech_avg\n\n # returns a speech / non-speech decisions\n return self.last_decision",
"def training_end(self):\n pass",
"def match_end_faces(test_end_face, end_faces):\n for ef in end_faces:\n if (test_end_face+180) % 360 == ef:\n return True\n return False",
"def recognize():\n return 0",
"def\tbegin_end(env, blc):\n\n\tb_e = np.empty((blc.shape[0], 2))\n\tinf = 0\n\tp = 0\n\twin = env.win_over\n\tb_e[0, 0] = inf\n\tif blc[0] + win <= blc[-1]:\n\t\tb_e[0, 1] = blc[0] + win\n\telse:\n\t\tb_e[0, 1] = blc[-1]\n\tif blc.shape[0] == 1:\n\t\tb_e[0, 1] = blc[0]\n\t\treturn (b_e)\n\tfor k in range(1, blc.shape[0] - 1):\n\t\tinf = blc[k - 1] - win\n\t\tb_e[k, 0] = inf\n\t\tif blc[k] + win <= blc[-1]:\n\t\t\tb_e[k, 1] = blc[k] + win\n\t\telse:\n\t\t\tb_e[k, 1] = blc[-1]\n\tb_e[blc.shape[0] - 1, 0] = blc[-2] - win\n\tb_e[blc.shape[0] - 1, 1] = blc[-1]\n\tneg = np.where(b_e < 0)[0]\n\tif neg.shape[0]:\n\t\tb_e = b_e[neg[-1]:]\n\t\tb_e[0, 0] = 0\n\treturn (b_e)",
"def detect(self, start_time, end_time):\n\n # Convert times to UTCDateTime objects\n start_time = UTCDateTime(start_time)\n end_time = UTCDateTime(end_time)\n\n # Decimate LUT\n self.lut = self.lut.decimate(self.decimate)\n\n # Detect uses the non-centred onset by default\n if self.onset_centred is None:\n self.onset_centred = False\n\n # Define pre-pad as a function of the onset windows\n if self.pre_pad is None:\n self.pre_pad = max(self.p_onset_win[1],\n self.s_onset_win[1]) \\\n + 3 * max(self.p_onset_win[0],\n self.s_onset_win[0])\n\n msg = \"=\" * 120 + \"\\n\"\n msg += \"\\tDETECT - Continuous Seismic Processing\\n\"\n msg += \"=\" * 120 + \"\\n\"\n msg += \"\\n\"\n msg += \"\\tParameters specified:\\n\"\n msg += \"\\t\\tStart time = {}\\n\"\n msg += \"\\t\\tEnd time = {}\\n\"\n msg += \"\\t\\tTime step (s) = {}\\n\"\n msg += \"\\t\\tNumber of CPUs = {}\\n\"\n msg += \"\\n\"\n msg += \"\\t\\tSampling rate = {}\\n\"\n msg += \"\\t\\tGrid decimation [X, Y, Z] = [{}, {}, {}]\\n\"\n msg += \"\\t\\tBandpass filter P = [{}, {}, {}]\\n\"\n msg += \"\\t\\tBandpass filter S = [{}, {}, {}]\\n\"\n msg += \"\\t\\tOnset P [STA, LTA] = [{}, {}]\\n\"\n msg += \"\\t\\tOnset S [STA, LTA] = [{}, {}]\\n\"\n msg += \"\\n\"\n msg += \"=\" * 120\n msg = msg.format(str(start_time), str(end_time), self.time_step,\n self.n_cores, self.sampling_rate,\n self.decimate[0], self.decimate[1], self.decimate[2],\n self.p_bp_filter[0], self.p_bp_filter[1],\n self.p_bp_filter[2], self.s_bp_filter[0],\n self.s_bp_filter[1], self.s_bp_filter[2],\n self.p_onset_win[0], self.p_onset_win[1],\n self.s_onset_win[0], self.s_onset_win[1])\n self.output.log(msg, self.log)\n\n # Detect max coalescence value and location at each time sample\n # within the decimated grid\n self._continuous_compute(start_time, end_time)",
"def _get_predict_end(self, end):\n\n out_of_sample = 0 # will be overwritten if needed\n if end is None: # use data for ARIMA - endog changes\n end = len(self.data.endog) - 1\n\n dates = self.data.dates\n freq = self.data.freq\n\n if isinstance(end, str):\n if dates is None:\n raise ValueError(\"Got a string for end and dates is None\")\n try:\n dtend = self._str_to_date(end)\n self.data.predict_end = dtend\n end = self._get_dates_loc(dates, dtend)\n except KeyError as err: # end is greater than dates[-1]...probably\n if dtend > self.data.dates[-1]:\n end = len(self.data.endog) - 1\n freq = self.data.freq\n out_of_sample = datetools._idx_from_dates(dates[-1], dtend,\n freq)\n else:\n if freq is None:\n raise ValueError(\"There is no frequency for these \"\n \"dates and date %s is not in dates \"\n \"index. Try giving a date that is in \"\n \"the dates index or use an integer.\"\n % dtend)\n else: #pragma: no cover\n raise err # should never get here\n self._make_predict_dates() # attaches self.data.predict_dates\n\n elif isinstance(end, int) and dates is not None:\n try:\n self.data.predict_end = dates[end]\n except IndexError as err:\n nobs = len(self.data.endog) - 1 # as an index\n out_of_sample = end - nobs\n end = nobs\n if freq is not None:\n self.data.predict_end = datetools._date_from_idx(dates[-1],\n out_of_sample, freq)\n elif out_of_sample <= 0: # have no frequency but are in sample\n #TODO: what error to catch here to make sure dates is\n #on the index?\n try:\n self.data.predict_end = self._get_dates_loc(dates,\n end)\n except KeyError:\n raise\n else:\n self.data.predict_end = end + out_of_sample\n self.data.predict_start = self._get_dates_loc(dates,\n self.data.predict_start)\n\n self._make_predict_dates()\n\n elif isinstance(end, int):\n nobs = len(self.data.endog) - 1 # is an index\n if end > nobs:\n out_of_sample = end - nobs\n end = nobs\n\n elif freq is None: # should have a date with freq = None\n raise ValueError(\"When freq is None, you must give an integer \"\n \"index for end.\")\n\n return end, out_of_sample",
"def epoch_end(self):\n pass",
"def has_end_reason(self):\n # Someday I will have real end reasons\n return False",
"def predict_end():\n data = request.json\n\n if data:\n predictor.pred_dict[\"end_date\"] = data[\"end_date\"]\n else:\n pass\n\n return 'Non tam praeclarum est scire latine, quam turpe nescire'",
"def guess_cuewords():\n\n if t_word[:3] == 'nie':\n create_negation_frame()\n create_target_focus_scope()\n\n if t_word[:3] == 'nic':\n create_negation_frame()\n create_target_focus_scope()",
"def test_sv_end_svend():\n # Example:\n # 2 321682 . T <DEL> 6 PASS SVTYPE=DEL;END=321887;SVLEN=-205;CIPOS=-56,20;CIEND=-10,62 GT:GQ 0/1:12\n end = sv_end(pos=321682, alt=\"<DEL>\", svend=321887, svlen=-205)\n assert end == 321886",
"def test_sv_end_bnd():\n # Example:\n # 2\t321681\tbnd_W\tG\tG]17:198982]\t6\tPASS\tSVTYPE=BND;MATEID=bnd_Y\tGT\t0/1\n end = sv_end(pos=321681, alt=ALT, svend=None, svlen=None)\n assert end == 198981",
"def doCombineCurEnd(self, endofword, nrc='', nextvowel=''): # nrc = next root consonant\n if not self.end:\n return\n self.final = PhonStateCAT.getFinal(self.end)\n nasalPhon = ''\n postVowelPhon = ''\n preVowelPhon = ''\n # geminates\n geminates = False\n if self.end.startswith('w'):\n preVowelPhon = 'w'\n self.vowel = self.end[1:2]\n else:\n self.vowel = self.end[:1]\n vowelPhon = self.vowel\n if nrc == self.final and self.final != '':\n geminates = True\n if self.gemminatesStrategy == 'len' or self.gemminatesStrategy == 'lentone':\n postVowelPhon = 'ː'\n ## Suffix\n finalPhon = ''\n if self.final == 'ng':\n nasalPhon = self.nasalchar # ?\n if geminates:\n pass\n elif self.final in PhonStateCAT.simpleFinalMapping:\n finalPhon = PhonStateCAT.simpleFinalMapping[self.final]\n elif self.final == '':\n if self.latent != '' and self.prefixStrategy != 'never' and (self.prefixSyllable == 'afterEmptyCoda' or self.prefixSyllable == 'afterEmptyCoda+'):\n finalPhon = PhonStateCAT.simpleLatentMapping[self.latent]\n finalPhon = ''\n else:\n print(\"unrecognized final: \"+self.final)\n self.phon += preVowelPhon+vowelPhon+nasalPhon+postVowelPhon+finalPhon\n if not endofword:\n self.phon += self.syllablesepchar",
"def get_envelope_end(env):\n denv = np.diff(env)\n i = np.where(np.abs(denv) > 0)[0]\n true_stop_index = np.max(i)+1\n return true_stop_index",
"def get_end_indicator( is_alive,force_end_at_t_max = False):\n #session-ending action indicator: uint8[batch,tick]\n is_end = T.eq(is_alive[:,:-1] - is_alive[:,1:],1)\n \n if force_end_at_t_max:\n session_ended_before = T.sum(is_end,axis=1,keepdims=True)\n is_end_at_tmax = 1 - T.gt(session_ended_before, 0 )\n else:\n is_end_at_tmax = T.zeros((is_end.shape[0],1),dtype=is_end.dtype) \n \n is_end = T.concatenate(\n [is_end,\n is_end_at_tmax],\n axis=1\n )\n return is_end",
"def detect_commercial(video_desp, histogram, transcript):\n blackframe_list = get_blackframe_list(histogram)\n black_windows = blackframe_list \\\n .dilate(1. / video_desp['fps']) \\\n .coalesce() \\\n .dilate(-1. / video_desp['fps']) \\\n .filter(min_length=MIN_BLACKWINDOW * 1. / video_desp.fps)\n\n # get all instances of >>, Announcer:, and >> Announcer: in transcript\n arrow_text = get_text_intervals(\">>\", transcript)\n announcer_text = get_text_intervals(\"Announcer:\", transcript)\n arrow_announcer_text = get_text_intervals(\">> Announcer:\", tarnscript)\n \n # get an interval for the whole video\n whole_video = IntervalList([(0., video_desp['video_length'], 0)])\n\n # whole video minus black windows to get segments in between black windows\n # then filter out anything that overlaps with \">>\" as long as it's not\n # \">> Announcer:\"\n # then coalesce, as long as it doesn't get too long\n def fold_fn(stack, interval):\n if len(stack) == 0:\n stack.append(interval)\n else:\n last = stack.pop()\n if or_pred(overlaps(), after(max_dist=.1)(interval, last))(interval, last):\n if last.union(interval).length() > MAX_COMMERCIAL_TIME:\n if last.length() > MAX_COMMERCIAL_TIME:\n stack.append(Interval(\n last.start, \n last.start + MAX_COMMERCIAL_TIME, \n last.payload))\n else:\n stack.append(last)\n stack.append(interval)\n else:\n stack.append(last.union(interval))\n else:\n stack.append(interval)\n commercials = whole_video \\\n .minus(black_windows) \\\n .filter_against(\n arrow_text.filter_against(arrow_announcer_text,\n predicate=not_pred(overlaps())),\n predicate=not_pred(overlaps())\n ) \\\n .set_union(black_windows) \\\n .fold_list(fold_fn, []) \\\n .filter_length(min_length = MIN_COMMERCIAL_TIME)\n \n # add in lowercase intervals\n lowercase_intervals = get_lowercase_intervals(transcript)\n commercials = commercials \\\n .set_union(lowercase_intervals) \\\n .dilate(MIN_COMMERCIAL_GAP / 2) \\\n .coalesce() \\\n .dilate(MIN_COMMERCIAL_GAP / 2)\n\n # get blank intervals\n blank_intervals = whole_video.minus(IntervalList([\n (start_sec, end_sec - TRANSCRIPT_DELAY, 0)\n for text, start_sec, end_sec in transcript\n ])).coalesce().filter_length(\n min_length=MIN_BLANKWINDOW, max_length=MAX_BLANKWINDOW)\n\n # add in blank intervals, but only if adding in the new intervals doesn't\n # get too long\n commercials = commercials.merge(blank_intervals,\n predicate=or_pred(before(max_dist=MAX_MERGE_GAP),\n after(max_dist=MAX_MERGE_GAP))\n ) \\\n .filter_length(max_length=MAX_MERGE_DURATION) \\\n .set_union(commercials) \\\n .dilate(MIN_COMMERCIAL_GAP / 2) \\\n .coalesce() \\\n .dilate(MIN_COMMERCIAL_GAP / 2)\n\n # post-process commercials to get rid of gaps, small commercials, and\n # islated blocks\n small_gaps = whole_video \\\n .minus(commercials) \\\n .filter_length(max_length = MAX_COMMERCIAL_GAP) \\\n .filter_against(\n arrow_text.filter_against(\n announcer_text,\n predicate=not_pred(overlaps())\n ), predicate=not_pred(overlaps()))\n\n # merge with small gaps, but only if that doesn't make things too long\n commercials = commercials \\\n .set_union(small_gaps.dilate(0.1)) \\\n .coalesce() \\\n .filter_length(max_length=MAX_COMMERCIAL_TIME) \\\n .set_union(commercials) \\\n .coalesce()\n\n # get isolated commercials\n not_isolated_commercials = commercials.filter_against(commercials,\n predicate=or_pred(before(max_dist=MAX_COMMERCIAL_TIME),\n after(max_dist=MAX_COMMERCIAL_TIME)))\n isolated_commercials = commercials.minus(not_isolated_commercials)\n commercials_to_delete = 
isolated_commercials \\\n .filter_length(max_length=MIN_COMMERCIAL_TIME_FINAL) \\\n .set_union(isolated_commercials \\\n .filter_against(blank_intervals, predicate=equals()) \\\n .filter_length(max_length=MAX_ISOLATED_BLANK_TIME))\n\n commercials = commercials.minus(commercials_to_delete)\n\n return commercials",
"def handle_record_end():\n LOG.info(\"End Recording...\")\n context = {'client_name': 'mycroft_listener',\n 'source': 'audio',\n 'destination': [\"skills\"]}\n bus.emit(Message('recognizer_loop:record_end', context=context))",
"def affected_end(self):\n types = {alt.type for alt in self.ALT} # set!\n BAD_MIX = {INS, SV, BND, SYMBOLIC} # don't mix well with others\n if (BAD_MIX & types) and len(types) == 1 and list(types)[0] == INS:\n # Only insertions, return 0-based position right of first base\n return self.POS # right of first base\n else: # Return 0-based end position, behind last REF base\n return (self.POS - 1) + len(self.REF)",
"def end_phase():\n pass",
"def stop_cb(evt):\n # print('CLOSING on {}'.format(evt))\n speech_recognizer.stop_continuous_recognition()\n global done\n done = True",
"def inference_end(self, inputs, results):\n return",
"def on_epoch_end(epoch, logs):\n global w2vmodel\n global max_seq_length\n global word_to_ix\n global ix_to_word\n global sample\n if epoch % 5 == 0:\n generate_text(model, w2vmodel, epoch, length=75, max_seq_length=max_seq_length,\n seed=\"Rain drop, drop top\\n\")\n return",
"def get_last_sentence_end(text: str) -> int:\n end = -1\n for e in sentence_ends:\n p_end = [m.end() for m in re.finditer(e, text)]\n end = max(end, p_end[-1] if len(p_end) > 0 else -1)\n return end",
"def end_episode(self):\n self.training_buffer.reset_all()\n for agent_id in self.cumulative_rewards:\n self.cumulative_rewards[agent_id] = 0\n for agent_id in self.episode_steps:\n self.episode_steps[agent_id] = 0\n if self.use_curiosity:\n for agent_id in self.intrinsic_rewards:\n self.intrinsic_rewards[agent_id] = 0",
"def test_endgameStrategy(self):\n self.result = \"\"\"\n 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 1 2 3 3 2 1 0 0\n 0 0 1 3 x x x x 1 0 0\n 0 0 2 x x 6 x 5 2 0 0\n 0 0 3 x 4 4 x x 2 0 0\n 0 0 3 x 5 5 x x 2 0 0\n 0 0 2 x x x x 3 1 0 0\n 0 0 1 2 3 3 2 1 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0\n \"\"\"",
"def predict_again(src):\n global rcnt\n global lcnt\n H,W = src.shape[:2]\n #cv2.imshow(\"cROPPPPPPED\",src)\n #print (src.shape)\n\n img1 = src[:,:int(W/2)]\n img2 = src[:,int(W/2)+1:]\n contoured1,area1 = drawContours11111(img1)\n contoured2,area2 = drawContours11111(img2)\n #cv2.imshow(\"blank_image\",contoured1)\n #cv2.imshow(\"blank_image1\",contoured2)\n print (area1,area2)\n if area2>area1:\n #print (\"New:::::::::::::RIGGGGGGGGGHT\")\n if rcnt >=3:\n print (\"New:::::::::::::RIGGGGGGGGGHT\")\n feedback.direction = 1\n feedback.detection = 1\n rcnt += 1\n lcnt = 0\n elif area1>area2:\n #print (\"New:::::::::::::LEFTTTTTTTTT\")\n if lcnt >=3:\n print (\"New:::::::::::::LEFTTTTTTTTT\")\n feedback.direction = -1\n feedback.detection = 1\n lcnt += 1\n rcnt = 0"
] | [
"0.59446865",
"0.5918407",
"0.5820141",
"0.5536501",
"0.54732686",
"0.5456574",
"0.5447737",
"0.541912",
"0.53824407",
"0.53753823",
"0.53732294",
"0.53102255",
"0.5226697",
"0.52063173",
"0.51992583",
"0.5165421",
"0.51476526",
"0.5133361",
"0.5113841",
"0.50879014",
"0.5086795",
"0.5074366",
"0.5073692",
"0.50706536",
"0.50664604",
"0.5064536",
"0.5033014",
"0.5032949",
"0.5032797",
"0.5011365"
] | 0.5958459 | 0 |
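The record above pairs the end-detection query with its positive document: a helper that stops beam-search decoding once the recently finished hypotheses stop improving on the best finished score. As a minimal illustrative sketch, the Python below drives such a function from a toy loop; the driver loop, the synthetic scores, the maxlen bound, and the use of plain range in place of six.moves.range are assumptions for illustration, while the hypothesis dictionaries keep only the "yseq" and "score" keys the function actually reads.

import numpy as np

def end_detect(ended_hyps, i, M=3, d_end=np.log(1 * np.exp(-10))):
    # Same logic as the positive document above, reproduced so the sketch runs on its own
    # (plain range is used here instead of six.moves.range).
    if len(ended_hyps) == 0:
        return False
    count = 0
    best_hyp = sorted(ended_hyps, key=lambda x: x["score"], reverse=True)[0]
    for m in range(M):
        hyp_length = i - m
        hyps_same_length = [x for x in ended_hyps if len(x["yseq"]) == hyp_length]
        if len(hyps_same_length) > 0:
            best_same_length = sorted(hyps_same_length, key=lambda x: x["score"], reverse=True)[0]
            if best_same_length["score"] - best_hyp["score"] < d_end:
                count += 1
    return count == M

# Hypothetical driver loop: a real decoder would append hypotheses that emit <eos> at step i.
ended_hyps = []
maxlen = 15
for i in range(1, maxlen + 1):
    # Pretend one hypothesis of length i finishes at each step, with a quickly decaying score.
    ended_hyps.append({"yseq": list(range(i)), "score": -3.0 * i})
    if end_detect(ended_hyps, i):
        print("stop decoding at step", i)  # fires at step 7 with these toy numbers
        break

The threshold d_end = log(exp(-10)) means a length bucket counts toward stopping only when its best finished hypothesis trails the overall best finished score by more than 10 log-probability units; once the M most recent lengths all trail by that margin, decoding is cut off.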
Geocode the addresses and build an address table | def build_addresses(self):
    from ambry.geo.geocoders import DstkGeocoder
    facilities = self.partitions.find(table='facilities')
    def address_gen():
        for row in facilities.query("SELECT * FROM facilities"):
            address = "{}, {}, {} {}".format(row['dba_address1'], row['dba_city'], 'CA', row['dba_zip_code'])
            yield (address, row)
    dstk_service = self.config.service('dstk')
    dstk_gc = DstkGeocoder(dstk_service, address_gen())
    p = self.partitions.find_or_new(table='facilities_addresses')
    p.clean()
    lr = self.init_log_rate(500)
    with p.inserter() as ins:
        for i, (k, r, inp_row) in enumerate(dstk_gc.geocode()):
            lr("Addresses "+str(i))
            r['facilities_id'] = inp_row['id']
            ins.insert(r) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def geolocate_address(self):\n self.geolocator = Nominatim(user_agent=\"fundaft\")\n\n # If latitude / longitude are missing, try to geocode them on the basis\n # of the address \n self.coords = [self.get_coords(address) if np.isnan(lat)\n else (lat, lon) for address, lat, lon in\n zip(self.df_ads['property_title'], \n self.df_ads['latitude'], \n self.df_ads['longitude'])]\n \n df = pd.DataFrame(self.coords, columns=['latitude', 'longitude'])\n \n # If new coordinates are not in Dublin, change to na again\n df = self.is_in_dublin(df)\n\n self.df_ads[[\"latitude\",\"longitude\"]] = df",
"def transform(self):\n print(\"Add City, State\")\n geocoder_prefix_url = self.config_dict.get('geocoder_prefix_url')\n geocoder_suffix_url = self.config_dict.get('geocoder_suffix_url')\n transformed_file = open(f\"{self.config_dict.get('proj_dir')}new_addresses.csv\", \"w\")\n transformed_file.write(\"X,Y,Type\\n\")\n with open(f\"{self.config_dict.get('proj_dir')}addresses.csv\", \"r\") as partial_file:\n csv_dict = csv.DictReader(partial_file, delimiter=',')\n for row in csv_dict:\n address = row[\"Street Address\"] + \" Boulder CO\"\n print(address)\n geocode_url = f\"{geocoder_prefix_url}{address}{geocoder_suffix_url}\"\n print(geocode_url)\n r = requests.get(geocode_url)\n\n resp_dist = r.json()\n x = resp_dist['result']['addressMatches'][0]['coordinates']['x']\n y = resp_dist['result']['addressMatches'][0]['coordinates']['y']\n transformed_file.write(f\"{x},{y}, Residential\\n\")\n\n transformed_file.close()",
"def create_address_data():\r\n print(\"Reading address data file\")\r\n addresses = pd.read_csv(raw_loc + 'san_francisco_addresses.csv')\r\n addresses.columns = map(str.lower, addresses.columns)\r\n\r\n keepcolumns = ['lon', 'lat', 'number', 'street']\r\n addresses = addresses[keepcolumns]\r\n addresses['number'] = addresses['number'].apply(lambda x: re.findall( '\\d+', x)[0]).astype(int)\r\n addresses['address'] = addresses.apply(lambda x: str(x['number']) + \" \" + str(x['street']), axis = 1)\r\n addresses['streetname'] = addresses['street'].apply(return_street)\r\n addresses.drop_duplicates(subset = 'address', inplace = True)\r\n addresses['type'] = 'known'\r\n addresses.to_sql('raw_address_data', if_exists = 'replace', con = conn)\r\n\r\n\r\n print(\"Finding similar addresses\")\r\n df = pd.read_sql_query('Select distinct tickstreetno , tickstreetname , count(*) total_tickets from raw_ticket_data t1'\r\n ' left join raw_address_data t2 on t1.TickStreetNo = t2.number and t1.TickStreetName = t2.streetname '\r\n \" where t2.address is null group by tickstreetno, tickstreetname \", conn)\r\n\r\n df['TickStreetNo'] = df['TickStreetNo'].apply(return_num)\r\n df['TickStreetName'] = df['TickStreetName'].apply(replace_street)\r\n df['TickStreetName'] = df['TickStreetName'].apply(return_street)\r\n df['blocknum'] = df['TickStreetNo'].apply(lambda x: math.ceil(x/100))\r\n df.drop_duplicates(inplace = True)\r\n\r\n df2 = addresses\r\n df2['blocknum'] = df2['number'].apply(lambda x: math.ceil(x/100))\r\n newdf = df.merge(df2, how = 'left', left_on = ['TickStreetName', 'blocknum'], \\\r\n right_on = ['streetname', 'blocknum'])\r\n\r\n\r\n unfound = newdf[pd.isnull(newdf.number)]\r\n unfound['type'] == \"unknown\"\r\n newdf = newdf[pd.isnull(newdf.number) == False]\r\n newdf['delta'] = np.abs(newdf['number'] - newdf['TickStreetNo'])\r\n newdf.sort_values(by = 'delta', inplace = True)\r\n newdf.drop_duplicates(subset = ['TickStreetName', 'TickStreetNo'], keep = 'first', inplace = True)\r\n\r\n newdf = newdf[[ 'lon', 'lat', 'TickStreetNo', 'street', 'address','streetname' ]]\r\n newdf.columns = ['lon', 'lat', 'number', 'street', 'address','streetname' ]\r\n newdf['address'] = newdf['number'].map(str) + ' ' + newdf['street']\r\n newdf.drop_duplicates(inplace = True)\r\n newdf['type'] = 'similar'\r\n newdf.to_sql('raw_address_data', conn, if_exists = 'append')\r\n unfound = unfound[unfound.TickStreetNo < 10000]\r\n\r\n\r\n print(\"Searching for Intersection Addresses\")\r\n #unfound = unfound[(unfound.TickStreetNo < 10000) & (unfound.TickStreetNo > 0)]\r\n isection = unfound[['TickStreetNo','TickStreetName', 'total_tickets']]\r\n isection['address'] = isection['TickStreetName'].apply(return_intersections)\r\n unfound = isection[pd.isnull(isection.address) == True]\r\n isection = isection[pd.isnull(isection.address) == False]\r\n isection = isection.merge(addresses, left_on = 'address', right_on = 'address')\r\n isection = isection[['number', 'streetname', 'street', 'address', 'lat', 'lon']]\r\n isection.to_sql('raw_address_data', if_exists = 'append', con = conn)\r\n\r\n\r\n\r\n print(\"Searching for Unknown Addresses\")\r\n unfound.drop_duplicates(inplace = True)\r\n tqdm.pandas()\r\n unfound['street'] = unfound.apply(lambda x: return_streetname_unknown(x['TickStreetNo'], x['TickStreetName']), axis = 1)\r\n unfound['address'] = unfound.apply(lambda x: str(x['TickStreetNo']) + \" \" + str(x['street']), axis = 1)\r\n numlookup = 5000\r\n print(\"There are \" + str(unfound.shape[0]) + \" addresses we 
couldn't find, we're goin to lookup \" + str(numlookup))\r\n lookup = unfound.sort_values(by = 'total_tickets', ascending = False)[:numlookup] #CHANGE TO 5000\r\n lookup['coordinates'] = lookup['address'].progress_apply(lambda x: create_locs(x + ' SAN FRANCISCO CA'))\r\n lookup.dropna(subset = ['coordinates'], inplace = True)\r\n lookup['lat'] = lookup['coordinates'].apply(lambda x: x[0])\r\n lookup['lon'] = lookup['coordinates'].apply(lambda x: x[1])\r\n lookup.rename(columns = {'TickStreetNo':'number', 'TickStreetName':'streetname'}, inplace = True)\r\n lookup = lookup[['lat', 'lon', 'street', 'number', 'streetname', 'address']]\r\n unfound = unfound[unfound['address'].isin(lookup['address']) == False]\r\n unfound['type'] = 'unfound'\r\n lookup['type'] = 'searched'\r\n lookup.to_sql('raw_address_data', if_exists = 'append', con = conn)\r\n\r\n\r\n print(\"associating neighborhoods\")\r\n addresses = pd.read_sql_query('Select * from raw_address_data', conn)\r\n addresses['geometry'] = addresses.apply(lambda x: Point(x['lon'], x['lat']), axis = 1)\r\n point = gpd.GeoDataFrame(addresses['geometry'])\r\n point.crs = {'init': 'epsg:4326'}\r\n poly = gpd.GeoDataFrame.from_file(raw_loc+ 'AnalysisNeighborhoods.geojson')\r\n pointInPolys = gpd.tools.sjoin(point, poly, how='left')\r\n addresses['geometry'] = addresses['geometry'].astype(str)\r\n pointInPolys['geometry'] = pointInPolys['geometry'].astype(str)\r\n addresses = addresses.merge(pointInPolys, left_on = 'geometry', right_on = 'geometry')\r\n addresses.drop(columns = ['geometry', 'index', 'index_right'], inplace = True)\r\n addresses.drop_duplicates(subset = 'address', inplace = True)\r\n addresses['number'] = addresses['number'].astype(int)\r\n addresses.to_sql('address_data', conn, if_exists = 'replace')\r\n\r\n\r\n unfound.rename(columns = {'TickStreetNo':'number', 'TickStreetName': 'streetname'}, inplace = True)\r\n unfound.drop(columns = 'total_tickets', inplace = True)\r\n unfound['number'] = unfound['number'].astype(int)\r\n unfound.to_sql('address_data', if_exists = 'append', con = conn)\r\n\r\n\r\n\r\n \"\"\"Function is to separate addresses into those that may have have more than one address associated with a ticket and street name combo. \"\"\"\r\n grouped = addresses.groupby(by = ['number', 'streetname'], as_index = False)['address'].agg('count')\r\n grouped.sort_values(by = 'address', ascending = False)\r\n grouped.columns = ['number', 'streetname', 'count_ad']\r\n single_address = grouped[grouped.count_ad ==1]\r\n single_address = single_address.merge(addresses, left_on = ['number', 'streetname'], right_on = ['number', 'streetname'])\r\n double_address = addresses[addresses.address.isin(single_address['address']) == False]\r\n single_address.to_sql('single_address', conn, if_exists = 'replace')\r\n\r\n return single_address, double_address, addresses",
"def read_database_addresses(self):\n\n col_kvk = self.address_keys[KVK_KEY]\n col_name = self.address_keys[NAME_KEY]\n col_adr = self.address_keys[ADDRESS_KEY]\n col_post = self.address_keys[POSTAL_CODE_KEY]\n col_city = self.address_keys[CITY_KEY]\n\n self.address_df = self.read_csv_input_file(self.address_input_file_name,\n usecols=[col_kvk, col_name, col_adr,\n col_post, col_city],\n names=[KVK_KEY, NAME_KEY, ADDRESS_KEY,\n POSTAL_CODE_KEY, CITY_KEY],\n unique_key=POSTAL_CODE_KEY)\n self.remove_duplicated_kvk_entries()\n\n self.logger.debug(\"Done\")",
"def address():\n # We start with generating the street name. For this we choose\n # between the most common prefixes and our own prefixes\n prefix = dice.randint(1, 100)\n if prefix <= 10: # 10%\n prefix = \"Haupt\"\n elif prefix <= 18: # 8%\n prefix = \"Schul\"\n elif prefix <= 25: # 7%\n prefix = \"Garten\"\n elif prefix <= 32: # 7%\n prefix = \"Dorf\"\n elif prefix <= 39: # 7%\n prefix = \"Bahnhof\"\n elif prefix <= 46: # 7%\n prefix = \"Wiesen\"\n elif prefix <= 52: # 6%\n prefix = \"Berg\"\n elif prefix <= 56: # 4%\n prefix = \"Kirch\"\n elif prefix <= 60: # 4%\n prefix = \"Wald\"\n elif prefix <= 64: # 4%\n prefix = \"Ring\"\n else:\n prefix = dice.choice(names.prefix)\n\n # Now we can add the suffix\n suffix = dice.randint(1, 100)\n if suffix <= 78:\n suffix = \"straße\"\n elif suffix <= 96:\n suffix = \"weg\"\n elif suffix <= 98:\n suffix = \"allee\"\n elif suffix == 99:\n suffix = \"ring\"\n elif suffix == 100:\n suffix = \"platz\"\n\n # When we have a city name as prefix, we need to capitalize the\n # suffix since it will be two words\n if prefix[-1] == \" \":\n suffix = suffix.capitalize()\n\n # Now we can add them together\n street = prefix + suffix\n\n # We need a house number as well. In Germany most numbers have\n # between one and four digits, so we will use this as base. Lower\n # numbers are more common, so we'll give it a 10% probability of\n # using 3 digits and 1% of using 4 digits\n digits = dice.randint(1, 100)\n if digits == 100:\n house_number = str(dice.randint(1000, 9999))\n elif digits >= 90:\n house_number = str(dice.randint(100, 999))\n else:\n house_number = str(dice.randint(1, 99))\n address_full = street + \" \" + house_number\n return address_full",
"def locate(self):\n \n #CONNECT TO API\n api = GoogleV3(api_key = self.google_key)\n\n #INITALIZE ARRAY\n array = []\n\n #START GEOCODING ADDRESSES\n for i in tqdm(range(len(self.df)), desc='Geocoding Addresses'):\n\n \n row = self.df.iloc[i]\n\n #GET ADDRESS VARIABLES\n st_name = row['street_name']\n st_number = row['house_number']\n city = row['city']\n state = row['state/province']\n listing_number = row['listing_number']\n zip = row['postal_code']\n\n\n #FORMAT ADDRESS FOR API\n full_address = str(\"{} {},{},{},{}\".format(st_number, st_name, city, state, zip))\n\n #TRY TO LOCATE WITH GOOGLE\n try:\n \n location = api.geocode(full_address, timeout=10)\n\n lat = location.latitude\n lon = location.longitude\n \n\n info = [lat,lon, listing_number]\n\n array.append(info)\n\n next \n\n #Go to next if you cant locate\n except:\n\n info = [0,0, listing_number]\n\n array.append(info)\n\n next\n\n #CONVERT SERIES TO DATAFRAME\n geo_data = pd.DataFrame(data = array, columns = ['lat', 'lon', 'listing_number'])\n \n #INNER JOIN DATA TO DATAFRAME\n self.df = pd.merge(self.df, geo_data, on= 'listing_number', how = 'inner')",
"def add_address(self, address_list=None):\n sql = u' INSERT INTO address_TBL ' \\\n u'(line_1, line_2, city, county, country, billing_address, main_address, client_company_ID) ' \\\n u'VALUES (%s, %s, %s, %s, %s, %s, %s, %s);'\n if address_list is None:\n address_list = self.data_set['address']\n\n c, conn = connection(self.schema)\n\n try:\n for address in address_list:\n if address['line_2'] is None:\n address['line_2'] = 'NULL'\n if address['billing'] is None:\n address['billing'] = 0\n if address['default'] is None:\n address['default'] = 0\n\n data = (address['line_1'],\n address['line_2'],\n address['city'],\n address['county'],\n address['country'],\n address['billing'],\n address['default'],\n self.id)\n\n c.execute(sql, data)\n finally:\n conn_close(c, conn)",
"def _set_search_addresses(self):\n if self._report_data and self._report_data['details']:\n for detail in self._report_data['details']:\n if detail.get('ownerGroups'):\n for group in detail['ownerGroups']:\n for owner in group['owners']:\n Report._format_address(owner['address'])\n if detail.get('location') and 'address' in detail['location']:\n Report._format_address(detail['location']['address'])\n if detail.get('notes'):\n for note in detail['notes']:\n if note.get('contactAddress'):\n Report._format_address(note['contactAddress'])\n elif note.get('givingNoticeParty') and note['givingNoticeParty'].get('address'):\n Report._format_address(note['givingNoticeParty']['address'])",
"def apply(data, options=default_options, config=default_config, warning=print):\n\n if options[\"reverse\"]:\n\n # convert address to lat,lon\n if not \"address\" in list(data.columns):\n raise Exception(\"reserve address resolution requires 'address' field\")\n data.reset_index(inplace=True) # index is not meaningful\n for retries in range(config[\"retries\"]):\n try:\n pos = geocode(data[\"address\"],\n provider = config[\"provider\"],\n user_agent = config[\"user_agent\"],\n timeout = config[\"timeout\"],\n )\n break\n except Exception as err:\n pos = err\n import time\n time.sleep(config[\"sleep\"])\n if type(pos) is Exception or type(pos) is ModuleNotFoundError:\n raise pos\n data[\"longitude\"] = list(map(lambda p: p.x,pos[\"geometry\"]))\n data[\"latitude\"] = list(map(lambda p: p.y,pos[\"geometry\"]))\n return data\n\n else:\n\n # convert lat,lon to address\n try:\n lats = list(map(lambda x: float(x),data[\"latitude\"]))\n lons = list(map(lambda x: float(x),data[\"longitude\"]))\n pos = list(map(lambda xy: Point(xy),list(zip(lons,lats))))\n except:\n pos = None\n if type(pos) == type(None):\n raise Exception(\"address resolution requires 'latitude' and 'longitude' fields\")\n for retries in range(config[\"retries\"]):\n try:\n addr = reverse_geocode(pos,\n provider = config[\"provider\"],\n user_agent = config[\"user_agent\"],\n timeout = config[\"timeout\"],\n )\n break\n except Exception as err:\n addr = err\n import time\n time.sleep(config[\"sleep\"])\n if type(addr) is Exception or type(addr) is ModuleNotFoundError:\n raise addr\n data[\"address\"] = Series(addr[\"address\"],dtype=\"string\").tolist()\n return data",
"def _compute_adress(self):\r\n\t\tfor leads in self:\r\n\t\t\tleads.address = leads.street + \" \" + leads.street2",
"def geo_coder(house_number, boro_code, street_name, zip_code): \r\n wa1 = '1B{}{}{}{}{}C{}{}'.format(rightpad(house_number, 16), rightpad('', 38), boro_code, rightpad('', 10), rightpad(street_name, 32), rightpad('', 113), rightpad(zip_code, 5))\r\n wa1 = rightpad(wa1, 1200)\r\n wa2 = rightpad('', 4300)\r\n NYCGeo.NYCgeo(wa1, wa2)\r\n return wa1, wa2",
"def address_dict(self):\n new_table = {}\n for record in self._table:\n address = self.build_address(record)\n new_table[address] = record\n return new_table",
"def get_address_and_parking():\n client = MongoClient()\n\n address = request.get_json()['address']\n time = request.get_json()['time']\n\n address_data = []\n space_data = []\n\n try:\n user_point = geo_functions.geocode_address(address)\n closeBlocks = geo_functions.findCloseBlocks(user_point[\"coordinates\"], 200 , client)\n except ValueError as e:\n return jsonify({\"message\": e.message}), 400\n\n address_data.append({\"type\": \"Feature\",\n \"geometry\": {\n \"type\": \"Point\",\n \"coordinates\": user_point[\"coordinates\"]},\n \"properties\": {\n \"cleanAddress\": user_point[\"address\"]\n }})\n\n blockCoords = geo_functions.findBlockCoordinates(closeBlocks, client)\n space_data.extend(blockCoords)\n\n try:\n space_data = geo_functions.getBlockAvailability(space_data, time, client)\n except ValueError as e:\n return jsonify({\"message\": e.message}), 400\n\n mapping_data = {\"address_data\": address_data,\n \"space_data\" : space_data}\n\n client.close()\n\n return jsonify(mapping_data)",
"def build_search_locations(suburbs=['Balgowlah']):\n\n postcode_file = os.path.join(os.path.join(os.path.dirname(os.path.realpath(__file__)),'..'),'postcodes.csv')\n postcodes = pd.read_csv(postcode_file)\n \n if 'NSW' in suburbs:\n postcodes = postcodes[postcodes['State'] == 'NSW']\n if 'QLD' in suburbs:\n postcodes = postcodes[postcodes['State'] == 'QLD']\n if 'SA' in suburbs:\n postcodes = postcodes[postcodes['State'] == 'SA']\n if 'NT' in suburbs:\n postcodes = postcodes[postcodes['State'] == 'NT']\n if 'ACT' in suburbs:\n postcodes = postcodes[postcodes['State'] == 'ACT']\n if 'WA' in suburbs:\n postcodes = postcodes[postcodes['State'] == 'WA']\n if 'TAS' in suburbs:\n postcodes = postcodes[postcodes['State'] == 'TAS']\n\n if set(suburbs).issubset(['All', 'NSW', 'QLD', 'SA', 'NT', 'ACT', 'WA', 'TAS']):\n suburbs = postcodes['Suburb']\n\n # buld the locations with additional parameters\n searchLocations = {}\n for suburb in suburbs:\n location_df = postcodes[postcodes['Suburb'] == suburb]\n\n if location_df.shape[0] > 0:\n location = {'state': location_df['State'].values[0], \n 'suburb': location_df['Suburb'].values[0], \n 'postcode': location_df['Postcode'].values[0],\n 'includeSurroundingSuburbs': True}\n searchLocations[suburb] = location\n else:\n print (f'{suburb} is not in the list.')\n\n return searchLocations",
"def build_address(record):\n pass",
"def site_address_etl():\r\n with arcetl.ArcETL(\"Site Addresses\") as etl:\r\n etl.extract(dataset.SITE_ADDRESS.path(\"maint\"))\r\n # Clean maintenance values.\r\n transform.clear_nonpositive(etl, field_names=[\"house_nbr\"])\r\n transform.clean_whitespace(\r\n etl,\r\n field_names=[\r\n \"house_suffix_code\",\r\n \"pre_direction_code\",\r\n \"street_name\",\r\n \"street_type_code\",\r\n \"unit_type_code\",\r\n \"unit_id\",\r\n \"city_name\",\r\n \"landuse\",\r\n \"maptaxlot\",\r\n \"account\",\r\n ],\r\n )\r\n transform.force_uppercase(\r\n etl,\r\n field_names=[\r\n \"house_suffix_code\",\r\n \"pre_direction_code\",\r\n \"street_name\",\r\n \"street_type_code\",\r\n \"unit_type_code\",\r\n \"unit_id\",\r\n \"maptaxlot\",\r\n \"valid\",\r\n \"archived\",\r\n ],\r\n )\r\n transform.clear_non_numeric_text(etl, field_names=[\"account\"])\r\n etl.transform(\r\n arcetl.attributes.update_by_function,\r\n field_name=\"landuse\",\r\n function=(lambda x: x if is_numeric(x) else \"0\"),\r\n )\r\n transform.force_yn(etl, field_names=[\"archived\"], default=\"N\")\r\n transform.force_yn(etl, field_names=[\"valid\"], default=\"Y\")\r\n transform.add_missing_fields(etl, dataset.SITE_ADDRESS, tags=[\"pub\"])\r\n # Assign geometry attributes.\r\n coordinate_system_xy_keys = {\r\n 2914: {\"x\": \"x_coordinate\", \"y\": \"y_coordinate\"},\r\n 4326: {\"x\": \"longitude\", \"y\": \"latitude\"},\r\n }\r\n for spatial_reference_id, xy_key in coordinate_system_xy_keys.items():\r\n for axis, key in xy_key.items():\r\n etl.transform(\r\n arcetl.attributes.update_by_geometry,\r\n field_name=key,\r\n spatial_reference_item=spatial_reference_id,\r\n geometry_properties=[\"centroid\", axis],\r\n )\r\n # Assign overlays.\r\n overlay_kwargs = [\r\n # City attributes.\r\n {\r\n \"field_name\": \"geocity\",\r\n \"overlay_field_name\": \"inccityabbr\",\r\n \"overlay_dataset_path\": dataset.INCORPORATED_CITY_LIMITS.path(),\r\n },\r\n {\r\n \"field_name\": \"annexhist\",\r\n \"overlay_field_name\": \"annexnum\",\r\n \"overlay_dataset_path\": dataset.ANNEXATION_HISTORY.path(\"pub\"),\r\n },\r\n # Have to do overlay rather than join because some lack codes.\r\n {\r\n \"field_name\": \"yearanx\",\r\n \"overlay_field_name\": \"annexyear\",\r\n \"overlay_dataset_path\": dataset.ANNEXATION_HISTORY.path(\"pub\"),\r\n },\r\n {\r\n \"field_name\": \"ugb\",\r\n \"overlay_field_name\": \"ugbcity\",\r\n \"overlay_dataset_path\": dataset.UGB.path(\"pub\"),\r\n },\r\n # Planning & zoning attributes.\r\n {\r\n \"field_name\": \"greenwy\",\r\n \"overlay_field_name\": \"greenway\",\r\n \"overlay_dataset_path\": dataset.WILLAMETTE_RIVER_GREENWAY.path(\"pub\"),\r\n },\r\n {\r\n \"field_name\": \"nodaldev\",\r\n \"overlay_field_name\": \"nodearea\",\r\n \"overlay_dataset_path\": dataset.NODAL_DEVELOPMENT_AREA.path(\"pub\"),\r\n },\r\n {\r\n \"field_name\": \"plandes_id\",\r\n \"overlay_field_name\": \"plandes_id\",\r\n \"overlay_dataset_path\": dataset.PLAN_DESIGNATION.path(\"pub\"),\r\n },\r\n {\r\n \"field_name\": \"sprsvcbndy\",\r\n \"overlay_field_name\": \"is_inside\",\r\n \"overlay_dataset_path\": dataset.SPRINGFIELD_HANSEN_EXTENT.path(),\r\n },\r\n # Public safety attributes.\r\n {\r\n \"field_name\": \"ambulance_district\",\r\n \"overlay_field_name\": \"asacode\",\r\n \"overlay_dataset_path\": dataset.AMBULANCE_SERVICE_AREA.path(\"pub\"),\r\n },\r\n {\r\n \"field_name\": \"firedist\",\r\n \"overlay_field_name\": \"fireprotprov\",\r\n \"overlay_dataset_path\": dataset.FIRE_PROTECTION_AREA.path(\"pub\"),\r\n },\r\n {\r\n 
\"field_name\": \"police_beat\",\r\n \"overlay_field_name\": \"CAD\",\r\n \"overlay_dataset_path\": os.path.join(\r\n path.LCOG_GIS_PROJECTS,\r\n \"Public_Safety\\\\PSAPS\\\\CLPSAP\\\\SunGard_CAD\\\\Maintained_Layers\",\r\n \"Maintained_Layers.gdb\\\\Fire_Law_Tow\\\\law_beat\",\r\n ),\r\n },\r\n {\r\n \"field_name\": \"psap_code\",\r\n \"overlay_field_name\": \"psap_code\",\r\n \"overlay_dataset_path\": dataset.PSAP_AREA.path(\"pub\"),\r\n },\r\n # Election attributes.\r\n {\r\n \"field_name\": \"electionpr\",\r\n \"overlay_field_name\": \"precntnum\",\r\n \"overlay_dataset_path\": dataset.ELECTION_PRECINCT.path(\"pub\"),\r\n },\r\n {\r\n \"field_name\": \"ccward\",\r\n \"overlay_field_name\": \"ward\",\r\n \"overlay_dataset_path\": dataset.CITY_WARD.path(),\r\n },\r\n {\r\n \"field_name\": \"clpud_subdivision\",\r\n \"overlay_field_name\": \"SUBDIVISIO\",\r\n \"overlay_dataset_path\": os.path.join(\r\n path.LCOG_GIS_PROJECTS,\r\n \"UtilityDistricts\\\\CentralLincolnPUD\\\\Redistricting2012\",\r\n \"CLPUD_Subdivisions.shp\",\r\n ),\r\n },\r\n {\r\n \"field_name\": \"cocommdist\",\r\n \"overlay_field_name\": \"commrdist\",\r\n \"overlay_dataset_path\": (\r\n dataset.COUNTY_COMMISSIONER_DISTRICT.path(\"pub\")\r\n ),\r\n },\r\n {\r\n \"field_name\": \"epud\",\r\n \"overlay_field_name\": \"boardid\",\r\n \"overlay_dataset_path\": dataset.EPUD_SUBDISTRICT.path(\"pub\"),\r\n },\r\n {\r\n \"field_name\": \"hwpud_subdivision\",\r\n \"overlay_field_name\": \"BoardZone\",\r\n \"overlay_dataset_path\": os.path.join(\r\n path.LCOG_GIS_PROJECTS,\r\n \"UtilityDistricts\\\\HecetaWaterPUD\\\\NewBoardSubzones\",\r\n \"HecetaData.gdb\",\r\n \"ScenarioB\",\r\n ),\r\n },\r\n {\r\n \"field_name\": \"lcczone\",\r\n \"overlay_field_name\": \"lccbrdzone\",\r\n \"overlay_dataset_path\": dataset.LCC_BOARD_ZONE.path(\"pub\"),\r\n },\r\n {\r\n \"field_name\": \"senatedist\",\r\n \"overlay_field_name\": \"sendist\",\r\n \"overlay_dataset_path\": dataset.STATE_SENATOR_DISTRICT.path(\"pub\"),\r\n },\r\n {\r\n \"field_name\": \"strepdist\",\r\n \"overlay_field_name\": \"repdist\",\r\n \"overlay_dataset_path\": (\r\n dataset.STATE_REPRESENTATIVE_DISTRICT.path(\"pub\")\r\n ),\r\n },\r\n {\r\n \"field_name\": \"swcd\",\r\n \"overlay_field_name\": \"swcdist\",\r\n \"overlay_dataset_path\": (\r\n dataset.SOIL_WATER_CONSERVATION_DISTRICT.path(\"pub\")\r\n ),\r\n },\r\n {\r\n \"field_name\": \"swcdzone\",\r\n \"overlay_field_name\": \"swczone\",\r\n \"overlay_dataset_path\": (\r\n dataset.SOIL_WATER_CONSERVATION_DISTRICT.path(\"pub\")\r\n ),\r\n },\r\n # Education attributes.\r\n {\r\n \"field_name\": \"schooldist\",\r\n \"overlay_field_name\": \"district\",\r\n \"overlay_dataset_path\": dataset.SCHOOL_DISTRICT.path(\"pub\"),\r\n },\r\n {\r\n \"field_name\": \"elem\",\r\n \"overlay_field_name\": \"attend\",\r\n \"overlay_dataset_path\": dataset.ELEMENTARY_SCHOOL_AREA.path(\"pub\"),\r\n },\r\n {\r\n \"field_name\": \"middle\",\r\n \"overlay_field_name\": \"attend\",\r\n \"overlay_dataset_path\": dataset.MIDDLE_SCHOOL_AREA.path(\"pub\"),\r\n },\r\n {\r\n \"field_name\": \"high\",\r\n \"overlay_field_name\": \"attend\",\r\n \"overlay_dataset_path\": dataset.HIGH_SCHOOL_AREA.path(\"pub\"),\r\n },\r\n # Transportation attributes.\r\n {\r\n \"field_name\": \"ltddist\",\r\n \"overlay_field_name\": \"LTD\",\r\n \"overlay_dataset_path\": os.path.join(\r\n path.REGIONAL_DATA, \"transport\\\\ltd\\\\2012 LTD Boundary.shp\"\r\n ),\r\n },\r\n {\r\n \"field_name\": \"ltdridesrc\",\r\n \"overlay_field_name\": \"LTD\",\r\n 
\"overlay_dataset_path\": os.path.join(\r\n path.REGIONAL_DATA, \"transport\\\\ltd\\\\2015 RideSource Boundary.shp\"\r\n ),\r\n },\r\n {\r\n \"field_name\": \"cats\",\r\n \"overlay_field_name\": \"CATSBNDY\",\r\n \"overlay_dataset_path\": os.path.join(\r\n path.REGIONAL_DATA, \"transport\\\\eug\\\\catsbndy.shp\"\r\n ),\r\n },\r\n {\r\n \"field_name\": \"trans_analysis_zone\",\r\n \"overlay_field_name\": \"TAZ_NUM\",\r\n \"overlay_dataset_path\": os.path.join(\r\n path.REGIONAL_DATA, \"transport\\\\MTAZ16.shp\"\r\n ),\r\n },\r\n # Natural attributes.\r\n {\r\n \"field_name\": \"firmnumber\",\r\n \"overlay_field_name\": \"firm_pan\",\r\n \"overlay_dataset_path\": os.path.join(\r\n path.REGIONAL_DATA, \"natural\\\\flood\\\\Flood.gdb\\\\FIRMPanel\"\r\n ),\r\n },\r\n {\r\n \"field_name\": \"soilkey\",\r\n \"overlay_field_name\": \"mukey\",\r\n \"overlay_dataset_path\": os.path.join(\r\n path.REGIONAL_DATA, \"natural\\\\soils\\\\Soils.gdb\\\\Soil\"\r\n ),\r\n },\r\n {\r\n \"field_name\": \"wetland\",\r\n \"overlay_field_name\": \"WET_TYPE\",\r\n \"overlay_dataset_path\": os.path.join(\r\n path.REGIONAL_DATA, \"natural\\\\eug\\\\Wetland\\\\wetlands.shp\"\r\n ),\r\n },\r\n # Census attributes.\r\n {\r\n \"field_name\": \"ctract\",\r\n \"overlay_field_name\": \"TRACT\",\r\n \"overlay_dataset_path\": os.path.join(\r\n path.REGIONAL_DATA,\r\n \"federal\\\\census\\\\lane\\\\2010\",\r\n \"lc_census2010.gdb\\\\lc_tracts2010\",\r\n ),\r\n },\r\n {\r\n \"field_name\": \"blockgr\",\r\n \"overlay_field_name\": \"BlockGroup\",\r\n \"overlay_dataset_path\": os.path.join(\r\n path.REGIONAL_DATA,\r\n \"federal\\\\census\\\\lane\\\\2010\",\r\n \"lc_census2010.gdb\\\\lc_blockgroups2010\",\r\n ),\r\n },\r\n # Other district attributes.\r\n {\r\n \"field_name\": \"neighbor\",\r\n \"overlay_field_name\": \"NEIBORHD\",\r\n \"overlay_dataset_path\": os.path.join(\r\n path.REGIONAL_DATA,\r\n \"boundary\\\\districts\\\\eug\",\r\n \"Boundary.gdb\\\\EugNeighborhoods\",\r\n ),\r\n },\r\n ]\r\n for kwargs in overlay_kwargs:\r\n etl.transform(\r\n arcetl.attributes.update_by_overlay,\r\n overlay_central_coincident=True,\r\n **kwargs\r\n )\r\n # Override overlays for special cases.\r\n for override in OVERRIDE_ATTRS:\r\n for kwargs in OVERRIDE_ATTRS[override].get(\"overlay_kwargs\", []):\r\n etl.transform(\r\n arcetl.attributes.update_by_value,\r\n dataset_where_sql=OVERRIDE_ATTRS[override].get(\"where_sql\"),\r\n **kwargs\r\n )\r\n # Clean overlay values.\r\n transform.clean_whitespace(\r\n etl, field_names=[\"police_beat\", \"wetland\", \"ctract\", \"blockgr\", \"neighbor\"]\r\n )\r\n transform.force_uppercase(etl, field_names=[\"cats\", \"ltddist\", \"ltdridesrc\"])\r\n # Set default overlay values where missing.\r\n transform.force_yn(\r\n etl,\r\n field_names=[\"greenwy\", \"sprsvcbndy\", \"cats\", \"ltddist\", \"ltdridesrc\"],\r\n default=\"N\",\r\n )\r\n # Remove invalid overlay values.\r\n transform.clear_nonpositive(etl, field_names=[\"ctract\", \"blockgr\"])\r\n etl.transform(\r\n arcetl.attributes.update_by_function,\r\n field_name=\"neighbor\",\r\n function=(lambda x: x if x and int(x) != 99 else None),\r\n )\r\n # Assign joinable field values after overlays.\r\n join_kwargs = [\r\n # Core attributes.\r\n {\r\n \"field_name\": \"pre_direction\",\r\n \"join_field_name\": \"description\",\r\n \"join_dataset_path\": dataset.STREET_DIRECTION.path(),\r\n \"on_field_pairs\": [(\"pre_direction_code\", \"code\")],\r\n },\r\n {\r\n \"field_name\": \"street_type\",\r\n \"join_field_name\": \"description\",\r\n 
\"join_dataset_path\": dataset.STREET_TYPE.path(),\r\n \"on_field_pairs\": [(\"street_type_code\", \"code\")],\r\n },\r\n {\r\n \"field_name\": \"unit_type\",\r\n \"join_field_name\": \"description\",\r\n \"join_dataset_path\": dataset.UNIT_TYPE.path(),\r\n \"on_field_pairs\": [(\"unit_type_code\", \"code\")],\r\n },\r\n {\r\n \"field_name\": \"city_name_abbr\",\r\n \"join_field_name\": \"CityNameAbbr\",\r\n \"join_dataset_path\": dataset.CITY.path(),\r\n \"on_field_pairs\": [(\"city_name\", \"CityName\")],\r\n },\r\n # Extended attributes.\r\n {\r\n \"field_name\": \"five_digit_zip_code\",\r\n \"join_field_name\": \"zip_code\",\r\n \"join_dataset_path\": dataset.ADDRESS_POSTAL_INFO.path(),\r\n \"on_field_pairs\": [(\"geofeat_id\", \"geofeat_id\")],\r\n },\r\n # Any addresses not assigned zip from USPS gets an overlay zip.\r\n {\r\n \"field_name\": \"five_digit_zip_code\",\r\n \"dataset_where_sql\": \"five_digit_zip_code is null\",\r\n \"join_field_name\": \"zip_code_overlay\",\r\n \"join_dataset_path\": dataset.ADDRESS_POSTAL_INFO.path(),\r\n \"on_field_pairs\": [(\"geofeat_id\", \"geofeat_id\")],\r\n },\r\n {\r\n \"field_name\": \"four_digit_zip_code\",\r\n \"join_field_name\": \"plus_four_code\",\r\n \"join_dataset_path\": dataset.ADDRESS_POSTAL_INFO.path(),\r\n \"on_field_pairs\": [(\"geofeat_id\", \"geofeat_id\")],\r\n },\r\n {\r\n \"field_name\": \"usps_delivery_point_code\",\r\n \"join_field_name\": \"delivery_point_code\",\r\n \"join_dataset_path\": dataset.ADDRESS_POSTAL_INFO.path(),\r\n \"on_field_pairs\": [(\"geofeat_id\", \"geofeat_id\")],\r\n },\r\n {\r\n \"field_name\": \"postal_carrier_route\",\r\n \"join_field_name\": \"carrier_route\",\r\n \"join_dataset_path\": dataset.ADDRESS_POSTAL_INFO.path(),\r\n \"on_field_pairs\": [(\"geofeat_id\", \"geofeat_id\")],\r\n },\r\n {\r\n \"field_name\": \"usps_is_cmra\",\r\n \"join_field_name\": \"is_cmra\",\r\n \"join_dataset_path\": dataset.ADDRESS_POSTAL_INFO.path(),\r\n \"on_field_pairs\": [(\"geofeat_id\", \"geofeat_id\")],\r\n },\r\n {\r\n \"field_name\": \"usps_is_vacant\",\r\n \"join_field_name\": \"is_vacant\",\r\n \"join_dataset_path\": dataset.ADDRESS_POSTAL_INFO.path(),\r\n \"on_field_pairs\": [(\"geofeat_id\", \"geofeat_id\")],\r\n },\r\n {\r\n \"field_name\": \"usps_has_mail_service\",\r\n \"join_field_name\": \"has_mail_service\",\r\n \"join_dataset_path\": dataset.ADDRESS_POSTAL_INFO.path(),\r\n \"on_field_pairs\": [(\"geofeat_id\", \"geofeat_id\")],\r\n },\r\n {\r\n \"field_name\": \"landuse_desc\",\r\n \"join_field_name\": \"ludesc\",\r\n \"join_dataset_path\": dataset.LAND_USE_CODES_DETAILED.path(\"pub\"),\r\n \"on_field_pairs\": [(\"landuse\", \"landusec\")],\r\n },\r\n {\r\n \"field_name\": \"usecode\",\r\n \"join_field_name\": \"usecode\",\r\n \"join_dataset_path\": dataset.LAND_USE_CODES_DETAILED.path(\"pub\"),\r\n \"on_field_pairs\": [(\"landuse\", \"landusec\")],\r\n },\r\n {\r\n \"field_name\": \"usedesc\",\r\n \"join_field_name\": \"ucname\",\r\n \"join_dataset_path\": dataset.LAND_USE_CODES_USE_CODES.path(\"pub\"),\r\n \"on_field_pairs\": [(\"usecode\", \"usecode\")],\r\n },\r\n # A&T attributes.\r\n {\r\n \"field_name\": \"tca\",\r\n \"join_field_name\": \"tax_code_overlay\",\r\n \"join_dataset_path\": dataset.ADDRESS_ASSESS_TAX_INFO.path(),\r\n \"on_field_pairs\": [(\"geofeat_id\", \"geofeat_id\")],\r\n },\r\n # City attributes.\r\n {\r\n \"field_name\": \"geocity_name\",\r\n \"join_field_name\": \"inccityname\",\r\n \"join_dataset_path\": dataset.INCORPORATED_CITY_LIMITS.path(),\r\n \"on_field_pairs\": 
[(\"geocity\", \"inccityabbr\")],\r\n },\r\n {\r\n \"field_name\": \"ugb_city_name\",\r\n \"join_field_name\": \"ugbcityname\",\r\n \"join_dataset_path\": dataset.UGB.path(\"pub\"),\r\n \"on_field_pairs\": [(\"ugb\", \"ugbcity\")],\r\n },\r\n # Planning & zoning attributes.\r\n {\r\n \"field_name\": \"nodaldev_name\",\r\n \"join_field_name\": \"nodename\",\r\n \"join_dataset_path\": dataset.NODAL_DEVELOPMENT_AREA.path(\"pub\"),\r\n \"on_field_pairs\": [(\"nodaldev\", \"nodearea\")],\r\n },\r\n {\r\n \"field_name\": \"plandesjuris\",\r\n \"join_field_name\": \"planjuris\",\r\n \"join_dataset_path\": dataset.PLAN_DESIGNATION.path(\"pub\"),\r\n \"on_field_pairs\": [(\"plandes_id\", \"plandes_id\")],\r\n },\r\n {\r\n \"field_name\": \"plandes\",\r\n \"join_field_name\": \"plandes\",\r\n \"join_dataset_path\": dataset.PLAN_DESIGNATION.path(\"pub\"),\r\n \"on_field_pairs\": [(\"plandes_id\", \"plandes_id\")],\r\n },\r\n {\r\n \"field_name\": \"plandesdesc\",\r\n \"join_field_name\": \"plandesnam\",\r\n \"join_dataset_path\": dataset.PLAN_DESIGNATION.path(\"pub\"),\r\n \"on_field_pairs\": [(\"plandes_id\", \"plandes_id\")],\r\n },\r\n # Public safety attributes.\r\n {\r\n \"field_name\": \"ambulance_service_area\",\r\n \"join_field_name\": \"asa\",\r\n \"join_dataset_path\": dataset.AMBULANCE_SERVICE_AREA.path(\"pub\"),\r\n \"on_field_pairs\": [(\"ambulance_district\", \"asacode\")],\r\n },\r\n {\r\n \"field_name\": \"ambulance_service_provider\",\r\n \"join_field_name\": \"provider\",\r\n \"join_dataset_path\": dataset.AMBULANCE_SERVICE_AREA.path(\"pub\"),\r\n \"on_field_pairs\": [(\"ambulance_district\", \"asacode\")],\r\n },\r\n {\r\n \"field_name\": \"fire_protection_provider\",\r\n \"join_field_name\": \"fpprovname\",\r\n \"join_dataset_path\": dataset.FIRE_PROTECTION_AREA.path(\"pub\"),\r\n \"on_field_pairs\": [(\"firedist\", \"fireprotprov\")],\r\n },\r\n {\r\n \"field_name\": \"psap_name\",\r\n \"join_field_name\": \"psap_name\",\r\n \"join_dataset_path\": dataset.PSAP_AREA.path(\"pub\"),\r\n \"on_field_pairs\": [(\"psap_code\", \"psap_code\")],\r\n },\r\n {\r\n \"field_name\": \"emergency_service_number\",\r\n \"join_field_name\": \"emergency_service_number\",\r\n \"join_dataset_path\": dataset.EMERGENCY_SERVICE_NUMBER.path(),\r\n \"on_field_pairs\": [\r\n # City used as proxy for police.\r\n (\"geocity\", \"city_limits\"),\r\n (\"ambulance_district\", \"asa_code\"),\r\n (\"firedist\", \"fire_district\"),\r\n (\"psap_code\", \"psap_code\")\r\n ],\r\n },\r\n {\r\n \"field_name\": \"emergency_service_number\",\r\n \"join_field_name\": \"emergency_service_number\",\r\n \"join_dataset_path\": dataset.EMERGENCY_SERVICE_NUMBER.path(),\r\n \"on_field_pairs\": [\r\n # City used as proxy for police.\r\n (\"geocity\", \"city_limits\"),\r\n (\"ambulance_district\", \"asa_code\"),\r\n (\"firedist\", \"fire_district\"),\r\n ],\r\n \"dataset_where_sql\": \"emergency_service_number is null\",\r\n },\r\n # Election attributes.\r\n {\r\n \"field_name\": \"city_councilor\",\r\n \"join_field_name\": \"councilor\",\r\n \"join_dataset_path\": dataset.CITY_WARD.path(),\r\n \"on_field_pairs\": [(\"ccward\", \"ward\")],\r\n },\r\n {\r\n \"field_name\": \"cocommdist_name\",\r\n \"join_field_name\": \"cmdistname\",\r\n \"join_dataset_path\": dataset.COUNTY_COMMISSIONER_DISTRICT.path(\"pub\"),\r\n \"on_field_pairs\": [(\"cocommdist\", \"commrdist\")],\r\n },\r\n {\r\n \"field_name\": \"county_commissioner\",\r\n \"join_field_name\": \"commrname\",\r\n \"join_dataset_path\": 
dataset.COUNTY_COMMISSIONER_DISTRICT.path(\"pub\"),\r\n \"on_field_pairs\": [(\"cocommdist\", \"commrdist\")],\r\n },\r\n {\r\n \"field_name\": \"eweb_commissioner_name\",\r\n \"join_field_name\": \"eweb_commissioner_name\",\r\n \"join_dataset_path\": dataset.EWEB_COMMISSIONER.path(\"pub\"),\r\n \"on_field_pairs\": [(\"ccward\", \"city_council_ward\")],\r\n },\r\n {\r\n \"field_name\": \"state_representative\",\r\n \"join_field_name\": \"repname\",\r\n \"join_dataset_path\": dataset.STATE_REPRESENTATIVE_DISTRICT.path(\"pub\"),\r\n \"on_field_pairs\": [(\"strepdist\", \"repdist\")],\r\n },\r\n {\r\n \"field_name\": \"state_senator\",\r\n \"join_field_name\": \"senname\",\r\n \"join_dataset_path\": dataset.STATE_SENATOR_DISTRICT.path(\"pub\"),\r\n \"on_field_pairs\": [(\"senatedist\", \"sendist\")],\r\n },\r\n # Education attributes.\r\n {\r\n \"field_name\": \"schooldist_name\",\r\n \"join_field_name\": \"names\",\r\n \"join_dataset_path\": dataset.SCHOOL_DISTRICT.path(\"pub\"),\r\n \"on_field_pairs\": [(\"schooldist\", \"district\")],\r\n },\r\n {\r\n \"field_name\": \"elem_name\",\r\n \"join_field_name\": \"elem_school\",\r\n \"join_dataset_path\": dataset.ELEMENTARY_SCHOOL_AREA.path(\"pub\"),\r\n \"on_field_pairs\": [(\"elem\", \"attend\")],\r\n },\r\n {\r\n \"field_name\": \"middle_name\",\r\n \"join_field_name\": \"middle_school\",\r\n \"join_dataset_path\": dataset.MIDDLE_SCHOOL_AREA.path(\"pub\"),\r\n \"on_field_pairs\": [(\"middle\", \"attend\")],\r\n },\r\n {\r\n \"field_name\": \"high_name\",\r\n \"join_field_name\": \"high_school\",\r\n \"join_dataset_path\": dataset.HIGH_SCHOOL_AREA.path(\"pub\"),\r\n \"on_field_pairs\": [(\"high\", \"attend\")],\r\n },\r\n # Natural attributes.\r\n {\r\n \"field_name\": \"firmprinted\",\r\n \"join_field_name\": \"panel_printed\",\r\n \"join_dataset_path\": os.path.join(\r\n path.REGIONAL_DATA, \"natural\\\\flood\\\\Flood.gdb\\\\FIRMPanel\"\r\n ),\r\n \"on_field_pairs\": [(\"firmnumber\", \"firm_pan\")],\r\n },\r\n {\r\n \"field_name\": \"firm_community_id\",\r\n \"join_field_name\": \"com_nfo_id\",\r\n \"join_dataset_path\": os.path.join(\r\n path.REGIONAL_DATA, \"natural\\\\flood\\\\Flood.gdb\\\\CommunityInfo\"\r\n ),\r\n \"on_field_pairs\": [(\"geocity\", \"community_code\")],\r\n },\r\n {\r\n \"field_name\": \"firm_community_post_firm_date\",\r\n \"join_field_name\": \"in_frm_dat\",\r\n \"join_dataset_path\": os.path.join(\r\n path.REGIONAL_DATA, \"natural\\\\flood\\\\Flood.gdb\\\\CommunityInfo\"\r\n ),\r\n \"on_field_pairs\": [(\"geocity\", \"community_code\")],\r\n },\r\n {\r\n \"field_name\": \"soiltype\",\r\n \"join_field_name\": \"musym\",\r\n \"join_dataset_path\": os.path.join(\r\n path.REGIONAL_DATA, \"natural\\\\soils\\\\Soils.gdb\\\\MUAggAtt\"\r\n ),\r\n \"on_field_pairs\": [(\"soilkey\", \"mukey\")],\r\n },\r\n # Other district attributes.\r\n {\r\n \"field_name\": \"neighborhood_name\",\r\n \"join_field_name\": \"NAME\",\r\n \"join_dataset_path\": os.path.join(\r\n path.REGIONAL_DATA,\r\n \"boundary\\\\districts\\\\eug\\\\Boundary.gdb\\\\EugNeighborhoods\",\r\n ),\r\n \"on_field_pairs\": [(\"neighbor\", \"NEIBORHD\")],\r\n },\r\n ]\r\n for kwargs in join_kwargs:\r\n etl.transform(arcetl.attributes.update_by_joined_value, **kwargs)\r\n # Clean join values.\r\n transform.clean_whitespace(etl, field_names=[\"neighborhood_name\"])\r\n # Remove Metro Plan designations, per City of Eugene request.\r\n transform.clear_all_values(\r\n etl,\r\n field_names=[\"plandes\", \"plandesdesc\"],\r\n dataset_where_sql=\"plandesjuris = 'MTP'\",\r\n 
)\r\n # Remove +4 ZIP where initial ZIP is missing.\r\n transform.clear_all_values(\r\n etl,\r\n field_names=[\"four_digit_zip_code\"],\r\n dataset_where_sql=\"five_digit_zip_code is null\",\r\n )\r\n # Assign constants.\r\n constant_kwargs = [\r\n {\"field_name\": \"state_code\", \"value\": \"OR\"},\r\n {\"field_name\": \"state_name\", \"value\": \"Oregon\"},\r\n {\"field_name\": \"county_name\", \"value\": \"Lane\"},\r\n ]\r\n for kwargs in constant_kwargs:\r\n etl.transform(arcetl.attributes.update_by_value, **kwargs)\r\n # Override constants for special cases.\r\n for override in OVERRIDE_ATTRS:\r\n for kwargs in OVERRIDE_ATTRS[override].get(\"constant_kwargs\", []):\r\n etl.transform(\r\n arcetl.attributes.update_by_value,\r\n dataset_where_sql=OVERRIDE_ATTRS[override].get(\"where_sql\"),\r\n **kwargs\r\n )\r\n # Build values from functions.\r\n function_kwargs = [\r\n {\r\n \"field_name\": \"street_name_full\",\r\n \"function\": concatenate_arguments,\r\n \"arg_field_names\": [\r\n \"pre_direction_code\",\r\n \"street_name\",\r\n \"street_type_code\",\r\n ],\r\n },\r\n {\r\n \"field_name\": \"city_state_zip\",\r\n \"function\": city_state_zip,\r\n \"kwarg_field_names\": [\"city_name\", \"state_code\", \"five_digit_zip_code\"],\r\n },\r\n {\r\n \"field_name\": \"concat_address_no_unit\",\r\n \"function\": concatenate_arguments,\r\n \"arg_field_names\": [\r\n \"house_nbr\",\r\n \"house_suffix_code\",\r\n \"street_name_full\",\r\n ],\r\n },\r\n {\r\n \"field_name\": \"concat_address\",\r\n \"function\": concatenate_arguments,\r\n \"arg_field_names\": [\r\n \"concat_address_no_unit\",\r\n \"unit_type_code\",\r\n \"unit_id\",\r\n ],\r\n },\r\n {\r\n \"field_name\": \"concat_address_no_direction\",\r\n \"function\": concatenate_arguments,\r\n \"arg_field_names\": [\r\n \"house_nbr\",\r\n \"house_suffix_code\",\r\n \"street_name\",\r\n \"street_type_code\",\r\n \"unit_type_code\",\r\n \"unit_id\",\r\n ],\r\n },\r\n {\r\n \"field_name\": \"concat_address_full\",\r\n \"function\": concat_address_full,\r\n \"kwarg_field_names\": [\r\n \"concat_address\",\r\n \"city_name\",\r\n \"state_code\",\r\n \"five_digit_zip_code\",\r\n \"four_digit_zip_code\",\r\n ],\r\n },\r\n {\r\n \"field_name\": \"mapnumber\",\r\n \"function\": (lambda x: x[:8] if x else None),\r\n \"arg_field_names\": [\"maptaxlot\"],\r\n },\r\n {\r\n \"field_name\": \"taxlot\",\r\n \"function\": (lambda x: x[-5:] if x else None),\r\n \"arg_field_names\": [\"maptaxlot\"],\r\n },\r\n {\r\n \"field_name\": \"maptaxlot_hyphen\",\r\n \"function\": maptaxlot_separated,\r\n \"arg_field_names\": [\"maptaxlot\"],\r\n },\r\n ]\r\n for kwargs in function_kwargs:\r\n etl.transform(\r\n arcetl.attributes.update_by_function, field_as_first_arg=False, **kwargs\r\n )\r\n # Take care of addresses flagged not to update in publication.\r\n ids = {}\r\n id_set_kwargs = {\r\n \"in_publication\": {\"dataset_path\": dataset.SITE_ADDRESS.path(\"pub\")},\r\n \"in_transform\": {\"dataset_path\": etl.transform_path},\r\n \"no_update\": {\r\n \"dataset_path\": dataset.ADDRESS_ISSUES.path(),\r\n \"dataset_where_sql\": \"update_publication = 0\",\r\n },\r\n }\r\n for key, kwargs in id_set_kwargs.items():\r\n ids[key] = set(\r\n _id\r\n for _id, in arcetl.attributes.as_iters(\r\n field_names=\"site_address_gfid\", **kwargs\r\n )\r\n )\r\n ids[\"rollback\"] = ids[\"no_update\"] & ids[\"in_transform\"] & ids[\"in_publication\"]\r\n ids[\"hold\"] = ids[\"no_update\"] & (ids[\"in_transform\"] - ids[\"in_publication\"])\r\n rollback_features = [\r\n feat\r\n for feat 
in arcetl.attributes.as_dicts(dataset.SITE_ADDRESS.path(\"pub\"))\r\n if feat[\"site_address_gfid\"] in ids[\"rollback\"]\r\n ]\r\n # Strip OIDs (not part of update).\r\n for feat in rollback_features:\r\n del feat[\"oid@\"]\r\n if rollback_features:\r\n etl.transform(\r\n arcetl.features.update_from_dicts,\r\n update_features=rollback_features,\r\n id_field_names=\"site_address_gfid\",\r\n field_names=rollback_features[0].keys(),\r\n delete_missing_features=False,\r\n )\r\n etl.transform(\r\n arcetl.features.delete_by_id,\r\n delete_ids=ids[\"hold\"],\r\n id_field_names=\"site_address_gfid\",\r\n )\r\n LOG.info(\"%s addresses held from publication\", len(ids[\"hold\"]))\r\n LOG.info(\"%s addresses rolled-back from publication\", len(ids[\"rollback\"]))\r\n if any([ids[\"hold\"], ids[\"rollback\"]]):\r\n send_publication_issues_message()\r\n etl.load(dataset.SITE_ADDRESS.path(\"pub\"))\r\n send_new_lincom_address_message()",
"def _prepare_geocode_result(results):\n # Prepare the data for the DataFrame as a dict of lists\n d = defaultdict(list)\n index = []\n\n for i, s in iteritems(results):\n address, loc = s\n\n # loc is lat, lon and we want lon, lat\n if loc is None:\n p = Point()\n else:\n p = Point(loc[1], loc[0])\n\n if address is None:\n address = np.nan\n\n d['geometry'].append(p)\n d['address'].append(address)\n index.append(i)\n\n df = gpd.GeoDataFrame(d, index=index)\n df.crs = from_epsg(4326)\n\n return df",
"def formatAddress():\n # Strings to load data\n stringFile = '/Users/Louis/Documents/Research/Code/cleanedData/'\n days = {'cleaned01-Dec-2015':2,# tuesday\n 'cleaned02-Dec-2015':3,# wednesday\n 'cleaned03-Dec-2015':4,# ...\n 'cleaned04-Dec-2015':5,\n 'cleaned07-Dec-2015':1,\n 'cleaned08-Dec-2015':2,\n 'cleaned09-Dec-2015':3,\n 'cleaned10-Dec-2015':4,\n 'cleaned11-Dec-2015':5,\n 'cleaned14-Dec-2015':1,\n 'cleaned15-Dec-2015':2,\n 'cleaned16-Dec-2015':3,\n 'cleaned17-Dec-2015':4,\n 'cleaned18-Dec-2015':5,\n 'cleaned21-Dec-2015':1}\n \n # Store results\n addresses = []\n CourierSuppliedAddresses = []\n \n for day in days.keys():\n # Configuration for CSV reading\n with open(stringFile+day+'_modified.csv') as csvfile:\n # Dictionary containing the info\n reader = csv.DictReader(csvfile,delimiter = ',')\n # print(day)\n \n for row in reader:\n addresses.append(row['Address'])\n CourierSuppliedAddresses.append(row['CourierSuppliedAddress'])\n \n addresses = list(set(addresses))\n addresses.sort()\n \n CourierSuppliedAddresses = list(set(CourierSuppliedAddresses))\n CourierSuppliedAddresses.sort()\n return addresses, CourierSuppliedAddresses",
"def get_address() -> pd.DataFrame:\n return GETTER.organisationaddress.merge(GETTER.address, on=\"address_id\").drop(\n \"address_id\", 1\n )",
"def pandas_address_view(base_directory, filter_to_locality=None):\n\n # Define the paths required\n street_locality_file = os.path.join(\n base_directory, 'Standard', 'SA_STREET_LOCALITY_psv.psv')\n address_detail_file = os.path.join(\n base_directory, 'Standard', 'SA_ADDRESS_DETAIL_psv.psv')\n address_default_geocode_file = os.path.join(\n base_directory, 'Standard', 'SA_ADDRESS_DEFAULT_GEOCODE_psv.psv')\n\n # Load the data\n #\n # Only keep these columns as things like the creation date aren't needed.\n street_locality_columns = [\n \"STREET_LOCALITY_PID\", \"STREET_CLASS_CODE\", \"STREET_NAME\",\n 'STREET_TYPE_CODE', 'STREET_SUFFIX_CODE',\n ]\n\n address_detail_columns_to_ignore = {\n 'DATE_CREATED', 'DATE_LAST_MODIFIED', 'DATE_RETIRED', 'GNAF_PROPERTY_PID',\n }\n\n geocode_columns = [\n 'ADDRESS_DETAIL_PID', 'LONGITUDE', 'LATITUDE',\n # GEOCODE_TYPE_CODE helps identifier where it refers to.\n ]\n\n def should_keep_address_detail_column(column):\n return column not in address_detail_columns_to_ignore\n\n street_locality = pandas.read_csv(street_locality_file,\n sep='|',\n usecols=street_locality_columns)\n address_detail = pandas.read_csv(address_detail_file,\n sep='|',\n dtype={\n 'BUILDING_NAME': str,\n 'NUMBER_FIRST': str,\n 'NUMBER_FIRST_SUFFIX': str,\n },\n keep_default_na=False,\n usecols=should_keep_address_detail_column)\n address_geocode = pandas.read_csv(address_default_geocode_file,\n sep='|',\n usecols=geocode_columns)\n\n if filter_to_locality:\n # Filter address detail down to a specific locality\n address_detail = address_detail.loc[\n address_detail['LOCALITY_PID'] == filter_to_locality]\n\n merged = address_detail.join(\n street_locality.set_index('STREET_LOCALITY_PID'),\n on='STREET_LOCALITY_PID',\n lsuffix='_address', rsuffix='_street')\n\n merged = merged.join(\n address_geocode.set_index('ADDRESS_DETAIL_PID'),\n on='ADDRESS_DETAIL_PID',\n rsuffix='_geocode')\n\n return merged",
"def create_locs(address):\r\n geolocator = Nominatim(user_agent = 'SF_Parking_EDA')\r\n try:\r\n location = geolocator.geocode(address, timeout = 10)\r\n except:\r\n location = None\r\n time.sleep(1)\r\n\r\n if location != None and check_location(location):\r\n return (location.latitude, location.longitude )\r\n else:\r\n return None",
"def forward_geocode(self, params, address_input_data ):\n processed_address_list = []\n # check avoids redundancy for combined 'forward geocode and validate' \n # option as API does both by default\n if self.__is_address_list_processed:\n processed_address_list = address_input_data\n else:\n request_list = self.__prepare_smarty_request_list(address_input_data)\n processed_address_list = self.__process_smarty_request_list(request_list, \n address_input_data )\n self.__is_address_list_processed = True\n print(f'< {self.num_addresses_processed} addresses processed >')\n return processed_address_list",
"def partition_geocode(con: sqlite3.Connection, cur: sqlite3.Cursor, quarter: str, county_cht: str):\n cur.execute('''SELECT 土地區段位置或建物區門牌 FROM \"{0}/TRX\"\n WHERE 縣市 = ?\n GROUP BY 土地區段位置或建物區門牌;'''.format(quarter), (county_cht,))\n for address, in cur.fetchall():\n cur.execute('''SELECT GEO.編號\n FROM \"{0}/TRX\" AS TRX, \"{0}/GEO\" AS GEO\n WHERE TRX.編號 = GEO.編號\n AND TRX.土地區段位置或建物區門牌 = ?\n AND GEO.LAT_Avg ISNULL;'''.format(quarter), (address,))\n identities = cur.fetchall()\n if not identities:\n continue\n print(\"[%d] \"%(len(identities)) + address)\n try:\n results = selective_geocode(address)\n except geo.AddressError:\n continue\n if len(results[\"lat\"]) != 5 or len(results[\"lon\"]) != 5:\n continue\n results[\"lat\"].append(sum(results[\"lat\"]) / len(results[\"lat\"]))\n results[\"lon\"].append(sum(results[\"lon\"]) / len(results[\"lon\"]))\n combined = [num for zipped in zip(results[\"lat\"], results[\"lon\"]) for num in zipped]\n values = [(tuple(combined) + identity) for identity in identities]\n cur.executemany('''UPDATE \"{0}/GEO\" SET\n LAT_1 = ?, LON_1 = ?,\n LAT_2 = ?, LON_2 = ?,\n LAT_3 = ?, LON_3 = ?,\n LAT_4 = ?, LON_4 = ?,\n LAT_5 = ?, LON_5 = ?,\n LAT_Avg = ?, LON_Avg = ?\n WHERE 編號 = ?;'''.format(quarter), values)\n con.commit()",
"def get_coordinates(table, replace_columns=False, remove_nans=False):\n assert \"zip code\" in table.labels or ((\"city\" in table.labels or \"county\" in table.labels) and \"state\" in table.labels)\n ref = Table.read_table(pkg_resources.resource_filename(__name__, \"geodata/geocode_states.csv\"))\n\n index_name = \"\".join(table.labels) # Ensures that index can't possibly be one of the preexisting columns\n index_name += \" \"\n \n table = table.with_columns(index_name, np.arange(table.num_rows))\n lat = np.array([np.nan] * table.num_rows)\n lon = np.array([np.nan] * table.num_rows)\n unassigned = set(range(table.num_rows)) \n while len(unassigned) > 0:\n index = unassigned.pop()\n row = table.take(index).take(0)\n if \"zip code\" in table.labels:\n select = table.where(\"zip code\", row[\"zip code\"][0]).column(index_name)\n unassigned -= set(select)\n try:\n ref_lat, ref_lon = ref.where(\"zip\", int(row[\"zip code\"][0])).select(\"lat\", \"lon\").row(0)\n lat[select] = ref_lat\n lon[select] = ref_lon\n except IndexError:\n pass\n else:\n state_select = table.where(\"state\", row[\"state\"][0]).column(index_name)\n county_select = table.where(\"county\", row[\"county\"][0]).column(index_name) if \"county\" in table.labels else np.arange(table.num_rows)\n city_select = table.where(\"city\", row[\"city\"][0]).column(index_name) if \"city\" in table.labels else np.arange(table.num_rows)\n select = set.intersection(set(state_select), set(county_select), set(city_select))\n unassigned -= select\n select = list(select)\n try:\n matched_ref = ref.where(\"state\", row[\"state\"][0])\n if \"county\" in table.labels:\n matched_ref = matched_ref.where(\"county\", row[\"county\"][0].lower())\n if \"city\" in table.labels:\n matched_ref = matched_ref.where(\"city\", row[\"city\"][0].lower())\n ref_lat, ref_lon = matched_ref.select(\"lat\", \"lon\").row(0)\n lat[select] = ref_lat\n lon[select] = ref_lon\n except IndexError:\n pass\n table = table.with_columns(\"lat\", lat, \"lon\", lon)\n table = table.drop(index_name)\n if replace_columns:\n for label in [\"county\", \"city\", \"zip code\", \"state\"]:\n try:\n table = table.drop(label)\n except KeyError:\n pass\n if remove_nans: \n table = table.where(\"lat\", are.below(float(\"inf\"))) # NaNs are not considered to be smaller than infinity\n return table",
"def suggestions(self, input, borough_code=None):\n parsed = parser.address(input)\n if borough_code:\n parsed['BOROUGH_CODE'] = borough_code\n self.similiar_names = []\n self.results = []\n if parsed['PHN'] and parsed['STREET']:\n if not parsed['BOROUGH_CODE'] and not parsed['ZIP']:\n # iterate borocodes\n for x in range(1, 6):\n self._geocode(phn=parsed['PHN'], street=parsed['STREET'], borough_code=x)\n # try address with borough code if present\n elif parsed['BOROUGH_CODE']:\n self._geocode(phn=parsed['PHN'], street=parsed['STREET'], borough_code=parsed['BOROUGH_CODE'])\n # try address with zip code if present\n elif parsed['ZIP']:\n self._geocode(phn=parsed['PHN'], street=parsed['STREET'], zip=parsed['ZIP'])\n # validate and retrieve any addresses\n if len(self.similiar_names):\n for name in self.similiar_names:\n self._geocode(phn=parsed['PHN'], street=name['street'], borough_code=name['borough_code'])\n if None in self.results:\n self.results = list(filter(lambda v: v is not None, self.results))\n\n return self.results",
"def Process_address(self):\n\n self.data = np.load(self.cache_path, allow_pickle=True)[()]\n\n ID = self.data.keys()\n\n cnt = 0\n for id in tqdm(ID):\n if not 'address' in self.data[id] and 'post' in self.data[id]:\n self.data[id]['address'] = ''\n self.data[id]['location'] = ''\n Q = self.Query_baidu(self.data[id]['post'])\n self.data[id]['address'] = Q['address']\n self.data[id]['location'] = Q['location']\n cnt += 1\n\n print(\"Query %d info\" % cnt)\n\n np.save(self.cache_path, self.data)",
"def GenerateAddressBook(self, node):\n if not self.addressbook:\n return\n addrs = Tree('addresses')\n addrs.AddParent(node)\n index=0\n for zone in self.addressbook:\n # building individual addresses\n groups = sorted(self.addressbook[zone])\n for group in groups:\n ips = nacaddr.SortAddrList(self.addressbook[zone][group])\n ips = nacaddr.CollapseAddrList(ips)\n self.addressbook[zone][group] = ips\n count = index + 0\n for address in self.addressbook[zone][group]:\n prefix_type = 'ipv4-prefix '\n if isinstance( address, nacaddr.IPv6):\n prefix_type = 'ipv6-prefix '\n addr_list = Tree('address'+' _' + group, prefix_type +\n ' ' + str(address) + ';')\n addr_list.AddParent(addrs)\n count += 1\n index += count\n\n addr_groups=Tree('address-groups')\n addr_groups.AddParent(node)\n for zone in self.addressbook:\n # building address-sets\n addrlist = ''\n for group in self.addressbook[zone]:\n addrlist = addrlist + '_' + group + ' '\n group_t=Tree('group ' + zone, 'address-list [ ' + addrlist + '];')\n group_t.AddParent(addr_groups)",
"def createAddressSet(self) -> ghidra.program.model.address.AddressSet:\n ...",
"def _set_addresses(self):\n if self._report_key in (ReportTypes.SEARCH_DETAIL_REPORT, ReportTypes.SEARCH_BODY_REPORT) and \\\n self._report_data['totalResultsSize'] > 0:\n self._set_search_addresses()\n elif self._report_key in (ReportTypes.MHR_REGISTRATION, ReportTypes.MHR_TRANSFER,\n ReportTypes.MHR_EXEMPTION, ReportTypes.MHR_TRANSPORT_PERMIT, ReportTypes.MHR_NOTE):\n self._set_registration_addresses()",
"def geotransform(street_address_column, borough_column, zip_code_column, in_csv_file_loc, out_csv_file_loc):\r\n with open(out_csv_file_loc, 'wb') as csv_new_file:\r\n fieldnames = ['2010 Census Block',\r\n '2010 Census Block Suffix',\r\n '2010 Census Tract',\r\n 'Assembly District',\r\n 'Atomic Polygon',\r\n 'B10SC First Borough and Street Code',\r\n 'Bike Lane',\r\n 'Borough Block Lot (BBL)',\r\n 'Building Identification Number (BIN) of Input Address or NAP',\r\n 'City Council District',\r\n 'Community District',\r\n 'Community School District',\r\n 'Congressional District',\r\n 'DSNY Snow Priority Code',\r\n 'Election District',\r\n 'First Borough Name',\r\n 'House Number Display Format',\r\n 'House Number Sort Format',\r\n 'Hurricane Evacuation Zone (HEZ)',\r\n 'Message',\r\n 'NTA Name',\r\n 'Neighborhood Tabulation Area (NTA)',\r\n 'Police Precinct',\r\n 'Roadway Type',\r\n 'Second Street Name Normalized',\r\n 'Spatial Coordinates of Segment',\r\n 'State Senatorial District',\r\n 'USPS Preferred City Name',\r\n 'X-Y Coordinates of Lot Centroid',\r\n 'Zip Code',\r\n 'Latitude',\r\n 'Longitude',\r\n 'Spatial X',\r\n 'Spatial Y']\r\n writer = csv.DictWriter(csv_new_file, fieldnames=fieldnames)\r\n writer.writeheader()\r\n \r\n with open(in_csv_file_loc, 'rb') as csvfile:\r\n csvreader = csv.DictReader(csvfile, delimiter = ',')\r\n for row in csvreader:\r\n full_address = row[street_address_column].strip()\r\n split_full_address = full_address.split(' ')\r\n house_number = split_full_address[0]\r\n borough = row[borough_column].strip()\r\n boro_code = borough_transform(borough)\r\n zip_code = row[zip_code_column].strip()\r\n street_name = ' '.join(split_full_address[1:])\r\n \r\n (wa1, wa2) = geo_coder(house_number, boro_code, street_name, zip_code)\r\n \r\n output = Parser(wa1, wa2)\r\n \r\n writer.writerow(output)"
] | [
"0.69380814",
"0.63339984",
"0.61368567",
"0.60685796",
"0.6055097",
"0.6045649",
"0.60010093",
"0.5977058",
"0.5975706",
"0.59695",
"0.59433144",
"0.5934059",
"0.5919156",
"0.5903649",
"0.5892241",
"0.5878959",
"0.5866598",
"0.5815604",
"0.5803367",
"0.57603157",
"0.5752638",
"0.5745259",
"0.57299316",
"0.5726765",
"0.57263255",
"0.571738",
"0.5714102",
"0.571304",
"0.5704194",
"0.57018334"
] | 0.74458176 | 0 |
Build the facilities_blockgroups crosswalk file to assign facilities to blockgroups. | def build_block_cross(self):
from ambry.geo.util import find_geo_containment, find_containment
from geoid import civick
lr = self.init_log_rate(3000)
def gen_bound():
boundaries = self.library.dep('blockgroups').partition
# Note, ogc_fid is the primary key. The id column is created by the shapefile.
for i,boundary in enumerate(boundaries.query(
"SELECT AsText(geometry) AS wkt, gvid FROM blockgroups")):
lr('Load rtree')
yield i, boundary['wkt'] , boundary['gvid']
def gen_points():
for row in self.partitions.find(table = 'facilities_addresses').rows:
if row['longitude'] and row['latitude']:
yield (row['longitude'], row['latitude']), row['facilities_id']
p = self.partitions.find_or_new(table='facilities_geoids')
p.clean()
with p.inserter() as ins:
for point, point_o, cntr_geo, cntr_o in find_containment(gen_bound(),gen_points()):
blockgroup_gvid = civick.Blockgroup.parse(cntr_o)
tract_gvid = blockgroup_gvid.convert(civick.Tract)
county_gvid = blockgroup_gvid.convert(civick.County)
ins.insert(dict(facilities_id = point_o,
blockgroup_gvid = str(blockgroup_gvid),
tract_gvid = str(tract_gvid),
county_gvid = str(county_gvid)
))
lr('Marking point containment') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def task_output_block_groups():\n for dept in Department.list():\n yield {\n 'name': dept.name,\n 'file_dep': [dept.block_groups_path],\n 'targets': [dept.block_groups_output],\n 'actions': ['cp %(dependencies)s %(targets)s'],\n 'clean': True,\n }",
"def generate_test_blocks(self):\n def generate_blocks_for_floor(block_names, floor_name, data):\n item_dict = {}\n for name in block_names:\n item_dict['{}_{}'.format(name, floor_name)] = {\n 'name': name,\n 'floor': floor_name,\n **data,\n }\n return item_dict\n\n block_data = {\n 'pixels_to_m_x': 40,\n 'pixels_to_m_y': 40,\n 'floor_map': self.get_test_floor_map_image(),\n }\n self.bs_f0_l1_o1_dict = \\\n generate_blocks_for_floor(\n ['b1', 'b2', 'b3_del', 'b4_del', 'b5_del', 'b6_del'],\n 'f0_l1_o1',\n block_data)\n\n self.bs_f1_l1_o1_dict = \\\n generate_blocks_for_floor(\n ['b1', 'b2'],\n 'f1_l1_o1',\n block_data)\n\n self.bs_f0_l1_sub1_o1_dict = \\\n generate_blocks_for_floor(\n ['b1', 'b2', 'b3_del', 'b4_del', 'b5_del'],\n 'f0_l1_sub1_o1',\n block_data)\n\n self.bs_f0_l1_o2_dict = \\\n generate_blocks_for_floor(\n ['b1', 'b2_del', 'b3_del'],\n 'f0_l1_o2',\n block_data)\n\n self.bs_f0_l1_sub1_o2_dict = \\\n generate_blocks_for_floor(\n ['b1', 'b2_del', 'b3_del'],\n 'f0_l1_sub1_o2',\n block_data)\n\n self.bs_dict = {\n **self.bs_f0_l1_o1_dict,\n **self.bs_f1_l1_o1_dict,\n **self.bs_f0_l1_sub1_o1_dict,\n **self.bs_f0_l1_o2_dict,\n **self.bs_f0_l1_sub1_o2_dict\n }\n\n # generate blocks in database\n self.blocks = self.create_blocks_from_data(self.bs_dict, self.floors)",
"def build_frames_for_budgets(budgets=['180','300','600','default'], tools=['evosuite','randoop']):\n for budget in budgets:\n for tool in tools:\n print('* Budget {} for {}'.format(budget, tool))\n coverage = pd.read_csv('data/{}/{}-{}.csv'.format(budget, tool, budget))\n label = 'BranchCoverage' if 'BranchCoverage' in coverage.columns else 'branch_coverage'\n coverage = coverage[coverage[label] != 0]\n aux = merge(coverage)\n print('{} entries'.format(aux.shape[0]))\n aux.to_csv('data/{}/{}.csv'.format(budget, tool), index=False)",
"def _prepare_files(self, grouping_by):\n self.post_conf_dict = {}\n self.pre_conf_dict = {}\n main_folder = self.main_folder\n\n file_path = 'devlab/tests/groups_example.yaml'\n exmpl_file_path = os.path.join(main_folder, file_path)\n pre_conf = open(exmpl_file_path, 'r')\n self.pre_conf_dict = yaml.load(pre_conf)\n\n inst_id_list = []\n inst_3 = None\n for key in self.pre_conf_dict.keys():\n if key == 'user_defined_group_1':\n for val in self.pre_conf_dict[key]:\n for inst in self.src_vms:\n if inst['name'] == val:\n inst_id_list.append(inst['id'])\n elif key == 'user_defined_group_2':\n for inst in self.src_vms:\n if inst['name'] == self.pre_conf_dict[key][0]:\n inst_3 = inst['id']\n self.pre_conf_dict['group_by'] = [unicode(grouping_by)]\n self.pre_conf_dict['user_defined_group_1'] = inst_id_list\n self.pre_conf_dict['user_defined_group_2'] = [inst_3]\n self.new_file_name = 'test_file.yaml'\n file_to_write_into = os.path.join(os.getcwd(), self.new_file_name)\n with open(file_to_write_into, 'w') as stream:\n yaml.dump(self.pre_conf_dict, stream, default_flow_style=False)\n fab_path = os.path.join('devlab/tests', self.new_file_name)\n _cmd = 'cd {cf_folder} && fab get_groups:{config_ini},{new_file}'\n cmd = _cmd.format(cf_folder=main_folder, new_file=fab_path,\n config_ini='devlab/tests/configuration.ini')\n os.system(cmd)\n post_file_path = os.path.join(main_folder, 'vm_groups.yaml')\n post_conf = file(post_file_path, 'r')\n self.post_conf_dict = yaml.load(post_conf)",
"def generate(self):\n # Clear the scene for the level\n pmcs.newFile(force=True)\n\n # Pivot for rotation\n pivot = (self.block_dimensions[X]/2.0, 0, self.block_dimensions[Z]/2.0)\n\n # Go through each level item and place block\n for i in range(self.lvl.size[X]):\n for j in range(self.lvl.size[Y]):\n for k in range(self.lvl.size[Z]):\n blk = self.lvl.get_block((i, j, k))\n\n # Don't worry about Empty or RampDummy blocks\n if blk.block_type != blocks.BlockType.EMPTY and blk.block_type != blocks.BlockType.RAMP_DUMMY:\n # Load the scene file\n pmcs.importFile(pmcs.Path(blk.pth))\n\n # Give block a unique name\n new_name = \"{}_{}_{}_{}\".format(self.group_name, i, j, k)\n pmcg.rename(self.group_name, new_name)\n\n # Rotate it based on the orientation\n\n pmcg.rotate(new_name, [0, 90 * int(blk.orientation), 0], pivot=pivot)\n\n # Move it to the correct spot\n new_spot = [i * self.block_dimensions[X],\n j * self.block_dimensions[Y],\n k * self.block_dimensions[Z]]\n pmcg.move(new_name, new_spot)",
"def build_nested_blocks(self):\n pass",
"def make_groups(self):\n for g in self.groups:\n self.add_group(groupname=g['groupname'],\n grouptitle=g['grouptitle'],\n path_to_group=g['path'])",
"def block_group(inputs,\n filters,\n block_fn,\n blocks,\n strides,\n is_training,\n name,\n pruning_method='baseline',\n init_method='baseline',\n data_format='channels_first',\n end_sparsity=0.,\n weight_decay=0.):\n with tf.name_scope(name):\n end_point = 'block_group_projection_%s' % name\n # Only the first block per block_group uses projection shortcut and strides.\n inputs = block_fn(\n inputs,\n filters,\n is_training,\n strides,\n use_projection=True,\n pruning_method=pruning_method,\n init_method=init_method,\n data_format=data_format,\n end_sparsity=end_sparsity,\n weight_decay=weight_decay,\n name=end_point)\n\n for n in range(1, blocks):\n with tf.name_scope('block_group_%d' % n):\n end_point = '%s_%d_1' % (name, n)\n inputs = block_fn(\n inputs,\n filters,\n is_training,\n 1,\n pruning_method=pruning_method,\n init_method=init_method,\n data_format=data_format,\n end_sparsity=end_sparsity,\n weight_decay=weight_decay,\n name=end_point)\n\n return tf.identity(inputs, name)",
"def get_state_blockgroups_file(state=48, district=7, leg_body='US-REP', year='2015'):\r\n\r\n blockgroups_file = get_state_blockgroups_geojson_filename(state=state)\r\n \r\n state = \"{0:0>2}\".format(state)\r\n district = \"{0:0>2}\".format(district)\r\n \r\n print( blockgroups_file )\r\n\r\n if not os.path.isfile(blockgroups_file):\r\n print( \"Downloading blockgroups\" )\r\n bgs_url = 'ftp://ftp2.census.gov/geo/tiger/TIGER{year}/BG/tl_{year}_{state}_bg.zip'.format(year=year, state=state)\r\n bgs_dl_file = geojson_path + 'bgs.zip'\r\n download_file(bgs_url, bgs_dl_file)\r\n extract_all(bgs_dl_file, geojson_path)\r\n bgs_shapefile = glob(geojson_path + '*shp')[0]\r\n\r\n print( \"Converting blockgroups file to GEOJSON\")\r\n bgs = gpd.read_file(bgs_shapefile)\r\n bgs = bgs.to_crs({'init': u'epsg:4326'})\r\n bgs.to_file(blockgroups_file, driver='GeoJSON')\r\n\r\n # cleanup geojson dir\r\n shapefile_prefix = glob(geojson_path + '*shp')[0].split(\r\n geojson_path)[1].split('.')[0]\r\n shapefiles = glob(geojson_path + shapefile_prefix + '*')\r\n for f in shapefiles:\r\n os.remove(f)\r\n os.remove(bgs_dl_file)",
"def get_blockgroup_census_data(api, fields, census_data = {}, state=48, district=7, leg_body='US-REP', year='2015'):\r\n blockgroup_key = 'bg'\r\n if year not in census_data.keys():\r\n census_data[year] = { blockgroup_key: {} }\r\n else:\r\n if blockgroup_key not in census_data[year].keys():\r\n census_data[year][blockgroup_key] = { }\r\n # TODO make dynamic to state and district\r\n\r\n state = \"{0:0>2}\".format(state)\r\n district = \"{0:0>2}\".format(district)\r\n \r\n state_abbr = str(states.mapping('fips', 'abbr')[state])\r\n district_abbr = leg_body + '-' + state_abbr + district\r\n data_path = 'static/data/'\r\n bgs_in_district_JSON = data_path + district_abbr + '-blockgroups.json'\r\n\r\n bgs_in_district = pd.read_json(bgs_in_district_JSON)\r\n \r\n # Setup Census query\r\n census_query = Census(api, year=int(year))\r\n num_of_bgs = len(bgs_in_district)\r\n i = 0.0\r\n pbar = tqdm(\r\n total=num_of_bgs, initial=0, \r\n unit_scale=True, desc='Downloading Blockgroups'\r\n )\r\n for bg_index, bg in bgs_in_district.iterrows():\r\n bg_stats = census_query.acs5.state_county_blockgroup(\r\n fields=fields, \r\n state_fips=bg['STATEFP'], \r\n county_fips=bg['COUNTYFP'], \r\n blockgroup=bg['BLKGRPCE'],\r\n tract=bg['TRACTCE']\r\n )\r\n bg_stats = bg_stats[0]\r\n geoid = str(bg['GEOID'])\r\n if geoid in census_data[year][blockgroup_key].keys():\r\n census_data[year][blockgroup_key][geoid].update(bg_stats)\r\n else:\r\n census_data[year][blockgroup_key][geoid] = bg_stats\r\n # print percent complete\r\n pbar.update(1)\r\n pbar.close()\r\n return census_data",
"def build_stage2_6(self):\n paf, cfm = self.stage2_6.values()\n for i in range(2, 7):\n paf_ = OrderedDict([(k.replace('i', str(i)),paf[k]) for k in paf])\n cfm_ = OrderedDict([(k.replace('i', str(i)),cfm[k]) for k in cfm])\n stage_ = OrderedDict(PAF=paf_, CFM=cfm_)\n setattr(self, f'stage{i}', stage_)",
"def init_valet_groups(self):\n\n for rk, r in self.stack.items():\n properties = r.get(\"properties\", {})\n metadata = properties.get(\"metadata\", {})\n\n if len(metadata) > 0:\n valet_rules = metadata.get(\"valet_groups\", None)\n\n if valet_rules is not None and valet_rules != \"\":\n rule_list = []\n if isinstance(valet_rules, six.string_types):\n rules = valet_rules.split(\",\")\n for gr in rules:\n rule_list.append(gr.strip())\n else:\n self.status = \"incorrect valet group metadata format\"\n self.logger.error(self.status)\n return\n\n # Check rule validation of valet_groups.\n self.status = self.resource.check_valid_rules(self.tenant_id,\n rule_list,\n use_ex=self.use_dha)\n if self.status != \"ok\":\n self.logger.error(self.status)\n return\n\n self.status = self._make_valet_groups(properties.get(\"name\"),\n properties[\"availability_zone\"][0],\n rule_list)\n if self.status != \"ok\":\n self.logger.error(self.status)\n return\n\n # Check and create server groups if they do not exist.\n scheduler_hints = properties.get(\"scheduler_hints\", {})\n if len(scheduler_hints) > 0:\n for hint_key in scheduler_hints.keys():\n if hint_key == \"group\":\n hint = scheduler_hints[hint_key]\n self.status = self._make_group(properties.get(\"name\"), hint)\n if self.status != \"ok\":\n self.logger.error(self.status)\n return",
"def __init__(self, lvl, block_dimensions=DEFAULT_BLOCK_SIZE, group_name=DEFAULT_GROUP_NAME):\n self.lvl = lvl\n self.block_dimensions = block_dimensions\n self.group_name = group_name",
"def _prepare_blocks():\n\n counter = blocks[0]['freeStart']\n maxBlocks = blocks[0]['maxBlocks']\n while(counter < maxBlocks) :\n try:\n # print (mount['parent'] + '/linddata.' + str(counter))\n f = open(mount['parent'] + '/linddata.' + str(counter), 'r') \n except IOError, e:\n return STATUS['M_BD']\n else :\n fdatastring = f.next()\n fdata = deserializedata(fdatastring)\n blocks[counter] = fdata\n counter += 1\n \n return STATUS['OK']",
"def batchAnalysis(groupfil):\n groups = []\n with open(groupfil, 'r') as fIn:\n for line in fIn:\n groups.append(line.strip().split(','))\n \n checks = ['maxV', 'maxDerivV', 'maxDerivdV', 'minDerivV',\n 'minDerivdV', 'preMinV', 'postMinV', 'preMaxCurveV',\n 'preMaxCurveK', 'postMaxCurveV', 'postMaxCurveK',\n 'height', 'repolarizationV', 'intervals', 'frequencies']\n props = {ch: {gr: {} for gr in list(set([g[1] for g in groups]))}\n for ch in checks} # A dict of dicts\n # props [properties] [group name] [cell name]\n cells = [f[0].split('/')[-1].split('_')[0] for f in groups]\n \n # Add a few more keys\n props['activity'] = {gr: {} for gr in list(set([g[1] for g in groups]))}\n \n # Assign all the properties to the props dict\n for g in groups:\n df = pd.read_csv(g[0])\n df = df.drop('Unnamed: 33', 1) # Garbage\n df = df.drop('freq', 1) # These are downsampled\n df = df.dropna() # Dropna\n \n # If there are multiple clusters, add them in order\n if max(df.clust_inds) == 1: # Two clusters\n numClusts = int(max(df.clust_inds)+1)\n for ch in checks:\n for clust in range(numClusts):\n try:\n props[ch][g[1]][cells[groups.index(g)]].append(df[df['clust_inds']==clust][ch].dropna().values)\n except:\n props[ch][g[1]][cells[groups.index(g)]] = [df[df['clust_inds']==clust][ch].dropna().values]\n else: # Just one cluster\n for ch in checks:\n props[ch][g[1]][cells[groups.index(g)]] = [df[ch].dropna().values]\n # Get activity profile\n tIn, cBouts = timeInClusters(df)\n props['activity'][g[1]][cells[groups.index(g)]] = [tIn, cBouts]\n \n return props",
"def sbc_groups():\n cam = \"sbc\"\n for light, lens, window in [(True, True, True),\n (True, True, False),\n (True, False, False),\n (False, False, False)]: \n filenames = flatfiles(cam)\n filenames = get_light_sbc(filenames, light)\n filenames = get_lens(filenames, lens)\n filenames = get_window_sbc(filenames, window)\n images = valid_images(filenames)\n process_images(images, cam, (light, lens, window))",
"def _process(self, facilities: str):\n\n with self._lock:\n # Reset everything before processing\n self._locast_dmas = []\n\n for i, line in enumerate(facilities.split(\"\\n\")):\n if not line:\n continue\n\n line = line.strip()\n facility = {}\n cells = line.split(\"|\")\n\n if len(cells) != len(COLUMNS):\n raise Exception(\n f\"Unable to parse FCC facility on line {i+1}. Length: {len(cells)}, expected: {len(COLUMNS)}\")\n\n # Map the line into a dict, so it's easier to work with\n for i, col in enumerate(COLUMNS):\n facility[col] = cells[i]\n\n # Only care about specific facilities\n if facility[\"lic_expiration_date\"] and \\\n facility[\"nielsen_dma\"] and \\\n facility[\"fac_status\"] == 'LICEN' and \\\n facility['fac_service'] in ('DT', 'TX', 'TV', 'TB', 'LD', 'DC'):\n\n # Only care about non expired licence facilities\n lic_expiration_date = datetime.strptime(\n facility[\"lic_expiration_date\"], '%m/%d/%Y') + \\\n timedelta(hours=23, minutes=59, seconds=59)\n\n # Add the facility to the index, keyed by nielsen_dma and fac_callsign\n if lic_expiration_date > datetime.now():\n nielsen_dma = facility['nielsen_dma']\n call_sign = facility['fac_callsign'].split(\"-\")[0]\n\n locast_dma_id = self._find_locast_dma_id_by_fcc_dma_name(\n nielsen_dma)\n\n if locast_dma_id:\n key = (locast_dma_id, call_sign)\n self._dma_facilities_map[key] = facility",
"def rebuild(self, stopState = 'define'):\n try:\n _str_func = 'rebuild'\n log.debug(cgmGEN.logString_start(_str_func))\n _short = self.mNode\n\n log.debug(cgmGEN.logString_sub(_str_func,\"Checking blockProfile\"))\n blockType = self.blockType\n mBlockModule = self.p_blockModule\n _blockProfile = self.getMayaAttr('blockProfile')\n _d_profiles = {} \n try:_d_profiles = mBlockModule.d_block_profiles\n except:\n log.error(cgmGEN.logString_msg(_str_func,'No d_block_profile_found'))\n \n _typeDict= _d_profiles.get(_blockProfile,{})\n if _blockProfile and not _typeDict:\n log.error(cgmGEN.logString_msg(_str_func,'blockType not found in blockProfiles. Please fix | found {0}'.format(_blockProfile)))\n pprint.pprint(_d_profiles.keys())\n return False\n _baseDat = _typeDict.get('baseDat')\n \n ml_children = self.getBlockChildren()\n \n mBlockMirror = self.getMessageAsMeta('blockMirror')\n \n _blockType = self.blockType\n _side = get_side(self)\n \n l_datKeys = ATTR.get_datListKeys(_short)\n d_lists = {}\n for l in l_datKeys:\n if ATTR.datList_exists(_short,l):\n d_lists[l] = ATTR.datList_get(_short,l)\n \n _d = {'blockType':self.blockType,\n 'autoForm':False,\n 'side':_side,\n 'baseSize':baseSize_get(self),\n 'blockProfile':_blockProfile,\n 'blockParent': self.p_blockParent}\n \n\n for a in 'cgmName','blockProfile':\n if a in ['cgmName']:\n _d['name'] = self.getMayaAttr(a)\n elif self.hasAttr(a):\n _d[a] = self.getMayaAttr(a) \n \n blockDat = self.getBlockDat()\n if blockDat['ud'].get('baseDat'):\n blockDat['ud'].pop('baseDat')\n \n mLoc = self.doLoc()\n self.delete()\n \n \n mDup = cgmMeta.createMetaNode('cgmRigBlock',\n **_d)\n \n mDup.doSnapTo(mLoc)\n mLoc.delete()\n\n \n\n mDup.blockDat = blockDat\n \n #if _baseDat:\n #log.warning(cgmGEN.logString_msg(_str_func,'resetting baseDat: {0}'.format(_baseDat)))\n #mDup.baseDat = _baseDat \n \n for a,l in d_lists.iteritems():\n ATTR.datList_connect(mDup.mNode,a,l)\n \n blockDat_load(mDup)\n\n for mChild in ml_children:\n mChild.p_blockParent = mDup\n \n if mBlockMirror:\n mDup.connectChildNode(mBlockMirror,'blockMirror','blockMirror')#Connect\n \n \n \n return mDup \n \n \n\n except Exception,err:\n cgmGEN.cgmExceptCB(Exception,err)",
"def _prepare(self, mol):\n\n # Order the building blocks by number of functional groups\n # so that building blocks with more functional groups are\n # always placed first.\n\n bb_verts = dict()\n bbs = sorted(\n mol.building_block_vertices,\n key=lambda bb: len(bb.func_groups),\n reverse=True\n )\n for bb in bbs:\n bb_verts[bb] = mol.building_block_vertices[bb]\n mol.building_block_vertices = bb_verts\n return super()._prepare(mol)",
"def create_target_groups(ctx):\n data = self.create_target_groups()\n ctx.info('Created target groups for the load balancer {}:'.format(self.get_balancer_name()))\n ctx.pp.pprint(data)",
"def configure_groups(mods, apps):\n specs = configure_dd_spec_list(mods, apps)\n groups = [\n ScratchPad('scratch', config_dropdowns(specs)),\n Group('1', label='', layout='verticaltile'),\n Group('1a', label='', layout='monadthreecol'),\n Group('2', label='', layout='verticaltile'),\n Group('2a', label='', layout='maximize'),\n Group('3', label='', layout='treetab'),\n Group('3a', label='', layout='treetab'),\n Group('4', label='', layout='monadtall'),\n Group('4a', label='', layout='monadtall'),\n Group('5', label='', layout='max',\n matches=[Match(wm_class=['emacs'])]),\n Group('5a', label='', layout='max'),\n Group('6', layout='treetab', label=''),\n Group('6a', label='', layout='max'),\n Group('7', label=''),\n Group('7a', label='', layout='treetab'),\n Group('8', label='', layout='max'),\n Group('8a', label='', layout='max'),\n Group('9', label='', layout='treetab', matches=[\n Match(wm_class=['microsoft teams - preview']),\n Match(wm_class=['msoutlook-nativefier-9dd141']),\n ]),\n Group('9a', label='', layout='treetab', matches=[\n Match(wm_class=['jira-nativefier-894f7c'])\n ]),\n Group('0', label='', layout='floating'),\n ]\n keys = keymap.bind_keys(mods, apps, groups, specs)\n return (groups, keys)",
"def _genBlocksByName(self):\n self.blocksByName = {\n block.getName(): block for block in self.getBlocks(includeAll=True)\n }",
"def load_building_blocks(path):\t\t\n\t#TODO : automatization\n\tbenzene = Building_Block(abbrev=\"B\", num_atoms=6,origin=0, para_pos=3, para_angle=0, meta_pos=4 , meta_angle = -np.pi/3., ortho_pos=5, ortho_angle=-2.*np.pi/3, fixed_left = -1,complexity=1, path=path+\"/benzene.xyz\")\n\tnapthtalene = Building_Block(abbrev=\"N\", num_atoms=18,origin=0, para_pos=12, para_angle=0., meta_pos=11 , meta_angle = -np.pi/3., ortho_pos=10, ortho_angle=-2.*np.pi/3, fixed_left = -1,complexity=1, path=path+\"/naphtalene.xyz\")\n\tdbPc1 = Building_Block(abbrev=\"dbPc1\", num_atoms=32,origin=13, para_pos=1, para_angle=0, meta_pos=0 , meta_angle = +np.pi/3., ortho_pos=0, ortho_angle=-2.*np.pi/3, fixed_left = -1,complexity=1, path=path+\"/dbPc1_block.xyz\")\n\tdbPc4 = Building_Block(abbrev=\"dbPc4\", num_atoms=55,origin=22, para_pos=1, para_angle=0, meta_pos=0 , meta_angle = -np.pi/3., ortho_pos=0, ortho_angle=-2.*np.pi/3, fixed_left = -1,complexity=1, path=path+\"/dbPc4.xyz\")\n\tdbPc6 = Building_Block(abbrev=\"dbPc6\", num_atoms=52,origin=17, para_pos=0, para_angle=0, meta_pos=1 , meta_angle = -np.pi/3., ortho_pos=0, ortho_angle=-2.*np.pi/3, fixed_left = -1,complexity=1, path=path+\"/dbPc6.xyz\")\n\tdbPc5 = Building_Block(abbrev=\"dbPc5\", num_atoms=58,origin=12, para_pos=26, para_angle=0, meta_pos=20 , meta_angle = -np.pi/3., ortho_pos=0, ortho_angle=-2.*np.pi/3, fixed_left = -1,complexity=1, path=path+\"/dbPc5.xyz\")\n\tpseudo_para_naph_PCP = Building_Block(abbrev=\"pseudo-para_naph_PCP\", num_atoms=44,origin=0, para_pos=18, para_angle=0, meta_pos=16 , meta_angle = -np.pi/3, ortho_pos=0, ortho_angle=-2.*np.pi/3, fixed_left = -1,complexity=1, path=path+\"/pseudo-para_naph_PCP.xyz\")\n\tline =Building_Block(abbrev=\"line\", num_atoms=4,origin=0, para_pos=1, para_angle=0, meta_pos=1 , meta_angle = 0., ortho_pos=0, ortho_angle=-2.*np.pi/3, fixed_left = -1,complexity=1, path=path+\"/line.xyz\")\n\t#rot=Building_Block(abbrev=\"line\", num_atoms=47,origin=6, para_pos=16, para_angle=0, meta_pos=20 , meta_angle = 0., ortho_pos=0, ortho_angle=-2.*np.pi/3, fixed_left = -1,complexity=2, path=path+\"/rot.xyz\")\n\t#stacked_anth=Building_Block(abbrev=\"stacked_anth\", num_atoms=62,origin=3, para_pos=22, para_angle=0, meta_pos=30 , meta_angle = 0., ortho_pos=0, ortho_angle=-2.*np.pi/3, fixed_left = -1,complexity=2, path=path+\"/stacked_anth.xyz\")\n\t\n\tbuilding_blocks = [benzene,napthtalene,dbPc1,dbPc4,dbPc6, dbPc5,pseudo_para_naph_PCP, line]\n\n\treturn building_blocks",
"def __init__(self, file, sdef, name, path, attrs, parent, link_info=None):\n super(Group, self).__init__(file, sdef, name, path, attrs, parent, link_info)\n self.description = []\n self.parent_attributes = {}\n self.get_expanded_def_and_includes()\n # print \"after get_expanded_def_and_includes, includes=\"\n # pp.pprint(self.includes)\n self.get_member_stats()\n self.add_parent_attributes()\n self.merge_attrs()\n if self.link_info:\n # this group is linking to another. Already done in Node class. Nothing to do here\n pass\n else:\n# self.h5node = self.h5parent.create_group(self.name)\n #- self.file.file_pointer.create_group(self.full_path)\n #- self.file.h5commands.append(\"create_group(%s)\" % self.full_path)\n self.file.create_group(self.full_path)\n # add attribute values to node\n self.set_attr_values()",
"def _make_valet_groups(self, _rk, _az, _rule_list):\n\n for rn in _rule_list:\n rule = self.resource.group_rules[rn]\n\n # Valet group naming convention.\n # It contains datacenter id and availability_zone\n # followed by service id and vnf id\n # depending on scope.\n # And concatenate rule name.\n # Exception: quorum-diversity\n\n group_id = self.datacenter_id + \":\"\n\n if rule.rule_type != \"quorum-diversity\":\n group_id += _az + \":\"\n\n if rule.app_scope == \"lcp\":\n group_id += rn\n elif rule.app_scope == \"service\":\n group_id += self.service_instance_id + \":\" + rn\n elif rule.app_scope == \"vnf\":\n group_id += self.service_instance_id + \":\" + self.vnf_instance_id + \":\" + rn\n else:\n return \"unknown app_scope value\"\n\n if group_id in self.groups.keys():\n group = self.groups[group_id]\n else:\n group = Group(group_id)\n group.group_type = rule.rule_type\n group.factory = \"valet\"\n group.level = rule.level\n\n self.groups[group_id] = group\n\n group.server_list.append(self.app_name + \":\" + _rk)\n\n return \"ok\"",
"def makeGroupsFromCutFile(self):\n if self.cutfile == None:\n print \"Cannot make groups without a cuts file\"\n return ([],[])\n else:\n groups = []\n labels = []\n yields = []\n all_cols = self.qie.columns.values\n # For each predefined group\n for grouplist in cut_groups:\n labels.append(grouplist[0])\n g = None\n # For each cut in that group\n for cut in grouplist[1]:\n # Get min and max values for main cuts (TODO: handle marginal cuts)\n cut_min = self.cuts[cut][0]\n cut_max = self.cuts[cut][1]\n # For each df column corresponding to that cut (sometimes more than one measurement)\n for col in all_cols:\n if col.split(\"_\")[0] == cut:\n g_tmp = (self.qie[col] < cut_min) | (self.qie[col] > cut_max)\n if 'NoneType' in str(type(g)) :\n g = g_tmp\n else: \n g = g | g_tmp\n # Make exclusive groups\n if len(groups) > 0:\n g = g & (self.NotGroup(groups))\n groups.append(g)\n yields.append(g.sum())\n # Make final group containing all other chips\n groups.append(self.NotGroup(groups))\n labels.append(\"Good\")\n yields.append(groups[-1].sum())\n self.makeYieldsTable(yields, labels)\n # Add column to data frame containing \"Good\" (1), \"bad\" (0), \"marginal\" (2,..) info\n self.qie[\"Sorting\"] = np.where(groups[-1], 1, 0)\n print sum(self.qie[\"Sorting\"])\n #print self.qie\n self.makeSortingFile()\n return (groups, labels)",
"def join_groups(self, groups, blacklist):\n floodwait = False\n for group in groups:\n if group not in blacklist and group not in self.dialog_names:\n if not floodwait:\n print(\"[*] Trying to join {}..\".format(group))\n try:\n channel = self.client.get_entity(group)\n self.client(JoinChannelRequest(channel))\n self.dialog_names.add(group) #avoid trying to join the same group twice\n print(\" [+]->Succesfully joined {} \".format(group))\n except errors.rpc_error_list.FloodWaitError as e:\n floodwait = True\n self.leftout_groups.add(group)\n date = datetime.datetime.now()\n self.block = date + datetime.timedelta(seconds = e.seconds) #adds waittime to current time to determine the date when the block ends\n print(\" [!]->\"+str(e))\n except errors.rpc_error_list.UsernameInvalidError as e:\n self.blacklist.add(group)\n print(\" [!]->\"+str(e))\n except errors.rpc_error_list.UsernameNotOccupiedError as e:\n self.blacklist.add(group)\n print(\" [!]->\"+str(e))\n except TypeError as e:\n self.blacklist.add(group)\n print(\" [!]->\"+str(e))\n except errors.rpc_error_list.InviteHashExpiredError as e:\n self.blacklist.add(group)\n print(\" [!]->\"+str(e))\n else:\n self.leftout_groups.add(group)",
"def generate_cfg():\n \n if not os.path.exists(cfg_path):\n os.mkdir(cfg_path)\n \n for img_path in get_template_paths():\n extractor = BlockExtractor(img_path)\n extractor.get_cfg()\n for block in extractor.get_blocks():\n img = BlockParser(img_path, block).block_image()\n #cv.imshow(\"Block\", img)\n #cv.waitKey() & 0xFF",
"def block_group(inputs, filters, block_fn, blocks, strides, is_training, name,\n data_format='channels_first'):\n # Only the first block per block_group uses projection shortcut and strides.\n inputs = block_fn(inputs, filters, is_training, strides,\n use_projection=True, data_format=data_format)\n\n for _ in range(1, blocks):\n inputs = block_fn(inputs, filters, is_training, 1,\n data_format=data_format)\n\n return tf.identity(inputs, name)",
"def gappyBlocks():\n path = \"./data/\"\n for file in os.listdir(path):\n if file.endswith(\".fa\") or file.endswith(\".fasta\"):\n alignin = AlignIO.read(path + file, \"fasta\")\n try:\n filecore = file.rstrip(\".fa\")\n except:\n filecore = file.rstrip(\".fasta\")\n fileout = path + filecore + \".blocks\"\n \n # constants\n align = []\n gap = []\n border = []\n blocks = []\n \n # specify cut-off of gaps in column (in percentage)\n cut_min = 0.1\n cut_max = 0.9\n \n # alignment\n for pos in range(0,(alignin.get_alignment_length())):\n column=alignin[:,pos]\n align.append(column)\n if \"-\" in column:\n col=list(column)\n gaps=col.count(\"-\")\n if gaps > (cut_min*len(col)) and gaps < (cut_max*len(col)):\n gap.append(pos)\n \n if gap != []:\n border.append(gap[0])\n border.append(gap[len(gap)-1])\n for i in range(0,(len(gap)-1)):\n if int(gap[i]+1)!=int(gap[i+1]):\n border.append(gap[i])\n \n for j in range((len(gap)-1), 0, -1):\n if int(gap[j]-1)!=int(gap[j-1]):\n border.append(gap[j])\n # list of positions for the blocks\n order=sorted(border)\n \n # get the blocks and writes to the .blocks file\n o=open(fileout, \"w\")\n \n for i in range(0,len(order)-1,2):\n beg=int(order[i])\n end=int(order[i+1])\n count = end-beg \n block=alignin[:,beg:end]\n \n # specify the minimum length of a gap\n if count < 3:\n pass\n else: \n blocks.append(block) \n o.write('***Block***'+\"\\n\"+\"Start:\"+str(beg)+\\\n \"\\n\"+\"Count:\"+str(count)+\"\\n\")\n for record in block:\n o.write(str(record.seq)+\"\\n\")\n o.close()\n else:\n o=open(fileout, \"w\")\n o.close()\n pass\n return"
] | [
"0.56382924",
"0.520446",
"0.51788396",
"0.51779836",
"0.5162532",
"0.5090228",
"0.50537544",
"0.5002893",
"0.49870488",
"0.4973123",
"0.49609685",
"0.49350616",
"0.49249336",
"0.49217957",
"0.49188414",
"0.49178317",
"0.49041072",
"0.4886093",
"0.4883396",
"0.48815715",
"0.4867034",
"0.486429",
"0.48561937",
"0.48157653",
"0.48004654",
"0.47829014",
"0.4774548",
"0.47741896",
"0.47691095",
"0.47606367"
] | 0.64450157 | 0 |
This will run through all of the web interface using Selenium. The input should be the full path to a pdb_file. It will download the wrappers/bonds as PDB_NAME_wrappers.txt and PDB_NAME_bonds.txt. | def run_wrappa(browser, pdb_file):
# Wrappa has 3 MB limit
if os.path.getsize(pdb_file) > 3000000:
logging.warn("%s is too large (size is %d), skipping", pdb_file, os.path.getsize(pdb_file))
return False
if os.path.isfile(pdb_file[:-4] + "_bonds.txt"):
logging.warn("%s has already been processed, skipping", pdb_file)
return False
full_path = os.path.abspath(pdb_file)
directory, file_name = os.path.split(full_path)
pdb_name = file_name[:-4]
try:
browser.get("http://www.wrappa.org/wrappa01/wrappa")
browser.find_element_by_name("pdbFileName").send_keys(full_path)
browser.find_element_by_xpath("//*[@type='submit']").click()
# Use the default configuration
browser.find_element_by_xpath("//*[@type='submit']").click()
# Analyze
browser.find_element_by_xpath("//*[@type='submit']").click()
# Download the files created
browser.find_element_by_link_text("Bonds").click()
save_page_as(browser, os.path.join(directory, pdb_name + "_wrappers.txt"))
browser.back()
browser.find_element_by_link_text("Wrappers").click()
save_page_as(browser, os.path.join(directory, pdb_name + "_bonds.txt"))
except Exception as e:
logging.exception(e)
logging.error("Encounterd exception for %s", pdb_file)
return False
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def dl_pdb(url_dom, pdb_id, dom_sid):\n good_url = re.sub(r'(output=html)', 'output=txt', url_dom)\n\n print(\"Dowloading the good domain of \" + pdb_id + \".pdb from the SCOP \" +\n \"website...\")\n urlreq.urlretrieve(good_url, \"data/\" + dom_sid + '.pdb')\n print(\"Download finished !\\n\")",
"def download_pdb(pdb_name):\n # URL of the PDB\n url = 'http://files.rcsb.org/download/'\n if pdb_name[-4:] == '.pdb':\n file_name = pdb_name\n else:\n file_name = pdb_name + '.pdb'\n http = urllib3.PoolManager()\n url = url + file_name\n req = http.request('GET', url, preload_content = False)\n with open(file_name, 'wb') as out:\n while True:\n data = req.read()\n if not data:\n break\n out.write(data)\n req.release_conn()",
"def customized_pondlets(driver, file_name):\n file_path = get_file_path(file_name)\n data = []\n\n with open(file_path) as file:\n data = file.readlines()\n\n driver.find_element_by_xpath(\n addlink.format(model_path='customized_pondlet/customizedpondlet')\n ).click()\n\n for row in data:\n items = row[:-1].split('\\t')\n\n input_value = {\n 'title': items[0],\n 'level': items[1],\n 'lang': items[2],\n 'author': items[3],\n 'date': items[4],\n }\n\n wait_xpath(driver, addanother)\n\n driver.find_element_by_id('id_title').send_keys(input_value['title'])\n\n driver.find_element_by_id('id_level').clear()\n driver.find_element_by_id('id_level').send_keys(input_value['level'])\n\n Select(driver.find_element_by_id('id_lang')\n ).select_by_value(input_value['lang'])\n\n driver.find_element_by_id('id_author').send_keys(input_value['author'])\n\n if input_value['date'].lower() == 'today':\n driver.find_element_by_link_text('Today').click()\n else:\n driver.find_element_by_id(\n 'id_finalized_date').send_keys(input_value['date'])\n\n driver.find_element_by_xpath(addanother).click()\n wait_xpath(driver, success)\n\n driver.find_element_by_id('site-name').click()\n wait_url(driver, dashboard)",
"def go_through_businesses(driver, soup, links_file, zipcode):\n result_tags = soup.find_all('li', {'class': 'regular-search-result'})\n correct_businesses = 0\n for result_tag in result_tags:\n business_tag = result_tag.find('div', {'class': 'biz-listing-large'})\n # Check that it's in the right zip code\n if not correct_zipcode(business_tag, zipcode):\n continue\n\n business_link = YELP_BASE + business_tag.find('div', {'class': 'main-attributes'}).find('a', {'class': 'biz-name'})['href']\n # Add business link to links file\n links_line = business_link + '\\t' + zipcode\n write_line(links_file, links_line)\n correct_businesses += 1\n\n print 'found ' + str(correct_businesses) + ' businesses for ' + zipcode\n\n return driver",
"def process_pdb(self, pdb_filename) :\n args = [self.command, pdb_filename]\n try :\n p = Popen(args, stdout=PIPE)\n (out,err) = p.communicate() \n except OSError :\n raise RuntimeError(\"Cannot communicate with STRIDE.\") \n return out",
"def flight_call(origin,destin,trDate):\n baseDataUrl = \"https://www.makemytrip.com/flight/search?itinerary=\" + origin + \"-\" + destin + \"-\" + trDate + \"&tripType=O&paxType=A-1_C-0_I-0&intl=false&=&cabinClass=E\"\n\n try:\n driver = webdriver.Chrome(ChromeDriverManager().install())\n # Chrome driver is being used.\n print(\"Requesting URL: \" + baseDataUrl)\n\n driver.get(baseDataUrl) # URL requested in browser.\n print(\"Webpage found ...\")\n\n element_xpath = '//*[@id=\"left-side--wrapper\"]/div[2]' # First box with relevant flight data.\n\n # Wait until the first box with relevant flight data appears on Screen\n options = webdriver.ChromeOptions()\n options.headless = True\n element = WebDriverWait(driver, 15).until(EC.visibility_of_element_located((By.XPATH, element_xpath)))\n\n # Scroll the page till bottom to get full data available in the DOM.\n print(\"Scrolling document upto bottom ...\")\n for j in range(1, 100):\n driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n\n # Find the document body and get its inner HTML for processing in BeautifulSoup parser.\n body = driver.find_element_by_tag_name(\"body\").get_attribute(\"innerHTML\")\n # print(body)\n print(\"Closing Chrome ...\") # No more usage needed.\n driver.quit() # Browser Closed.\n\n print(\"Getting data from DOM ...\")\n soupBody = BeautifulSoup(body, features=\"html.parser\") # Parse the inner HTML using BeautifulSoup\n\n # Extract the required tags\n a_name_l = soupBody.findAll('div', class_='fli-list')\n print(\n '------------------------------------------------------------------------------------------------------------------------------------')\n print(\n '-----------------------------------------------------------------------------------------------------------------------------------')\n # print(a_name_l)\n a_name = []\n for x in a_name_l:\n a_name.append(x.find('span', class_='boldFont blackText airlineName').text)\n\n print(a_name)\n # for x in a_name:\n # spanFlightName = x.find('p').text\n # spanFlightName = soupBody.findAll(\"span\", class_='boldFont blackText airlineName') # Tags with Flight Name\n\n # pFlightCode = soupBody.findAll(\"p\", {\"class\": \"fli-code\"}) # Tags with Flight Code\n a_name_ts = soupBody.findAll('div', attrs={\"class\": \"flightTimeSection\"})\n a_name_dtime = []\n for x in a_name_ts:\n a_name_dtime.append(x.find('p').text)\n print(a_name_dtime)\n # divDeptTime = soupBody.findAll(\"div\", attrs={\"class\": \"dept-time\"}) # Tags with Departure Time\n # pDeptCity = soupBody.findAll(\"p\", attrs={\"class\": \"dept-city\"}) # Tags with Departure City\n table_cty = soupBody.findAll('div', attrs={\"class\": \"flightTimeSection\"})\n pDeptCity = []\n for x in table_cty:\n pDeptCity.append(x.find('p', class_='darkText').text)\n print(pDeptCity)\n # pFlightDuration = soupBody.findAll(\"p\", attrs={\"class\": \"fli-duration\"}) # Tags with Flight Duration\n table_dur = soupBody.findAll('div', attrs={\"class\": \"appendRight40\"})\n pFlightDuration = []\n for x in table_dur:\n pFlightDuration.append(x.find('p').text)\n print(pFlightDuration)\n # pArrivalTime = soupBody.findAll(\"p\", attrs={\"class\": \"reaching-time append_bottom3\"}) # Tags with Arrival Time\n # table = soupBody.findAll('div', attrs={\"class\": \"flightTimeSection\"})\n # pArrivalTime = []\n # for x in table:\n # pArrivalTime.append(x.find('p').text)\n # #pArrivalCity = soupBody.findAll(\"p\", attrs={\"class\": \"arrival-city\"}) # Tags with Arrival City\n # table = soupBody.findAll('div', 
attrs={\"class\": \"flightTimeSection\"})\n # for x in table:\n # pArrivalCity = x.find('p').text\n # #spanFlightCost = soupBody.findAll(\"span\", attrs={\"class\": \"actual-price\"}) # Tags with Flight Cost\n spanFlightCost_l = soupBody.findAll('div', class_='priceSection')\n\n spanFlightCost = []\n for x in spanFlightCost_l:\n FlightCost=list(x.find('p').text[2:])\n FlightCost.remove(',')\n FlightCostStr=''.join(map(str,FlightCost))\n FlightCostFinal=int(FlightCostStr)\n spanFlightCost.append(FlightCostFinal) \n print('55555555555555555555555')\n print(x.find('p').text[2:])\n \n # FlightCost=list(spanFlightCost)\n # FlightCost.remove(',')\n # FlightCostStr=''.join(str,FlightCost)\n # FlightCostFinal=int(FlightCostStr)\n # print('22222222222222222222222222222222222')\n # print(FlightCostFinal)\n\n\n # Data Headers\n flightsData = [[\"flight_name\", \"departure_time\", \"departure_city\", \"flight_duration\", \"arrival_time\",\n \"arrival_city\", \"flight_cost\"]]\n list_dept_time = a_name_dtime[::2]\n list_arr_time = a_name_dtime[1::2]\n list_dept_city = a_name_dtime[::2]\n list_arr_city = a_name_dtime[1::2]\n # print(len(spanFlightName))\n # print(len(pFlightCode))\n print(len(a_name))\n print(len(a_name_dtime)) # 1\n print(len(list_dept_time))\n print(list_dept_time)\n print(len(list_arr_time))\n print(list_arr_time)\n\n print(len(pDeptCity)) # 1\n print(len(list_dept_city))\n print(list_dept_city)\n print(len(list_arr_city))\n print(list_arr_city)\n print(len(pFlightDuration))\n # print(len(pArrivalTime))\n print(len(spanFlightCost))\n\n Flight_Details = pd.DataFrame(\n {'Flight Name': a_name,\n 'Departure Time': list_dept_time,\n 'Arrival Time': list_arr_time,\n 'From': list_dept_city,\n 'To': list_arr_city,\n 'Duration': pFlightDuration,\n 'Cost': spanFlightCost\n })\n # print(len(pArrivalCity))\n return Flight_Details\n\n # # Extracting data from tags and appending to main database flightsData\n # for j in range(0, len(a_name) ):\n # flightsData.append([a_name[j].text, a_name_dtime[j].text, pDeptCity[j].text,\n # pFlightDuration[j].text, a_name_dtime[j].text, pDeptCity[j].text,\n # spanFlightCost[j].text])\n #\n # # Output File for FlightsData. This file will have the data in comma separated form.\n # outputFile = \"FlightsData_\" + origin + \"-\" + destin + \"-\" + trDate.split(\"/\")[0] + \"-\" + trDate.split(\"/\")[\n # 1] + \"-\" + trDate.split(\"/\")[2] + \".csv\"\n #\n # # Publishing Data to File\n # print(\"Writing flight data to file: \" + outputFile + \" ...\")\n # with open(outputFile, 'w', newline='') as spfile:\n # csv_writer = csv.writer(spfile)\n # csv_writer.writerows(flightsData)\n # print(\"Data Extracted and Saved to File. \")\n # # print(\"Records\\nFlight Name: \"+ str(len(a_name)) + \"\\nDept Time: \"+ str(len(a_name_dtime)) + \"\\nDept City: \"+ str(len(pDeptCity)) + \"\\nFlight Duration: \"+ str(len(pFlightDuration)) + \"\\nArrival Time: \"+ str(len(pArrivalTime)) + \"\\nArrival City: \"+ str(len(pArrivalCity)) + \"\\nFlight Cost: \"+ str(len(spanFlightCost)))\n # # print(flightsData)\n # # print(outputFile)\n # # print(a_name)\n except Exception as e:\n print(str(e))",
"def download_pdb(pdb, pdb_path=None):\n if pdb_path is None:\n pdb_path = PDB_PATH\n \n path_uniprot = Path(pdb_path)\n name = pdb + \".pdb\"\n url = \"https://files.rcsb.org/download/\" + pdb + \".pdb\"\n full_name = path_uniprot / name\n # Check if .pdb is already downloaded, if not => download\n if not full_name.is_file():\n download_url(url, str(path_uniprot), name)",
"def get_bonds():\r\n\r\n print('getting bond data...')\r\n # securtiyList = ['US29265WAA62 Corp', 'XS1713463559 Corp', 'XS2000719992 Corp', 'XS0954675129 Corp', 'XS0954675129 Corp']\r\n fieldList = ['ticker', 'coupon', 'nxt_call_dt', 'final_maturity', \r\n 'mty_typ', 'px_mid', 'z_sprd_mid', 'yas_ispread', 'yas_bond_yld', \r\n 'yas_risk','crncy', 'payment_rank', 'industry_sector','rtg_moody','rtg_sp']\r\n\r\n # the script fis here\r\n dir_path = os.path.dirname(os.path.realpath(__file__))\r\n\r\n # get items from sheet (in the same folder)\r\n fileToGet = 'hybridSecurityList.xlsx'\r\n # join file and correct folder\r\n fileToGet = os.path.join(dir_path, fileToGet)\r\n secListXlsx = pd.ExcelFile(fileToGet)\r\n \r\n # the names of the available sheets\r\n print('getting security list from:', secListXlsx.sheet_names)\r\n df = pd.DataFrame(secListXlsx.parse('tickers'))\r\n print('summary of the data')\r\n print(df.info)\r\n \r\n # put all isin's in a list\r\n isin = df.iloc[:,1].tolist()\r\n securtiyList = [x + \" Corp\" for x in isin]\r\n\r\n # make the lists unique (and keep the order)\r\n securtiyList = list(collections.OrderedDict.fromkeys(securtiyList))\r\n\r\n # get the data from bloomberg\r\n print('getting data from bbg')\r\n if len(securtiyList) > 49:\r\n # there are lots of securities\r\n print(f'lots of data requested: {len(securtiyList)} items requested')\r\n reducedListSize = input(f'reduce list size to value or press \"x\" to use {len(securtiyList)} >>>')\r\n if reducedListSize == 'x':\r\n # keep the same size\r\n reducedListSize = len(securtiyList)\r\n else:\r\n # reeduce the list size\r\n reducedListSize = int(reducedListSize)\r\n \r\n # set the reduced list to size selected\r\n reducedList = securtiyList[0:reducedListSize]\r\n # run code on the smaller list\r\n bondData = bloombergAPI_bigData(reducedList,fieldList, 10)\r\n else:\r\n bondData = blp.bdp(tickers=securtiyList, flds=fieldList)\r\n\r\n\r\n #print('number of columns:', data.head())\r\n print('data is fetched:')\r\n print(bondData.info)\r\n\r\n return bondData",
"def capture_web(pdb_file, output_file):\n # Register the streaming http handlers with urllib2\n register_openers()\n # use relpath to hide local path\n with open( os.path.relpath( pdb_file ), \"r\" ) as fp:\n # headers contains the necessary Content-Type and Content-Length\n # datagen is a generator object that yields the encoded parameters\n datagen, headers = multipart_encode({\n \"upfile\": fp,\n \"GO\": \"GO\",\n \"note\": \"note\"\n })\n # Create the Request object\n request = urllib2.Request(\n CAPTURE_URL + \"capture_ul.cgi\", datagen, headers\n )\n # Actually do the request, get and read the response\n response = urllib2.urlopen(request).read()\n with open( output_file, 'w' ) as fp:\n fp.write( response )",
"def getFutbinDataAndPopulateTable(self, futbin_url):\n browser = self.driver\n driver = self.driver\n\n tab_url = futbin_url\n\n browser.execute_script(\"window.open('');\")\n browser.switch_to.window(browser.window_handles[1])\n browser.get(tab_url)\n\n name = WebDriverWait(driver, 20).until(EC.visibility_of_element_located(\n (By.XPATH, \"/html/body/div[8]/div[15]/div/div/div[1]/div[2]/table/tbody/tr[2]/td\"))).text\n team = WebDriverWait(driver, 20).until(EC.visibility_of_element_located(\n (By.XPATH, \"/html/body/div[8]/div[15]/div/div/div[1]/div[2]/table/tbody/tr[3]/td/a\"))).text\n nation = WebDriverWait(driver, 20).until(EC.visibility_of_element_located(\n (By.XPATH, \"/html/body/div[8]/div[12]/div[3]/div[1]/div/ul/li[1]/a\"))).text\n cardtype = WebDriverWait(driver, 20).until(EC.visibility_of_element_located(\n (By.XPATH, \"/html/body/div[8]/div[15]/div/div/div[1]/div[2]/table/tbody/tr[12]/td\"))).text\n rating = WebDriverWait(driver, 20).until(EC.visibility_of_element_located(\n (By.XPATH, \"/html/body/div[8]/div[13]/div[2]/div/div/div[1]/div/a/div/div[2]\"))).text\n cardname = WebDriverWait(driver, 20).until(EC.visibility_of_element_located(\n (By.XPATH, \"/html/body/div[8]/div[13]/div[2]/div/div/div[1]/div/a/div/div[3]\"))).text\n position = WebDriverWait(driver, 20).until(EC.visibility_of_element_located(\n (By.XPATH, \"/html/body/div[8]/div[13]/div[2]/div/div/div[1]/div/a/div/div[4]\"))).text\n\n internals_location = driver.find_element(\n By.XPATH, \"/html/body/div[8]/div[5]/div\")\n internal_id = int(internals_location.get_attribute(\"data-baseid\"))\n futbin_id = internals_location.get_attribute(\"data-id\")\n\n # price, lastupdated = get_futbin_price_lastupdated(fifa_id)\n\n r = requests.get(\n 'https://www.futbin.com/22/playerPrices?player={0}'.format(internal_id))\n\n data = r.json()\n price = data[str(internal_id)][\"prices\"][\"xbox\"][\"LCPrice\"]\n lastupdated = data[str(internal_id)][\"prices\"][\"xbox\"][\"updated\"]\n\n # 18 mins ago\n # 48 mins ago\n # 1 hour ago\n # 2 hours ago\n if (lastupdated == \"Never\"):\n return 0, 100\n elif (\"mins ago\" in lastupdated):\n lastupdated = lastupdated[:-9]\n lastupdated = int(lastupdated)\n elif(\"hour ago\" in lastupdated):\n lastupdated = lastupdated[:-9]\n lastupdated = int(lastupdated) * 60\n elif(\"hours ago\" in lastupdated):\n lastupdated = lastupdated[:-10]\n lastupdated = int(lastupdated) * 60\n elif(\"seconds\" in lastupdated):\n lastupdated = 1\n elif(\"second\" in lastupdated):\n lastupdated = 1\n else:\n return 0, 100\n\n price = price.replace(\",\", \"\")\n price = int(price)\n\n # MINUTES\n lastupdated = int(lastupdated)\n futbin_id = int(futbin_id)\n market_price = 0\n buy_pct = .85\n agg = [name, cardname, rating, team, nation, cardtype, position,\n internal_id, futbin_id, price, lastupdated, market_price, buy_pct]\n\n full_entry = \"\"\n for word in agg:\n word = str(word)\n word_comma = word + \",\"\n full_entry += word_comma\n\n # Remove last comma\n full_entry = full_entry[:-1]\n print(full_entry)\n\n # Add new line to end\n hs = open(\"./data/player_list.txt\", \"a\", encoding=\"utf8\")\n hs.write(full_entry + \"\\n\")\n hs.close()\n\n log_event(self.queue, \"Added player \" + str(cardname))\n\n # ~ ~ ~ ~ ~ ~ ~ Close the futbin tab ~ ~ ~ ~ ~\n browser.close()\n\n # Switch back to the first tab with URL A\n browser.switch_to.window(browser.window_handles[0])\n # log_event(self.queue, \"Fetched player info\")",
"def fetchPDB(pdb_id):\n url = 'http://www.rcsb.org/pdb/files/%s.pdb' % pdb_id.split('.')[0]\n return urllib.urlopen(url).read()",
"def process_directory(browser, directory):\n\n count = 0\n for pdb in find_pdb_files(directory):\n if run_wrappa(browser, pdb):\n logging.info(\"Processed %s\", pdb)\n count += 1\n logging.info(\"Fully processed %d pdb files\", count)",
"def main():\r\n windows_driver = '/mnt/c/Users/kurtrm/Documents/bin/chromedriver.exe'\r\n browser = Chrome(executable_path=windows_driver)\r\n\r\n url = 'https://www.pcta.org/discover-the-trail/' \\\r\n 'thru-hiking-long-distance-hiking/2600-miler-list/'\r\n\r\n browser.get(url)\r\n year_range = range(1952, 2018) # Range of years of recorded thru-hikes\r\n\r\n for year in year_range:\r\n select = Select(browser.find_element_by_id('year'))\r\n select.select_by_value(str(year))\r\n time.sleep(1.5)\r\n miler_list = browser.find_elements_by_css_selector('td')\r\n if miler_list[0].text != 'No records found for the selected year.':\r\n people = extract_names(miler_list, year)\r\n load_mongo_db('pct', 'completions', people)",
"def run(self):\n try:\n\n # set the arguments and options\n chromeOptions = Options()\n prefs = {\"profile.managed_default_content_settings.images\": 2}\n chromeOptions.add_experimental_option(\"prefs\", prefs)\n chromeOptions.add_experimental_option(\"excludeSwitches\", [\"enable-logging\"])\n chromeOptions.add_argument(\"--headless\")\n chromeOptions.add_argument(\"--blink-settings=imagesEnabled=false\")\n chromeOptions.add_argument(\"--disable-popup-blocking\")\n chromeOptions.add_argument(\"--ignore-certificate-errors\")\n chromeOptions.add_argument(\"--allow-insecure-localhost\")\n chromeOptions.add_argument(\"--allow-running-insecure-content\")\n chromeOptions.accept_untrusted_certs = True\n chromeOptions.assume_untrusted_cert_issuer = True\n service_args = [\"hide_console\"]\n currentPath = (\n os.path.dirname(os.path.abspath(__file__))\n + \"\\\\ChromeDriver\\\\\"\n + chrome_browser_version\n + \"\\\\chromedriver.exe\"\n )\n\n while True:\n try:\n print(\"\\nCalling Driver\")\n\n # Creating an instance of chrome\n driver = Chrome(\n executable_path=currentPath,\n options=chromeOptions,\n service_args=service_args,\n )\n print(\"Driver Called\")\n driver.set_page_load_timeout(10)\n driver.delete_all_cookies()\n\n # open a page\n driver.get(\"Enter Checking Site Here\")\n print(\"Getting Site\")\n try:\n \"\"\"\n \n remove this try except if the your wifi doesn't block websites\n \n \"\"\"\n\n # xpath if the website is blocked\n element = driver.find_element_by_xpath(\n \"Enter xpath to an element in the blocked page\"\n )\n print(\"Site Blocked\\n\")\n\n except:\n try:\n # xpath to any thing in the website to make sure you are connected to the internet\n element = driver.find_element_by_xpath(\n \"/Enter xpath to an element in the page\"\n )\n print(\"Site Opening\\n\")\n\n except:\n try:\n \"\"\"\n \n if your portal doesn't have auto redirect, uncomment the following line and type in your login url\n \n \"\"\"\n\n # driver.get(\"Paste Login Webiste URL Here\")\n\n # change the ids to those in your login website\n # you can use developer mode to find the id of fields (use ctrl + shift + i)\n # change the username and password to the required one\n print(\"Trying To Login\")\n # select usnername field\n element = driver.find_element_by_id(\n \"Ending id of user input field\"\n )\n print(\"User Found\")\n element.send_keys(\"Enter username\")\n print(\"User Inputted\")\n # select password field\n element = driver.find_element_by_id(\n \"Ending id of password input field\"\n )\n print(\"Passwprd Found\")\n element.send_keys(\"Enter password\")\n print(\"Password Inputted\")\n # select submit button\n element = driver.find_element_by_id(\n \"Enter id of submit button\"\n )\n print(\"Button Found\")\n element.click()\n print(\"Logged In\\n\")\n # except NoSuchElementException as ex:\n # print(\"Can't Login\")\n # event.wait(120)\n except Exception as ex:\n print(\n \"Can't login:\\t\\tAn exception of type {0} occurred. Arguments:\\n{1}\".format(\n type(ex).__name__, ex.args\n )\n )\n event.wait(60)\n continue\n\n except Exception as ex:\n print(\n \"Error in loop:\\t\\tAn exception of type {0} occurred. Arguments:\\n{1}\".format(\n type(ex).__name__, ex.args\n )\n )\n try:\n driver.quit()\n except Exception as ex:\n print(\n \"Error in Quitting:\\t\\tAn exception of type {0} occurred. 
Arguments:\\n{1}\".format(\n type(ex).__name__, ex.args\n )\n )\n\n event.wait(60)\n continue\n\n try:\n driver.quit()\n except Exception as ex:\n print(\n \"Error in Quitting in loop:\\t\\tAn exception of type {0} occurred. Arguments:\\n{1}\".format(\n type(ex).__name__, ex.args\n )\n )\n event.wait(300)\n continue\n\n except Exception as ex:\n print(\n \"Error outside loop:\\t\\tAn exception of type {0} occurred. Arguments:\\n{1}\".format(\n type(ex).__name__, ex.args\n )\n )\n\n finally:\n try:\n driver.quit()\n except Exception as ex:\n print(\n \"Error in Quitting in final:\\t\\tAn exception of type {0} occurred. Arguments:\\n{1}\".format(\n type(ex).__name__, ex.args\n )\n )\n finally:\n print(\"Login Thread Exited\")",
"def exec(nb,tickers,pa,typ,First):\n dl = Downloader() # the SEC_edgar_downloader package\n if First: # if First == True\n # loading the data\n for t in tickers: # looping over all tickers\n dl.get(typ, t,amount=nb, download_details=True)\n\n assert os.path.isdir(pa),'Wrong File Directory Check Again' # assert the SEC edgar folder is created \n\n # proccesing it\n for t in tickers:\n writingtxt(findhtml(pa,t,typ),t,nb,typ)",
"def wal_selenium(folder_path, driver_path, url, html_username_attribute, login_username, html_password_attribute,\n login_password, html_login_button_attribute):\n driver = set_web_driver_profile_and_preferences(folder_path, driver_path)\n\n try:\n driver.get(url)\n time.sleep(10)\n\n driver.find_element_by_id(html_username_attribute).send_keys(login_username)\n time.sleep(5)\n driver.find_element_by_id(html_password_attribute).send_keys(login_password)\n time.sleep(5)\n driver.find_element_by_id(html_login_button_attribute).click()\n time.sleep(10)\n\n driver.find_element_by_id(\"menuAppsLink\").click()\n time.sleep(5)\n\n driver.find_element_by_id(\"txtSitemapSearch\").send_keys(\"Decision Support\")\n time.sleep(5)\n driver.find_element_by_id(\"filterSearchIcon\").click()\n time.sleep(5)\n driver.find_element_by_partial_link_text(\"Decision Support\").click()\n time.sleep(5)\n\n driver.find_element_by_xpath(\"//*[@title='{0}']\".format(\"My Saved Reports\")).click()\n time.sleep(5)\n\n driver.switch_to.frame(\"ifrContent\")\n driver.find_element_by_id(\"IMG27289255\").click()\n time.sleep(5)\n ActionChains(driver).context_click(driver.find_element_by_xpath(\"//span[text()='{0}']\".format(\"GPSQLServerPOSReport\"))).perform()\n time.sleep(5)\n driver.find_element_by_xpath(\"//*[contains(@onclick, '{0}')]\".format(\"submitSaved(event)\")).click()\n time.sleep(5)\n driver.switch_to.default_content()\n\n driver.switch_to.window(driver.window_handles[1])\n time.sleep(5)\n submitReportConfirmationText = driver.find_element_by_xpath(\"//*[@class='dialogText']\").text\n # Extract the digit in the text and convert it to string.\n walmartReportJobId = str([int(s) for s in str.split(submitReportConfirmationText) if s.isdigit()][0])\n time.sleep(5)\n\n driver.switch_to.window(driver.window_handles[0])\n driver.find_element_by_xpath(\"//a[@href='/']\").click()\n\n valid = False\n counter = 0\n while not valid:\n # Go threw the report and request table status and target the one corresponding to the walmartReportJobId.\n rowsInReportsAndRequestStatusTable = driver.find_elements_by_xpath(\"//div[@class='row-fluid statusResultsText']\")\n specificJobIdRowInReportsAndRequestStatusTable = [i for i in rowsInReportsAndRequestStatusTable if walmartReportJobId in i.text]\n\n if \"Done\" in specificJobIdRowInReportsAndRequestStatusTable[0].text:\n print(\"The report is ready to download.\")\n driver.find_element_by_xpath(\"//i[contains(@id, '{0}')]\".format(walmartReportJobId)).click()\n valid = True\n\n else:\n if counter >= 500:\n raise AirflowException(\"Wal-Mart report refresh timeout.\")\n else:\n print(\"Waiting 25 seconds before trying again.\")\n time.sleep(25)\n counter += 1\n driver.find_element_by_xpath(\"//a[@href='/']\").click()\n\n time.sleep(15)\n driver.quit()\n except Exception as e:\n print(e)\n finally:\n driver.quit()",
"def start(self):\n # check whether ThermoRawFileParser.exe is available\n assert Path(self._thermorawfileparser_path).is_file(), \\\n 'Executable to ThermoRawFileParser not found.'\n\n # access PRIDE repository by accession\n req = requests.get(f'https://www.ebi.ac.uk/pride/ws/archive/v2/files/byProject?accession={self._accession}',\n headers={'Accept': 'application/json'})\n\n # check whether API request contains success notification\n assert req.status_code == 200, \\\n f'Unsuccessful PRIDE access via accession (HTTP response status code {req.status_code}).'\n\n # find .sdrf file(s)\n req = req.json()\n files = [file['value'] for accession in req\n for file in accession['publicFileLocations']\n if 'ftp' in file['value'] and 'sdrf' in file['value'].lower()]\n\n assert len(files) > 0, 'PRIDE accession does not contain SDRF file(s).'\n\n self.sdrf_files = len(files)\n\n # create directory for acession\n Path(self._accession).mkdir(parents=False, exist_ok=True)\n # load .fasta map to acquire according .fasta darabase(s)\n if not Path(f'databases/{self.FASTA_MAP_FILE}').is_file():\n self.create_fasta_map(f'{self.UNIPROT_FASTA_BASIS_PATH}/README')\n self.fasta_map = pd.read_csv(f'databases/{self.FASTA_MAP_FILE}', sep=';')\n\n # download and iterate every .sdrf file found for appropriate accession\n for idx, file in enumerate(files):\n print(f'Processing .sdrf file ({idx + 1}/{len(files)}) from {self._accession}')\n with closing(request.urlopen(file)) as r:\n # download .sdrf\n sdrf = '{}/{}'.format(self._accession, file.split('/')[-1])\n print(sdrf)\n with open(sdrf, 'wb') as f:\n shutil.copyfileobj(r, f)\n with open(sdrf, 'r') as csv_file:\n entries = [entry for entry in csv.DictReader(csv_file, delimiter='\\t')]\n for i in range(0, len(entries)):\n entries[i] = dict((k.lower(), v) for k, v in entries[i].items()) ## transform everything to lower case!\n #entries = [entry.lower() for entry in entries] ## transform everything to lower case\n\n col_names = list(entries[0].keys())\n col_names = [entry.lower() for entry in col_names]\n\n if not all([col in col_names # list(entries[0].keys()\n for col in self.prerequisite_sdrf_cols]):\n print('ERROR: SDRF file does not provide prerequisite information '\n '(column names). 
SDRF file is skipped!')\n break\n\n #col_names = list(entries[0].keys())\n for entry_idx, entry in enumerate(entries):\n sdrf_infos = self._read_config_sdrf(entry, col_names)\n #\n if not self.use_same_fasta or entry_idx == 0:\n fasta_db = self._receive_fasta_database(sdrf_infos['organism'])\n if fasta_db:\n # if needed, create new directory\n if self._separate_sdrf_entries:\n sample_name = '/{}'.format(sdrf_infos['name'])\n sample_name = sample_name.replace(\" \", \"\") ### remove whitespace\n Path(f'{self._accession}{sample_name}').mkdir(parents=False, exist_ok=True)\n else:\n sample_name = ''\n file_name = sdrf_infos['file name']\n print('\\nProcessing {} ({}/{})'.format(sdrf_infos['name'],\n entry_idx + 1, len(entries)))\n # download .raw file\n with closing(request.urlopen(sdrf_infos['uri'])) as r:\n with open(f'{self._accession}{sample_name}/{file_name}', 'wb') as f:\n shutil.copyfileobj(r, f)\n # convert .raw to .mgf using ThermoRawFileParser.exe\n arguments = f'{self._thermorawfileparser_path} ' \\\n f'-i={self._accession}/{sample_name}/{file_name} ' \\\n f'-o={self._accession}/{sample_name} ' \\\n f'-f=0'\n subprocess.call(arguments, stdout=self._FNULL, stderr=self._FNULL, shell=False)\n # determine path to created .mgf file\n\n mgf_file_name = file_name.replace('raw', 'mgf')\n mgf_file_name = mgf_file_name.replace('RAW', 'mgf')\n\n mgf_file = '{}{}/{}'.format(self._accession,\n sample_name,\n mgf_file_name)\n # start search engine search\n self._search_engine.search(cwd=self._cwd,\n database=fasta_db,\n sdrf_entry=sdrf_infos,\n mgf_file=mgf_file)\n # perform FDR on results\n if self._use_search_engine_specific_fdr:\n self._search_engine.fdr()\n else:\n self.fdr()\n else:\n print('There is no associated FASTA database.')",
"def runBrowser(driver, url):\n\tdriver.get(url)\n\ttime.sleep(3) #REACT app need to sleep and wait app load.\n\tall_links=driver.execute_script('all_links = []; links = document.querySelectorAll(\".style-module--action--1Avvt>a\"); links.forEach(url => all_links.push(url.href)); return all_links');\n\tbar = IncrementalBar('📥 Icons Downloaded', max = len(all_links))\n\t\n\tfor i, link in enumerate(all_links):\n\t\tdriver.execute_script('''window.open(\"'''+link+'''\",\"_blank\");''')\n\t\tbar.next()\n\tprint('\\n')\n\tdriver.close()\n\tMessage.success('🎉 Download done!')",
"def create_gdb(out_gdb_path=\"./TravelerInfo.gdb\", access_code=None,\n templates_gdb=None, names=None, skip_data=False):\n\n # Create the file GDB if it does not already exist.\n arcpy.env.overwriteOutput = True\n if not arcpy.Exists(out_gdb_path):\n logging.debug(\"Creating GDB %s\", out_gdb_path)\n arcpy.management.CreateFileGDB(*os.path.split(out_gdb_path))\n else:\n logging.debug(\"%s already exists. Skipping creation.\", out_gdb_path)\n\n if not names:\n names = tuple(URLS.keys()) + (\"Scanweb\",)\n\n # Download each of the REST endpoints.\n for name in names:\n if name == \"Scanweb\":\n if skip_data:\n create_tables(out_gdb_path, template_gdb=templates_gdb)\n else:\n populate_feature_classes(out_gdb_path)\n else:\n print(\"Contacting %s...\" % URLS[name])\n # If user provided access code, use it.\n # Otherwise don't provide to function, which will use default from\n # environment or text file.`\n if skip_data:\n data = None\n else:\n if access_code:\n data = get_traveler_info(name, access_code)\n else:\n data = get_traveler_info(name)\n out_table = os.path.join(out_gdb_path, name)\n create_table(out_table, None, data, templates_gdb)",
"def main():\n args = docopt(__doc__)\n loc = \"%s/%s/dataset/%s\" % (args['--host'], args['--prefix'], args['DATASET'])\n qstr = \"staticThreshold=Infinity\"\n qstr += \"&nohitThreshold=Infinity\"\n qstr += \"&plotGraphics=svg\"\n if args['--format'] == 'svg':\n qstr += \"&svgThreshold=Infinity\"\n shape = 'square'\n for param in args['--param']:\n qstr += \"&%s\" % str(param)\n key, value = param.split('=')\n if key == 'plotShape':\n shape = value\n\n timeout = int(args['--timeout'])\n\n outdir = os.path.abspath(args['--out'])\n os.makedirs(Path(outdir), exist_ok=True)\n\n profile = webdriver.FirefoxProfile()\n profile.set_preference('browser.download.folderList', 2)\n profile.set_preference('browser.download.manager.showWhenStarting', False)\n profile.set_preference('browser.download.dir', outdir)\n profile.set_preference('browser.download.lastDir', args['--out'])\n profile.set_preference('browser.helperApps.neverAsk.saveToDisk',\n 'image/png, image/svg+xml, text/csv, text/plain, application/json')\n\n options = Options()\n options.set_headless(headless=False)\n display = Display(visible=0, size=(800, 600))\n display.start()\n driver = webdriver.Firefox(options=options, firefox_profile=profile)\n try:\n view = args['--view'][0]\n if args['--preview']:\n qstr += '#Filters'\n url = \"%s/%s?%s\" % (loc, view, qstr)\n print(\"Loading %s\" % url)\n try:\n driver.get(url)\n except Exception as err:\n print(err)\n\n for next_view in args['--view']:\n if next_view != view:\n view = next_view\n url = \"%s/%s?%s\" % (loc, view, qstr)\n print(\"Navigating to %s\" % url)\n try:\n driver.get(url)\n except Exception as err:\n print(err)\n for fmt in args['--format']:\n file = \"%s.%s\" % (args['DATASET'], view)\n if view == 'blob':\n file += \".%s\" % shape\n elif view == 'busco':\n view = \"all_%s\" % view\n if fmt not in ('csv', 'json'):\n fmt = 'json'\n file += \".%s\" % fmt\n print(\"Fetching %s\" % file)\n el_id = \"%s_save_%s\" % (view, fmt)\n print(\"waiting for element %s\" % el_id)\n unstable = True\n while unstable:\n try:\n element = WebDriverWait(driver, timeout).until(\n EC.visibility_of_element_located((By.ID, el_id))\n )\n element.click()\n unstable = False\n file_name = \"%s/%s\" % (outdir, file)\n print(\"waiting for file '%s'\" % file_name)\n file_ready(file_name)\n except Exception as err:\n time.sleep(1)\n\n for preview in args['--preview']:\n print(\"Creating %s preview\" % preview)\n for fmt in args['--format']:\n el_id = \"%s_preview_save_%s\" % (preview, fmt)\n file = \"%s.%s.preview.%s\" % (args['DATASET'], preview, fmt)\n try:\n element = WebDriverWait(driver, timeout).until(\n EC.visibility_of_element_located((By.ID, el_id))\n )\n element.click()\n file_name = \"%s/%s\" % (outdir, file)\n print(\"waiting for file '%s'\" % file_name)\n file_ready(file_name)\n except Exception as err:\n print(err)\n driver.close()\n except Exception as err:\n print(err)\n driver.close()",
"def main():\n\n in_file = ('/home/desi2/candidatesp9/asteroids_decals_dr2.fits')\n out_dir = os.path.join(os.environ.get('HOME'), 'asteroid_cutouts/')\n\n cand_info = fits_table(in_file)\n # Pre-select asteroids in the ra, dec box you know they exist.\n ramin = 107\n ramax = 130\n decmin = 16\n decmax = 30\n these = np.where((cand_info.ra0>ramin)*(cand_info.ra0<ramax)*\n (cand_info.dec0>decmin)*(cand_info.dec0<decmax))[0]\n #pdb.set_trace() # Runs Python Debugger on code up to this line. \n cand_info = cand_info[these]\n\n urls = []\n jpgfiles = []\n for ii in range(100):\n print('Working on candidate {}'.format(ii))\n ra = cand_info.ra0[ii]\n dec = cand_info.dec0[ii]\n \n jpgurl = 'http://legacysurvey.org/viewer/jpeg-cutout-decals-dr2?ra={:.6f}&dec={:.6f}&pixscale=0.262&size=200'.format(ra, dec)\n \n jpgfile = 'obj-{:03d}.jpg'.format(ii)\n jpgfile = os.path.join(out_dir, jpgfile)\n grab = 'wget --continue -O {:s} \"{:s}\"' .format(jpgfile, jpgurl)\n print(grab)\n os.system(grab)\n #pdb.set_trace() # Runs Python Debugger on code up to this line. \n if os.stat(jpgfile).st_size < 18000: # Remove partial or empty images\n # The cut on filesize takes care of most of the bad images but\n # leaves some behind. If the restriction is any larger,\n # it can remove some valid files.\n os.remove(jpgfile)\n else:\n print(jpgurl)\n jpgfiles.append(jpgfile)\n urls.append(jpgurl)\n # for HTML file. What should the URL be?\n #print('<html>')\n #print('<head> Planet Nine Candidates </head>')\n #print('<body>')\n #for thisurl, thisjpg in zip(urls, jpgfiles):\n # print('<div class=\"image\">')\n # print('<a href=\"{}\"><img src=\"{:s}\"></a>'.format(thisurl, thisjpg))\n # print('<div class=\"caption\"> Image of {:s} </div>' .format(thisjpg))\n # print('</div>')\n #print('</body></html>')",
"def scrape_BI(url):\n response = requests.get(url)\n soup = BeautifulSoup(response.text)\n companies = soup.find_all('h3', class_='slide-title')\n #names = []\n driver = init_driver()\n for company in companies[:]:\n name = company.getText().strip()\n # if \" \" in name:\n # name.replace(' ','+')\n html_code = load_google(driver, name)\n #name, address = scrape_google(html_code)\n url = scrape_google(html_code)\n print(name,url)\n #names.append(name)\n driver.quit()\n #print(names)",
"def getFutbinDataAndPopulateTable(driver, queue, futbin_url):\n browser = driver\n driver = driver\n\n tab_url = futbin_url\n\n browser.execute_script(\"window.open('');\")\n browser.switch_to.window(browser.window_handles[1])\n browser.get(tab_url)\n\n name = WebDriverWait(driver, 20).until(EC.visibility_of_element_located(\n (By.XPATH, \"/html/body/div[8]/div[15]/div/div/div[1]/div[2]/table/tbody/tr[2]/td\"))).text\n team = WebDriverWait(driver, 20).until(EC.visibility_of_element_located(\n (By.XPATH, \"/html/body/div[8]/div[15]/div/div/div[1]/div[2]/table/tbody/tr[3]/td/a\"))).text\n nation = WebDriverWait(driver, 20).until(EC.visibility_of_element_located(\n (By.XPATH, \"/html/body/div[8]/div[12]/div[3]/div[1]/div/ul/li[1]/a\"))).text\n cardtype = WebDriverWait(driver, 20).until(EC.visibility_of_element_located(\n (By.XPATH, \"/html/body/div[8]/div[15]/div/div/div[1]/div[2]/table/tbody/tr[12]/td\"))).text\n rating = WebDriverWait(driver, 20).until(EC.visibility_of_element_located(\n (By.XPATH, \"/html/body/div[8]/div[13]/div[2]/div/div/div[1]/div/a/div/div[2]\"))).text\n cardname = WebDriverWait(driver, 20).until(EC.visibility_of_element_located(\n (By.XPATH, \"/html/body/div[8]/div[13]/div[2]/div/div/div[1]/div/a/div/div[3]\"))).text\n position = WebDriverWait(driver, 20).until(EC.visibility_of_element_located(\n (By.XPATH, \"/html/body/div[8]/div[13]/div[2]/div/div/div[1]/div/a/div/div[4]\"))).text\n\n internals_location = driver.find_element(\n By.XPATH, \"/html/body/div[8]/div[5]/div\")\n internal_id = int(internals_location.get_attribute(\"data-baseid\"))\n futbin_id = internals_location.get_attribute(\"data-id\")\n\n # price, lastupdated = get_futbin_price_lastupdated(fifa_id)\n\n r = requests.get(\n 'https://www.futbin.com/22/playerPrices?player={0}'.format(internal_id))\n\n data = r.json()\n price = data[str(internal_id)][\"prices\"][\"xbox\"][\"LCPrice\"]\n lastupdated = data[str(internal_id)][\"prices\"][\"xbox\"][\"updated\"]\n\n # 18 mins ago\n # 48 mins ago\n # 1 hour ago\n # 2 hours ago\n if (lastupdated == \"Never\"):\n return 0, 100\n elif (\"mins ago\" in lastupdated):\n lastupdated = lastupdated[:-9]\n lastupdated = int(lastupdated)\n elif(\"hour ago\" in lastupdated):\n lastupdated = lastupdated[:-9]\n lastupdated = int(lastupdated) * 60\n elif(\"hours ago\" in lastupdated):\n lastupdated = lastupdated[:-10]\n lastupdated = int(lastupdated) * 60\n elif(\"seconds\" in lastupdated):\n lastupdated = 1\n elif(\"second\" in lastupdated):\n lastupdated = 1\n else:\n return 0, 100\n\n price = price.replace(\",\", \"\")\n price = int(price)\n\n # MINUTES\n lastupdated = int(lastupdated)\n futbin_id = int(futbin_id)\n market_price = 0\n buy_pct = .85\n agg = [name, cardname, rating, team, nation, cardtype, position,\n internal_id, futbin_id, price, lastupdated, market_price, buy_pct]\n\n full_entry = \"\"\n for word in agg:\n word = str(word)\n word_comma = word + \",\"\n full_entry += word_comma\n\n # Remove last comma\n full_entry = full_entry[:-1]\n print(full_entry)\n\n # Add new line to end\n hs = open(\"./data/player_list.txt\", \"a\", encoding=\"utf8\")\n hs.write(full_entry + \"\\n\")\n hs.close()\n\n log_event(queue, \"Added player \" + str(cardname))\n\n # ~ ~ ~ ~ ~ ~ ~ Close the futbin tab ~ ~ ~ ~ ~\n browser.close()\n\n # Switch back to the first tab with URL A\n browser.switch_to.window(browser.window_handles[0])\n # log_event(self.queue, \"Fetched player info\")",
"def customized_grammar_sentences(driver, file_name):\n file_path = get_file_path(file_name)\n data = []\n\n with open(file_path) as file:\n data = file.readlines()\n\n driver.find_element_by_xpath(\n addlink.format(\n model_path='customized_pondlet/customizedgrammarsentence')\n ).click()\n\n for row in data:\n items = row[:-1].split('\\t')\n\n input_value = {\n 'customized': items[0],\n 'grammarsentence': items[1],\n 'grammar': items[2],\n 'execflag_rearrangement': items[3],\n 'execflag_cloze': items[4],\n 'execexif_cloze_simp': items[5],\n 'execexif_cloze_trad': items[6],\n 'simp_close_candidates': items[7],\n 'trad_close_candidates': items[8],\n }\n\n wait_xpath(driver, addanother)\n\n if input_value['customized'] != \"\":\n select_customized = driver.find_element_by_id('id_customized')\n select_customized.find_element_by_xpath(\n f\"//option[contains(text(),'{input_value['customized']}')]\") \\\n .click()\n\n driver.find_element_by_id('id_grammarsentence_object_id') \\\n .send_keys(input_value['grammarsentence'])\n\n driver.find_element_by_id('id_grammar_object_id').send_keys(\n input_value['grammar'])\n\n if input_value['execflag_rearrangement'].lower() == 'true':\n driver.find_element_by_id('id_execflag_rearrangement').click()\n\n if input_value['execflag_cloze'].lower() == 'true':\n driver.find_element_by_id('id_execflag_cloze').click()\n\n driver.find_element_by_id('id_execexif_cloze_simp').send_keys(\n input_value['execexif_cloze_simp'])\n\n driver.find_element_by_id('id_execexif_cloze_trad').send_keys(\n input_value['execexif_cloze_trad'])\n\n driver.find_element_by_id('id_simp_close_candidates').send_keys(\n input_value['simp_close_candidates'])\n\n driver.find_element_by_id('id_trad_close_candidates').send_keys(\n input_value['trad_close_candidates'])\n\n driver.find_element_by_xpath(addanother).click()\n wait_xpath(driver, success)\n\n driver.find_element_by_id('site-name').click()\n wait_url(driver, dashboard)",
"def download_structure(inputpdbid):\n try:\n if len(inputpdbid) != 4 or extract_pdbid(inputpdbid.lower()) == 'UnknownProtein':\n logger.error(f'invalid PDB-ID (wrong format): {inputpdbid}')\n sys.exit(1)\n pdbfile, pdbid = fetch_pdb(inputpdbid.lower())\n pdbpath = tilde_expansion('%s/%s.pdb' % (config.BASEPATH.rstrip('/'), pdbid))\n create_folder_if_not_exists(config.BASEPATH)\n with open(pdbpath, 'w') as g:\n g.write(pdbfile)\n return pdbpath, pdbid\n except ValueError: # Invalid PDB ID, cannot fetch from RCBS server\n logger.error(f'PDB-ID does not exist: {inputpdbid}')\n sys.exit(1)",
"def process_webpage(self, target, output_file, url, embed, selenium):\n\t\t# Build the output file's name\n\t\tself._build_output_file(output_file)\n\t\t# Open the output file and clone the webpage\n\t\twith open(self.output_file_name, \"w\") as output:\n\t\t\tself.collect_source(target, output, url, embed, selenium)",
"def wf_dlvry_slot_finder_driver():\n driver = ChromeDriverSession.get_instance().web_driver\n\n sobj = SoupObj()\n\n def soup_refresh():\n time.sleep(Config.refresh_wait)\n driver.refresh()\n print(f\"refreshed {driver.current_url}\")\n html = driver.page_source\n sobj.soup = bs4.BeautifulSoup(html, features=\"html.parser\")\n\n soup_refresh()\n\n while not are_delivery_slots_available(sobj.soup):\n print(\"no delivery slots available\")\n soup_refresh()\n\n print(\"delivery slots available\")\n alert_util(\"Slots for delivery opened\")",
"def dftb_driver(relax_method, atoms_rel, thres_force, num_steps):\n dftb_driver= \"\"\"Driver = {{ relax_method }} {\n MovedAtoms = {{ atoms_rel }}\n MaxForceComponent = {{ thres_force }}\n MaxSteps = {{ num_steps }}\n }\n \"\"\"\n return Environment().from_string(dftb_driver).render(relax_method=relax_method,atoms_rel=atoms_rel,thres_force=thres_force,num_steps=num_steps)",
"def main():\n default_gdb_path = \"./TravelerInfo.gdb\"\n api_code_var_name = \"WSDOT_TRAFFIC_API_CODE\"\n api_code = os.environ.get(api_code_var_name)\n\n parser = argparse.ArgumentParser(\n description=\"Creates a file geodatabase using data from the WSDOT Traffic API.\")\n\n parser.add_argument(\"--gdb-path\", type=str, default=default_gdb_path,\n help='Path to where the GDB will be created. Defaults to \"%s\".' % default_gdb_path,\n nargs=\"?\")\n parser.add_argument(\n \"--templates-gdb\", help=\"Path to GDB with template feature classes. (Creating feature classes with templates is faster than using the Add Field tool.)\")\n p_help = \"WSDOT Traffic API code. Defaults to value of %s environment variable if available. If this environment variable does not exist, then this parameter is required.\" % api_code_var_name\n parser.add_argument(\"--code\", \"-c\", type=str,\n required=api_code is None, default=api_code,\n help=p_help)\n parser.add_argument(\"--schema-only\", action=\"store_true\", help=\"Using this flag will generate the tables but skips the data download and population steps.\")\n parser.add_argument(\"--log-level\", choices=(\n \"CRITICAL\",\n \"ERROR\",\n \"WARNING\",\n \"INFO\",\n \"DEBUG\",\n \"NOTSET\"\n ), default=logging.NOTSET)\n\n # default_names = [\n # \"CVRestrictions\",\n # \"HighwayAlerts\",\n # \"HighwayCameras\",\n # \"MountainPassConditions\",\n # \"TrafficFlow\",\n # \"WeatherInformation\",\n # \"TravelTimes\"\n # ]\n\n p_help = 'One or more of the following values: %s' % set(tuple(URLS.keys()) + (\"Scanweb\",))\n\n parser.add_argument(\"names\", type=str,\n nargs=argparse.REMAINDER, help=p_help)\n\n args = parser.parse_args()\n log_level = args.log_level\n if log_level:\n log_level = getattr(logging, args.log_level.upper())\n logging.basicConfig(level=log_level)\n\n names = None\n if args.names:\n names = args.names\n\n templates_gdb = args.templates_gdb\n create_gdb(args.gdb_path, args.code, templates_gdb, names, args.schema_only)",
"def pdb_downloader_and_metadata(self, outdir=None, pdb_file_type=None, force_rerun=False):\n\n if not pdb_file_type:\n pdb_file_type = self.pdb_file_type\n\n counter = 0\n for g in tqdm(self.genes):\n pdbs = g.protein.pdb_downloader_and_metadata(outdir=outdir, pdb_file_type=pdb_file_type, force_rerun=force_rerun)\n\n if pdbs:\n counter += len(pdbs)\n\n log.info('Updated PDB metadata dataframe. See the \"df_pdb_metadata\" attribute for a summary dataframe.')\n log.info('Saved {} structures total'.format(counter))"
] | [
"0.59274244",
"0.56355226",
"0.536841",
"0.53651416",
"0.53063196",
"0.5264013",
"0.51971424",
"0.5158493",
"0.515513",
"0.5132952",
"0.50971687",
"0.50108147",
"0.49585226",
"0.49400154",
"0.49312222",
"0.49199107",
"0.4919623",
"0.48803052",
"0.48767388",
"0.48723873",
"0.48598686",
"0.48259178",
"0.4817925",
"0.4777248",
"0.47691262",
"0.476661",
"0.47465992",
"0.47437832",
"0.47382608",
"0.47377485"
] | 0.67259806 | 0 |
Generic function to write .rst files and convert them to pdf/html. Accepts a report template and dictionary. Writes the rst once with full paths for image files and generates a pdf, then strips leading path components and writes again, generating an html file that expects to live in the same directory as report images. | def write_workflow_report(workflow_name, report_template, report_dict):
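    """Fill a report template from report_dict and convert the .rst output to pdf and html.

    The rst source is written twice: once with absolute image paths for the
    pdf build, and once with bare image filenames so the html page can live
    in the same directory as the report images. Returns the pdf and html
    report paths as a list.
    """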
    import os.path as op
    from os.path import exists, basename
    from subprocess import check_output
    # Plug the values into the template for the pdf file
    report_rst_text = report_template % report_dict
    # Write the rst file and convert to pdf
    report_pdf_rst_file = "%s_pdf.rst" % workflow_name
    report_pdf_file = op.abspath("%s_report.pdf" % workflow_name)
    open(report_pdf_rst_file, "w").write(report_rst_text)
    check_output(["rst2pdf", report_pdf_rst_file, "-o", report_pdf_file])
    if not exists(report_pdf_file):
        raise RuntimeError
    # For images going into the html report, we want the path to be relative.
    # (We expect to read the html page from within the datasink directory
    # containing the images.) So iterate through and chop off the leading path.
    for k, v in report_dict.items():
        if isinstance(v, str) and v.endswith(".png"):
            report_dict[k] = basename(v)
    # Write another rst file and convert it to html
    report_html_rst_file = "%s_html.rst" % workflow_name
    report_html_file = op.abspath("%s_report.html" % workflow_name)
    report_rst_text = report_template % report_dict
    open(report_html_rst_file, "w").write(report_rst_text)
    check_output(["rst2html.py", report_html_rst_file, report_html_file])
    if not exists(report_html_file):
        raise RuntimeError
    # Return both report files as a list
    return [report_pdf_file, report_html_file] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def generate_report(template_filename, report_title, report_dir):\n\n def inner(output_dir: Optional[str] = None):\n output_dir = output_dir or report_dir\n with open(template_filename) as fd:\n template = jinja2.Template(fd.read())\n\n template.globals.update(\n {\"date\": str(datetime.datetime.now()), \"lettergen\": lettergen, \"zip\": zip}\n )\n\n headers = iterfiles(output_dir, \"head.\")\n results = iterfiles(output_dir, \"result.\")\n stream = template.stream(headers=headers, results=results, project=report_title)\n artifact = os.path.join(output_dir, \"index.html\")\n stream.dump(artifact)\n logging.info(f\"Created report: {artifact}\")\n\n return inner",
"def task_render_report():\n target = 'report.pdf'\n dep = 'report.md'\n return {\n 'file_dep': [dep],\n 'targets': [target],\n 'actions': [\n f\"pandoc -t latex -o {target} {dep}\"\n ],\n 'clean': True\n }",
"def generate_document(stats: dict, semester: str):\n filename = 'report_' + str(date.today()) + '.html'\n with open('raw_html.html', 'r') as f:\n string = f.read()\n string = string.format(semester,\n stats['faculty_with_usage'],\n stats['full_time'],\n stats['total_full_time'],\n round((stats['full_time'] / stats['total_full_time']) * 100, 1),\n stats['part_time'],\n stats['total_part_time'],\n round((stats['part_time'] / stats['total_part_time']) * 100, 1),\n stats['staff'],\n stats['courses_with_usage'],\n stats['total_courses'],\n round((stats['courses_with_usage'] / stats['total_courses']) * 100, 1),\n stats['specifics']['assignments'],\n stats['specifics']['grade'],\n stats['specifics']['graded'],\n stats['specifics']['discussion'])\n with open(filename, 'w') as f:\n f.write(string)\n pdf = weasyprint.HTML(filename).write_pdf()\n open(\"report_\" + str(date.today()) + \".pdf\", 'wb').write(pdf)",
"def make_report(template_path, report_path, function, *args, **kwargs):\n # Create the report content.\n with open(template_path) as f:\n content = function(*args, f=f, **kwargs)\n\n # Write to the target directory.\n with open(report_path, \"w+\") as f:\n f.write(content)\n\n return report_path",
"def build(self) -> None:\n\n print(\"Genereting files..\")\n self.doc = self.doc + r'\\end{document}'\n\n f = open(\"latex\\\\\" + self.report_name + '.tex', 'w')\n f.write(self.doc)\n f.close()\n\n os.chdir('latex')\n\n cmd = ['pdflatex', '-interaction', 'nonstopmode', self.report_name + '.tex']\n #cmd = ['pdflatex', '-interaction', self.report_name + '.tex']\n\n for i in range(2):\n proc = subprocess.Popen(cmd)\n proc.communicate()\n retcode = proc.returncode\n if not retcode == 0:\n os.chdir('..')\n raise ValueError('Error {} executing command: {}'.format(retcode, ' '.join(cmd)))\n\n os.unlink(self.report_name + '.aux')\n os.unlink(self.report_name + '.lof')\n os.unlink(self.report_name + '.log')\n os.unlink(self.report_name + '.lot')\n os.unlink(self.report_name + '.out')\n os.unlink(self.report_name + '.toc')\n\n os.chdir('..')",
"def make_html(depends=(files['image.gif'],),\n targets=(files['index.html'],)):\n\n index_html = open(files['index.html'].rel, 'w')\n index_html.write(pyyaks.context.render(html_template))\n index_html.close()",
"def gen_html_report(summary, report_template=None, report_dir=None, report_file=None):\n if not summary[\"time\"] or summary[\"stat\"][\"testcases\"][\"total\"] == 0:\n logger.error(f\"test result summary is empty ! {summary}\")\n raise SummaryEmpty\n\n if not report_template:\n report_template = os.path.join(\n os.path.abspath(os.path.dirname(__file__)),\n \"template.html\"\n )\n logger.debug(\"No html report template specified, use default.\")\n else:\n logger.info(f\"render with html report template: {report_template}\")\n\n logger.info(\"Start to render Html report ...\")\n\n start_at_timestamp = summary[\"time\"][\"start_at\"]\n utc_time_iso_8601_str = datetime.utcfromtimestamp(start_at_timestamp).isoformat()\n summary[\"time\"][\"start_datetime\"] = utc_time_iso_8601_str\n\n if report_file:\n report_dir = os.path.dirname(report_file)\n report_file_name = os.path.basename(report_file)\n else:\n report_dir = report_dir or os.path.join(os.getcwd(), \"reports\")\n # fix #826: Windows does not support file name include \":\"\n report_file_name = \"{}.html\".format(utc_time_iso_8601_str.replace(\":\", \"\").replace(\"-\", \"\"))\n\n if not os.path.isdir(report_dir):\n os.makedirs(report_dir)\n\n report_path = os.path.join(report_dir, report_file_name)\n with io.open(report_template, \"r\", encoding='utf-8') as fp_r:\n template_content = fp_r.read()\n with io.open(report_path, 'w', encoding='utf-8') as fp_w:\n rendered_content = Template(\n template_content,\n extensions=[\"jinja2.ext.loopcontrols\"]\n ).render(summary)\n fp_w.write(rendered_content)\n\n logger.info(f\"Generated Html report: {report_path}\")\n\n return report_path",
"def _generate_report(self):\n\n _LOG.info(\"Generating the HTML report.\")\n\n # Make sure the output directory exists.\n try:\n self.outdir.mkdir(parents=True, exist_ok=True)\n except OSError as err:\n raise Error(f\"failed to create directory '{self.outdir}': {err}\")\n\n raw_stats_paths, descr_paths = self._copy_raw_data()\n\n # Find the styles and templates paths.\n templdir = FSHelpers.search_for_app_data(\"wult\", Path(\"templates\"),\n pathdescr=\"HTML report Jinja2 templates\")\n csspath = FSHelpers.search_for_app_data(\"wult\", Path(\"css/style.css\"),\n pathdescr=\"HTML report CSS file\")\n\n # Copy the styles file to the output directory.\n dstpath = self.outdir.joinpath(\"style.css\")\n try:\n shutil.copyfile(csspath, dstpath)\n except OSError as err:\n raise Error(f\"failed to copy CSS file from '{csspath}' to '{dstpath}':\\n{err}\")\n\n # The summary table is only included into the main HTML page.\n sum_tbl = self._prepare_summary_table(raw_stats_paths, descr_paths)\n links_tbl = self._prepare_links_table()\n\n # Each column name gets its own HTML page.\n for colname, pinfos in self._pinfos.items():\n stats_tbl = self._prepare_stats_table(pinfos)\n\n # Render the template.\n jenv = Jinja2.build_jenv(templdir, trim_blocks=True, lstrip_blocks=True)\n jenv.globals[\"stats_tbl\"] = stats_tbl\n jenv.globals[\"pinfos\"] = pinfos\n jenv.globals[\"colname\"] = colname\n jenv.globals[\"title_descr\"] = self.title_descr\n jenv.globals[\"toolname\"] = self._refinfo[\"toolname\"]\n\n if sum_tbl:\n jenv.globals[\"sum_tbl\"] = sum_tbl\n jenv.globals[\"links_tbl\"] = links_tbl\n templfile = outfile = \"index.html\"\n sum_tbl = None\n else:\n templfile = \"metric.html\"\n outfile = links_tbl[colname][\"fname\"]\n\n Jinja2.render_template(jenv, Path(templfile), outfile=self.outdir.joinpath(outfile))",
"def run(self):\n make_sure_path_exists(OUT_FOLDER)\n\n if self.config['type'] == 'website':\n make_sure_path_exists(self.config['out_folder'])\n\n\n for file in self.config['bodies']:\n if file['type'] == 'content':\n self.pandoc_file(file)\n if self.config['type'] == 'website':\n shutil.copyfile(file['generated'], os.path.join(self.config['out_folder'], os.path.basename(file['source'])))\n\n if self.config['type'] == 'website':\n return\n\n for file in self.config['abstract']:\n self.pandoc_file(file)\n for file in self.config['summary']:\n self.pandoc_file(file)\n\n template = LATEX_JINJA_ENV.get_template(self.config['template_file'])\n\n logging.info('Rendering template')\n out = template.render(**self.config)\n with open(self.config['name'] + self.get_file_extension(), 'w') as file:\n file.write(out)\n\n if not self.args.pandoc:\n logging.info('Rendering latex')\n self.write()\n if not self.args.fast:\n logging.info('Rendering latex, again')\n self.write() # twice for the toc\n\n logging.info('Done!')",
"def generate_output_files(rendered_report: Text,\n report_settings: Dict) -> None:\n\n css_path = os.path.join(report_settings[\"css_directory\"],\n report_settings[\"css_filename\"])\n output_path = os.path.join(report_settings[\"output_directory\"],\n report_settings[\"output_filename\"])\n\n # Create the output directory if it does not exist\n if not os.path.isdir(report_settings[\"output_directory\"]):\n os.mkdir(report_settings[\"output_directory\"])\n\n # Write out the generated report\n with open(output_path, \"w\") as output_file:\n if output_file.writable():\n output_file.write(rendered_report)\n else:\n print(\"Error: {} is not writable\".format(output_path))\n\n # Copy CSS file into output directory\n shutil.copy2(css_path, report_settings[\"output_directory\"])\n\n return",
"def print_html_report(report, title, img_name):\n import jinja2\n\n template_loader = jinja2.FileSystemLoader(searchpath=\"./\")\n template_env = jinja2.Environment(loader=template_loader)\n template_file = \"template.html\"\n template = template_env.get_template(template_file)\n heads = [\"Name\", \"Version\", \"Size\"]\n heads_comp = [\"Name\", \"Size\", \"Component\"]\n output_text = template.render(pips=report[\"pips\"],\n rpms=report[\"rpms\"],\n apts=report[\"apts\"],\n files_list=report[\"files\"],\n bundles=report[\"bundles\"],\n heads=heads,\n heads_comp=heads_comp,\n img_name=img_name,\n title=title)\n report_title = 'report_%s.html' % (title)\n html_file = open(report_title, 'w')\n html_file.write(output_text)\n html_file.close()",
"def convert_to_latex(self, builder, filename, latex_metadata):\n relative_path = ''\n tex_data = ''\n tex_build_path = self.texdir + relative_path\n pdf_build_path = self.pdfdir + relative_path\n template_folder = builder.config['jupyter_template_path']\n\n\n ensuredir(tex_build_path)\n ensuredir(pdf_build_path)\n\n ## setting the working directory\n os.chdir(self.texdir)\n\n ## copies all theme folder images to static folder\n if os.path.exists(builder.confdir + \"/theme/static/img\"):\n copy_tree(builder.confdir + \"/theme/static/img\", self.texdir + \"/_static/img/\", preserve_symlinks=1)\n else:\n self.logger.warning(\"Image folder not present inside the theme folder\")\n\n fl_ipynb = self.texdir + \"/\" + \"{}.ipynb\".format(filename)\n fl_tex = self.texdir + \"/\" + \"{}.tex\".format(filename)\n fl_tex_template = builder.confdir + \"/\" + template_folder + \"/\" + builder.config['jupyter_latex_template']\n\n ## do not convert excluded patterns to latex\n excluded_files = [x in filename for x in builder.config['jupyter_pdf_excludepatterns']]\n\n if not True in excluded_files: \n ## --output-dir - forms a directory in the same path as fl_ipynb - need a way to specify properly?\n ### converting to pdf using xelatex subprocess\n if sys.version_info[0] < 3:\n subprocess.call([\"jupyter\", \"nbconvert\",\"--to\",\"latex\",\"--template\",fl_tex_template,\"from\", fl_ipynb])\n else:\n subprocess.run([\"jupyter\", \"nbconvert\",\"--to\",\"latex\",\"--template\",fl_tex_template,\"from\", fl_ipynb])\n\n ### check if subdirectory\n subdirectory = \"\"\n index = filename.rfind('/')\n if index > 0:\n subdirectory = filename[0:index]\n filename = filename[index + 1:]\n\n ### set working directory for xelatex processing\n os.chdir(self.texdir + \"/\" + subdirectory)\n\n try:\n self.subprocess_xelatex(fl_tex, filename)\n if 'bib_include' in latex_metadata:\n self.subprocess_bibtex(filename)\n self.subprocess_xelatex(fl_tex, filename)\n self.subprocess_xelatex(fl_tex, filename)\n except OSError as e:\n print(e)\n except AssertionError as e:\n pass\n # exit() - to be used when we want the execution to stop on error",
"def export(bill, template_dir=None, pdf_dir=None):\n # if template_dir not provided,\n # look for the template directory of this script's location\n if not template_dir:\n template_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'template')\n # If the user-defined or default template directories don't exist, raise an error\n if not os.path.exists(template_dir):\n raise OSError('Could not find the template directory')\n\n # If no user-defined pdf output directory, put it in a folder where this script lives\n if not pdf_dir:\n basedir = os.path.dirname(os.path.abspath(__file__))\n pdf_dir = os.path.join(basedir, 'pdfs')\n # if the default pdf output directory doesn't exist, make it\n if not os.path.exists(pdf_dir):\n os.makedirs(pdf_dir)\n\n # if the user-defined pdf_dir does not exist, raise an error\n if not os.path.exists(pdf_dir):\n raise IOError('Could not find a directory to output pdfs')\n\n # get the path to the template\n template_path = os.path.join(template_dir, 'templates', 'template.html')\n # read the template\n template = open(template_path).read()\n\n # Replace relative imports of images and CSS with the full path to the files\n # Note: I'm including the '/' in the replacement so that\n # it doesn't replace other uses for '..' such as in regular text (i.e. an ellipsis)\n template = template.replace('../', os.path.join(path2url(template_dir), ''))\n\n # Insert billing data using find/replace\n # Sort by field length longest to shortest\n # This prevents values from fields that are substrings of other fields from going in the wrong place\n # e.g. the value of \"rebate\" would be inserted into the field \"rebate_closing_balance\"\n for key, value in sorted(bill.items(), key=lambda t: len(t[0]), reverse=True):\n template = template.replace(\"__\"+key, format_value(value))\n\n # Now create the pdf\n try:\n # options = {'encoding': 'utf-8'}\n report_name = make_report_name(bill)\n output_file = os.path.join(pdf_dir, report_name)\n pdfkit.from_string(template, output_file)\n except:\n typ, value, tb = sys.exc_info()\n traceback.print_exc()\n pdb.post_mortem(tb)",
"def create_pdf(submission):\n # Get questions from sections\n fields = {}\n for section in submission.questions:\n for form in section[\"forms\"]:\n for field in form[\"fields\"]:\n fs = field.get(\"fields\", [field])\n for f in fs:\n fields[f[\"name\"]] = f\n\n # Pull out image and answers\n images = []\n docs = []\n answers = []\n for answer in submission.answers:\n answer, name = answer.get(\"answer\", \"\"), answer.get(\"name\", \"\")\n field = fields[name]\n if field[\"type\"] == \"FILE\":\n image_ids = []\n doc_ids = []\n for file in answer:\n if \"image\" in file:\n image_ids.append(file[\"id\"])\n elif \"file\" in file:\n doc_ids.append(file[\"id\"])\n\n if image_ids:\n images += [\n image_upload.image\n for image_upload in ImageUpload.objects.filter(\n pk__in=image_ids\n ).all()\n ]\n if doc_ids:\n docs += [\n file_upload.file\n for file_upload in FileUpload.objects.filter(pk__in=doc_ids).all()\n ]\n else:\n answers.append(\n {\n \"name\": name.lower().replace(\"_\", \" \").capitalize(),\n \"prompt\": field.get(\"prompt\", \"\"),\n \"answers\": answer if type(answer) is list else [answer],\n }\n )\n\n context = {\n \"submission\": submission,\n \"answers\": answers,\n \"images\": images,\n \"docs\": docs,\n }\n pdf_html_str = render_to_string(\"client-intake.html\", context=context)\n pdf_bytes = weasyprint.HTML(string=pdf_html_str).write_pdf()\n return pdf_bytes",
"def RenderAnnotation(self, annotation_data, output_path):\n \n output_path = self._CreateUniqueDirectory(output_path)\n os.chdir(output_path)\n \n with open(\"report.html\", \"w\") as report_file:\n report_file.write(\n self.report_template.render(annotation_data=annotation_data))\n \n os.mkdir(\"src\")\n \n for annotated_file in annotation_data:\n with open(\"src/%s.html\" % annotated_file.source_stats.file_name.replace(\"/\", \"_\"), \"w\") as source_file:\n source_file.write(self.source_template.render(source_stats=annotated_file.source_stats, lines=annotated_file.lines))\n \n for static in [\"cloud9/css/source.css\", \"cloud9/css/report.css\"]:\n shutil.copy(os.path.join(self.base_dir, static), \".\")",
"def generate_pages(self, writer):\r\n write = partial(writer.write_file,\r\n relative_urls=self.settings['RELATIVE_URLS'])\r\n\r\n # to minimize the number of relative path stuff modification\r\n # in writer, articles pass first\r\n self.generate_articles(write)\r\n self.generate_period_archives(write)\r\n self.generate_direct_templates(write)\r\n\r\n # and subfolders after that\r\n self.generate_tags(write)\r\n self.generate_categories(write)\r\n self.generate_authors(write)\r\n self.generate_drafts(write)",
"def pdf_gen(report, summary=None):\n with open(\"report_content.yaml\", \"r\") as stream:\n docs = yaml.safe_load(stream)\n\n style = g_stylesheet.get(\"styles\")\n elems = [] # elements array used to build pdf structure\n pdf = SimpleDocTemplate(\n f\"{report.replay_id}_report.pdf\",\n pagesize=letter,\n leftMargin=0.75 * inch,\n rightMargin=0.75 * inch,\n topMargin=0.75 * inch,\n bottomMargin=0.75 * inch,\n )\n\n # title and subtitle and cluster info table\n elems.append(Paragraph(docs[\"title\"], style[\"Title\"]))\n elems.append(\n Paragraph(sub_yaml_vars(report, docs[\"subtitle\"]), style[\"Heading4\"])\n )\n cluster_info = pd.DataFrame.from_dict(report.cluster_details, orient=\"index\")\n elems.append(\n Table(\n df_to_np(report.cluster_details.keys(), cluster_info.transpose()),\n hAlign=\"LEFT\",\n style=g_stylesheet.get(\"table_style\"),\n )\n )\n # replay summary\n if summary is not None:\n elems.append(Paragraph(f\"Replay Summary\", style[\"Heading4\"]))\n elems.append(\n ListFlowable(\n [ListItem(Paragraph(x, style[\"Normal\"])) for x in summary],\n bulletType=\"bullet\",\n )\n )\n elems.append(Spacer(0, 5))\n\n elems.append(Paragraph(docs[\"report_paragraph\"], style[\"Normal\"]))\n\n # glossary section\n elems.append(Paragraph(docs[\"glossary_header\"], style[\"Heading4\"]))\n elems.append(Paragraph(docs[\"glossary_paragraph\"], style[\"Normal\"]))\n elems.append(\n ListFlowable(\n [ListItem(Paragraph(x, style[\"Normal\"])) for x in docs[\"glossary\"]],\n bulletType=\"bullet\",\n )\n )\n elems.append(Spacer(0, 5))\n\n # access data section\n elems.append(Paragraph(docs[\"data_header\"], style[\"Heading4\"]))\n elems.append(\n Paragraph(sub_yaml_vars(report, docs[\"data_paragraph\"]), style[\"Normal\"])\n )\n elems.append(\n ListFlowable(\n [ListItem(Paragraph(x, style[\"Normal\"])) for x in docs[\"raw_data\"]],\n bulletType=\"bullet\",\n )\n )\n elems.append(Spacer(0, 5))\n elems.append(\n Paragraph(\n sub_yaml_vars(report, docs[\"agg_data_paragraph\"]), style[\"Normal\"]\n )\n )\n\n # notes section\n elems.append(Paragraph(docs[\"notes_header\"], style[\"Heading4\"]))\n elems.append(Paragraph(docs[\"notes_paragraph\"], style[\"Normal\"]))\n elems.append(\n ListFlowable(\n [ListItem(Paragraph(x, style[\"Normal\"])) for x in docs[\"notes\"]],\n bulletType=\"bullet\",\n )\n )\n\n elems.append(PageBreak()) # page 2: cluster details\n\n # query breakdown\n build_pdf_tables(elems, docs[\"query_breakdown\"], report)\n elems.append(Spacer(0, 5))\n\n # histogram and description\n image_path = hist_gen(\n x_data=report.feature_graph[\"sec_start\"],\n y_data=report.feature_graph[\"count\"],\n title=docs[\"graph\"].get(\"title\"),\n x_label=\"Average Elapsed Time (s)\",\n )\n\n desc = Paragraph(docs[\"graph\"].get(\"paragraph\"), style[\"Normal\"])\n data = [[Image(image_path, width=300, height=200, hAlign=\"LEFT\"), desc]]\n elems.append(\n Table(data, style=TableStyle([(\"VALIGN\", (0, 0), (-1, -1), \"MIDDLE\")]))\n )\n elems.append(Spacer(0, 5))\n\n # cluster metrics table\n build_pdf_tables(elems, docs[\"cluster_metrics\"], report)\n\n elems.append(PageBreak()) # page 3+ measure tables\n\n build_pdf_tables(\n elems, docs[\"measure_tables\"], report\n ) # build 5 measure tables all at once\n\n # build pdf\n pdf.build(\n elems,\n onFirstPage=partial(first_page, report=report),\n onLaterPages=partial(later_pages, report=report),\n )\n os.remove(image_path)\n\n return pdf.filename",
"def _do_generate(*, build, out_dir, on_error):\n if out_dir == \"<test>\":\n out_dir = join(os.environ[\"TEST_TMPDIR\"], \"_builder_out\")\n if not os.path.isabs(out_dir):\n on_error(f\"--out_dir={out_dir} is not an absolute path\")\n if os.path.exists(out_dir):\n if len(os.listdir(out_dir)) > 0:\n on_error(f\"--out_dir={out_dir} is not empty\")\n else:\n if verbose():\n print(f\"+ mkdir -p {out_dir}\", flush=True)\n os.makedirs(out_dir)\n print(\"Generating HTML ...\")\n pages = _call_build(build=build, out_dir=out_dir)\n assert len(pages) > 0\n # Disallow symlinks in the output dir.\n for root, dirs, _ in os.walk(out_dir):\n for one_dir in dirs:\n for entry in os.scandir(f\"{root}/{one_dir}\"):\n assert not entry.is_symlink(), entry.path\n print(\"... done\")",
"def main(\n files: List[Path] = typer.Argument(default=None, dir_okay=False, exists=True),\n template: Optional[str] = typer.Option(\n None, '--template', help='Name of template file'\n ),\n logo: Optional[str] = typer.Option(None, '--logo', help='Name of logo file'),\n logo_width: Optional[str] = typer.Option(\n None, '--logo-width', help='Logo width (default 35mm)'\n ),\n highlight_style: Optional[str] = typer.Option(None, '--highlight-style',\n help='Specify coloring style to be used in highlighting source code'),\n syntax_definition: Optional[str] = typer.Option(None, '--syntax-definition',\n help='Specify a directory which contains syntax definition files'),\n no_toc: bool = typer.Option(\n False, '--no-toc', help='table of contents in PDF document'\n ),\n no_number_sections: bool = typer.Option(False, '--no-number-sections', help='no section numbering'),\n\n no_titlepage: bool = typer.Option(False, '--no-titlepage', help='title in PDF document'),\n tex_file: bool = typer.Option(\n False, '--tex', help='create TeX file instead of PDF document'\n ),\n email: Optional[str] = typer.Option(None, '--email', help='Author email'),\n company: Optional[str] = typer.Option(None, '--company', help='Name of company'),\n department: Optional[str] = typer.Option(\n None, '--department', help='Name of department'\n ),\n confidential: bool = typer.Option(\n False, '--confidential', help='indicate confidential'\n ),\n debug: bool = typer.Option(False, '--debug', help='turns debugging on'),\n pdf_engine: str = typer.Option(\n 'xelatex',\n '--pdf-engine',\n help='Specify pdf engine, one of lualatex, xelatex or tectonic ',\n ),\n _version: bool = typer.Option(\n None, '-V', '--version', callback=version_callback, help='Show version and exit'\n ),\n):\n\n if not files:\n typer.echo('Error: Must specify at least one .md file.')\n raise typer.Abort()\n\n mdfiles: List[str] = [str(md) for md in files]\n\n template = template or os.environ.get('MD2PDF_TEMPLATE')\n if template is None:\n print('No template specified')\n sys.exit(1)\n\n email = email or os.environ.get('MD2PDF_AUTHOR_EMAIL')\n footer_center = ''\n\n # command line overwrites `MD2PDF_PDF_ENGINE`. 
if both are not given\n # then `xelatex` is the default\n pdf_engine = pdf_engine or os.environ.get('MD2PDF_PDF_ENGINE') or 'xelatex'\n # check that pdf-engine is one of the following\n if pdf_engine not in ['xelatex', 'lualatex', 'tectonic']:\n print('--pdf-engine must be one of \"xelatex\", \"lualatex\", \"tectonic\"')\n sys.exit(1)\n\n ext = '.pdf'\n if tex_file:\n ext = '.tex'\n\n if len(mdfiles) == 1:\n toml_file = os.path.splitext(mdfiles[0])[0] + '.toml'\n\n if os.path.exists(toml_file):\n print(f'TOML file {toml_file} found')\n parsed_toml = toml.load(toml_file)\n default_val = parsed_toml.get('default')\n if default_val is None:\n print(f'No file names found in {toml_file}')\n else:\n mdfiles = default_val.get('files')\n\n for mdf in mdfiles:\n print(f'Compiling {mdf}')\n\n main_mdfile = os.path.realpath(mdfiles[0])\n\n outfile = Path(main_mdfile).stem + ext\n\n year = date.today().year\n\n company = company or os.environ.get('MD2PDF_COMPANY')\n department = department or os.environ.get('MD2PDF_DEPARTMENT')\n\n if company:\n if confidential:\n footer_center = f'© Copyright {year} {company}'\n else:\n footer_center = f'{year} {company}'\n\n pdcmd = PandocCmd(outfile)\n pdcmd.append(f'--template={template}')\n pdcmd.append(f'--pdf-engine={pdf_engine}')\n\n pdcmd.set_v('footer-center', footer_center)\n pdcmd.set_v('company', company)\n pdcmd.set_v('department', department)\n\n syntax_definition = syntax_definition or os.environ.get('MD2PDF_SYNTAX_DEFINITION_DIR')\n if syntax_definition is not None:\n add_syntax_definition(pdcmd, syntax_definition)\n\n pdcmd.append('--highlight-style')\n highlight_style = highlight_style or os.environ.get('MD2PDF_HIGHLIGHT_STYLE')\n if highlight_style is None:\n pdcmd.append('pygments')\n else:\n check_highlight_style(highlight_style)\n pdcmd.append(highlight_style)\n\n if not no_number_sections:\n pdcmd.append('--number-sections')\n\n if no_titlepage:\n pdcmd.set_m('titlepage', 'false')\n\n logo = logo or os.environ.get('MD2PDF_LOGO')\n pdcmd.set_v('logo', logo)\n\n logo_width = logo_width or os.environ.get('MD2PDF_LOGO_WIDTH')\n pdcmd.set_v('logo-width', logo_width)\n\n pdcmd.set_m('email', email)\n\n if not no_toc:\n pdcmd.append('--toc')\n\n pdcmd.extend(mdfiles)\n\n if debug:\n print(' '.join(pdcmd.pandoc))\n\n\n pdcmd.run()",
"def _report_template():\n current_dir = Path(__file__).parent\n\n with open(current_dir / \"report_template.html\", \"r\") as f:\n template = f.read()\n template = re.sub(r\"\\s{2,}\", \" \", template)\n template = re.sub(r\"\\n\", \"\", template)\n template = re.sub(r\"> <\", \"><\", template)\n return template",
"def createPDFDoc(self, filepath):\n print(\"Starting pdf creation\")\n strMD=\"\"\n for fileMD,data in self.graph.nodes(data=True):\n if not os.path.isfile(fileMD):\n sys.exit(\"Error: \" + fileMD + \" does not exist\")\n if not fileMD.endswith(\"md\" or \"markdown\"):\n sys.exit(fileMD + \" is not a markdown file\");\n print(\"Found file: \" + fileMD)\n strMD = strMD + \" \" + fileMD\n cmd = \"pandoc --latex-engine=xelatex -s -o \" + filepath + strMD\t\n print(\"Starting file conversion.\")\n if subprocess.call(cmd) != 0:\n print(\"Conversion failed\")\n else:\n print(\"Saving pdf file to: \" + filepath)\n print(\"Conversion successfull\")",
"def make_pdf(self, htmlbody, html_only=False):\n # wrap htmlbody with provided HTML template\n template = self.context.auto_template\n template = template.replace(u'${body}', htmlbody)\n if html_only:\n return template\n try:\n tempdir = tempfile.mkdtemp()\n # attachemnts saved. Let's save generated HTML\n fullpath = os.path.join(tempdir, 'issue.html')\n fp = open(fullpath, 'w')\n fp.write(template.encode('utf-8'))\n fp.close()\n # Run wkhtmltopdf and generate the PDF\n targetpath = os.path.join(tempdir, 'issue.pdf')\n result = subprocess.call([\"wkhtmltopdf\", '-q', 'file://%s' % fullpath, '%s' % targetpath])\n if result == 0:\n return open(targetpath, 'rb').read()\n else:\n return ''\n finally:\n shutil.rmtree(tempdir, ignore_errors=True)",
"def generate_pdf_report(release, spec, report_week):\n\n logging.info(u\" Generating the pdf report, give me a few minutes, please \"\n u\"...\")\n\n working_dir = spec.environment[u\"paths\"][u\"DIR[WORKING,SRC]\"]\n\n execute_command(f\"cd {working_dir} && mv -f index.pdf.template index.rst\")\n\n _convert_all_svg_to_pdf(spec.environment[u\"paths\"][u\"DIR[WORKING,SRC]\"])\n\n # Convert PyPLOT graphs in HTML format to PDF.\n convert_plots = u\"xvfb-run -a wkhtmltopdf {html} {pdf}\"\n plots = get_files(spec.environment[u\"paths\"][u\"DIR[STATIC,VPP]\"], u\"html\")\n plots.extend(\n get_files(spec.environment[u\"paths\"][u\"DIR[STATIC,DPDK]\"], u\"html\")\n )\n for plot in plots:\n file_name = f\"{plot.rsplit(u'.', 1)[0]}.pdf\"\n logging.info(f\"Converting {plot} to {file_name}\")\n execute_command(convert_plots.format(html=plot, pdf=file_name))\n\n # Generate the LaTeX documentation\n build_dir = spec.environment[u\"paths\"][u\"DIR[BUILD,LATEX]\"]\n cmd = PDF_BUILDER.format(\n release=release,\n date=datetime.datetime.utcnow().strftime(u'%Y-%m-%d %H:%M UTC'),\n working_dir=working_dir,\n build_dir=build_dir)\n execute_command(cmd)\n\n # Build pdf documentation\n archive_dir = spec.environment[u\"paths\"][u\"DIR[STATIC,ARCH]\"]\n cmds = [\n f'cd {build_dir} && '\n f'pdflatex -shell-escape -interaction nonstopmode csit.tex || true',\n f'cd {build_dir} && '\n f'pdflatex -interaction nonstopmode csit.tex || true',\n f'cd {build_dir} && '\n f'cp csit.pdf ../{archive_dir}/csit_{release}.{report_week}.pdf &&'\n f'cp csit.pdf ../{archive_dir}/csit_{release}.pdf'\n ]\n\n for cmd in cmds:\n execute_command(cmd)\n\n logging.info(u\" Done.\")",
"def md2html(template,filepath):\n content=''\n s = string.Template(template) \n try:\n content=markdown2.markdown_path(filepath)\n except:\n logger.warning('md2html:markdown convertion failed... Trying safe mode ')\n try:\n content=markdown2.markdown_path(filepath,safe_mode=True)\n except:\n logger.error('md2html:markdown convertion failed for %s. Use raw text.' %filepath)\n import codecs\n try:\n content=codecs.open(filepath,'r','utf-8').read()\n except:\n logger.error('md2html:invalid file? %s ' %filepath)\n # print 'error processing markdown. Read raw file...' \n html=''\n try:\n html=s.substitute(content=content)\n except:\n logger.warning('md2html()::string.Template substitute failed... Trying safe mode ')\n try:\n html=s.safe_substitute(content=content) \n except:\n logger.error('md2html()::string.Template conversion failed for : %s ' %filepath)\n return html",
"def get_raw_pdf(html_path, pdf_path, width='', height=''):\n debug = False\n if mg.EXPORT_IMAGES_DIAGNOSTIC: debug = True\n try:\n url = html_path.as_uri()\n cmd_make_pdf = 'cmd_make_pdf not successfully generated yet'\n \"\"\"\n Unless Linux, MUST be in report directory otherwise won't carry across\n internal links.\n\n Re: http://www.microsoft.com/resources/documentation/windows/xp/all/proddocs/en-us/ntcmds_shelloverview.mspx?mfr=true\n \"\"\"\n ## clear decks first so we can tell if image made or not\n try:\n os.remove(pdf_path)\n except Exception:\n pass\n rel_url = os.path.split(url)[1]\n cd_path = os.path.split(html_path)[0]\n if mg.PLATFORM == mg.WINDOWS: ## using Pyinstaller\n cmd_make_pdf = (\n f'cd \"{cd_path}\" && '\n f'\"{export_output.EXE_TMP}\\\\wkhtmltopdf.exe\" '\n f'{width} {height} \"{rel_url}\" \"{pdf_path}\"')\n elif mg.PLATFORM == mg.MAC:\n cmd_make_pdf = (\n f'cd \"{cd_path}\" && '\n f'\"{mg.MAC_FRAMEWORK_PATH}/wkhtmltopdf\" '\n f'{width} {height} \"{rel_url}\" \"{pdf_path}\"')\n elif mg.PLATFORM == mg.LINUX:\n cmd_make_pdf = f'wkhtmltopdf {width} {height} \"{url}\" \"{pdf_path}\"'\n else:\n raise Exception('Encountered an unexpected platform!')\n ## wkhtmltopdf uses stdout to actually output the PDF - a good feature but stuffs up reading stdout for message\n if debug: print(f'cmd_make_pdf: {cmd_make_pdf}')\n export_output.shellit(cmd_make_pdf)\n if not os.path.exists(pdf_path):\n raise Exception(\n f\"wkhtmltopdf didn't generate error but {pdf_path} not made \"\n f'nonetheless. cmd_make_pdf: {cmd_make_pdf}')\n if debug: print(f'Initial processing of {html_path} complete')\n except Exception as e:\n raise Exception(\n f'get_raw_pdf command failed: {cmd_make_pdf}. Orig error: {b.ue(e)}')\n return pdf_path",
"def generatePage(fn, ttype, envir):\n\n # create necessary directories\n d = dirname(join(opts.root, fn))\n if not exists(d):\n os.makedirs(d)\n\n envir['cd'] = dirname(fn)\n\n # Write out modified file.\n try:\n afn = join(opts.root, fn)\n tfile = open(afn, \"w\")\n execTemplate(tfile, templates[ttype], envir)\n tfile.close()\n\n except IOError, e:\n print >> sys.stderr, \"Error: can't open file: %s\" % fn",
"def template2pdf(template=None, **kwargs):\n str = render_to_string(template, kwargs)\n output = kwargs.get('output', False)\n ret = pdfkit.from_string(str, output, settings.WKHTMLTOPDF_OPTIONS or default_options)\n return (ret, str)",
"def rstjinja(app, docname, source):\n # Make sure we're outputting HTML\n if app.builder.format != 'html':\n return\n src = source[0]\n rendered = app.builder.templates.render_string(src, app.config.html_context)\n source[0] = rendered",
"def rstjinja(app, docname, source):\n # Make sure we're outputting HTML\n if app.builder.format != \"html\":\n return\n src = source[0]\n rendered = app.builder.templates.render_string(src, app.config.html_context)\n source[0] = rendered",
"def rstjinja(app, docname, source):\n # Make sure we're outputting HTML\n if app.builder.format != \"html\":\n return\n src = source[0]\n rendered = app.builder.templates.render_string(src, app.config.html_context)\n source[0] = rendered"
] | [
"0.61397487",
"0.5963483",
"0.5911825",
"0.58263385",
"0.57665575",
"0.5704803",
"0.56863654",
"0.56828785",
"0.5679785",
"0.5642886",
"0.55997115",
"0.5599295",
"0.55712205",
"0.5511724",
"0.5507876",
"0.5451432",
"0.54270923",
"0.534096",
"0.53085405",
"0.53046376",
"0.5291284",
"0.52850384",
"0.5280346",
"0.52754897",
"0.52485186",
"0.5246942",
"0.5221685",
"0.52135694",
"0.5209846",
"0.5209846"
] | 0.7019554 | 0 |
S.feed(handle, consumer) Feed in a BLAST report for scanning. handle is a filelike object that contains the BLAST report. consumer is a Consumer object that will receive events as the report is scanned. | def feed(self, handle, consumer):
if isinstance(handle, File.UndoHandle):
uhandle = handle
else:
uhandle = File.UndoHandle(handle)
# Try to fast-forward to the beginning of the blast report.
read_and_call_until(uhandle, consumer.noevent, contains='BLAST')
# Now scan the BLAST report.
self._scan_header(uhandle, consumer)
self._scan_rounds(uhandle, consumer)
self._scan_database_report(uhandle, consumer)
self._scan_parameters(uhandle, consumer) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def feed(self, handle, consumer, do_features=...): # -> bool:\n ...",
"def handle_feed(\n actapi: act.api.Act,\n user_agent: Text,\n proxies: Optional[Dict[Text, Text]] = None,\n verify_ssl: bool = True,\n output_format: Text = \"json\",\n) -> None:\n\n feed = download_feed(user_agent, proxies, verify_ssl)\n\n feeds_facts: List[act.api.fact.Fact] = []\n\n for report in feed[\"data\"]:\n if not (report.get(\"isinteresting\", False) or report.get(\"threatlevel\", 0)):\n continue\n # store data if threatlevel > 0 or report is interesting\n\n if \"sha256\" not in report:\n continue\n\n feeds_facts += handle_report(actapi, report)\n\n for fact in feeds_facts:\n act.api.helpers.handle_fact(fact, output_format=output_format)",
"def parse(self, handle):\n results = handle.read()\n\n try:\n self._scanner.feed(File.StringHandle(results), self._consumer)\n except ValueError, msg:\n # if we have a bad_report_file, save the info to it first\n if self._bad_report_handle:\n # send the info to the error handle\n self._bad_report_handle.write(results)\n\n # now we want to try and diagnose the error\n self._diagnose_error(\n File.StringHandle(results), self._consumer.data)\n\n # if we got here we can't figure out the problem\n # so we should pass along the syntax error we got\n raise\n return self._consumer.data",
"def feed(self, handle, consumer):\n\n if isinstance(handle, File.UndoHandle):\n pass\n else:\n handle = File.UndoHandle(handle)\n \n\n assert isinstance(handle, File.UndoHandle), \\\n \"handle must be an UndoHandle\"\n if handle.peekline():\n self._scan_record(handle, consumer)",
"def feed(self, handle, consumer):\n assert isinstance(handle, File.UndoHandle), \\\n \"handle must be an UndoHandle\"\n if handle.peekline():\n self._scan_record(handle, consumer)",
"def feed() -> None:\n ...",
"def handle_report(\n actapi: act.api.Act, report: Dict[Text, Any]\n) -> List[act.api.fact.Fact]:\n\n feeds_facts: List[act.api.fact.Fact] = []\n\n content = report[\"sha256\"]\n for hash_type in [\"md5\", \"sha1\", \"sha256\", \"ssdeep\", \"imphash\", \"sha512\"]:\n if (\n hash_type not in report\n or not report[hash_type]\n or report[hash_type] == \"Unknown\"\n ):\n info(f\"{hash_type} not set for content {content}\")\n continue\n feeds_facts.append(\n actapi.fact(\"represents\")\n .source(\"hash\", report[hash_type])\n .destination(\"content\", content)\n )\n feeds_facts.append(\n actapi.fact(\"category\", hash_type).source(\"hash\", report[hash_type])\n )\n\n feeds_facts += handle_hosts(actapi, content, report.get(\"hosts\", []))\n feeds_facts += handle_domains(actapi, content, report.get(\"domains\", []))\n feeds_facts += handle_extracted_files(\n actapi, content, report.get(\"extracted_files\", [])\n )\n feeds_facts += handle_classification_tags(\n actapi, content, report.get(\"classification_tags\", [])\n )\n\n # DISABLED DUE TO EXCESSIVE FACT CHAIN OBJECT. TO BE DISCUSSED\n # feeds_facts += handle_mitre_attcks(actapi, content, report.get(\"mitre_attcks\", []))\n\n feeds_facts += handle_process_list(actapi, content, report.get(\"process_list\", []))\n\n return feeds_facts",
"def scanFeedList(self): \r\n data = self.feed_handler.listScanFeeds()\r\n data = data[:MAX_FEEDS_SCAN]\r\n for idx, feed in enumerate(data):\r\n print \"feeds ... / [%s/%s] (%s docs:%s passed)\" % (idx, len(data),self.feed_item_ctr, self.feed_passed)\r\n try:\r\n baseURL = feed.mainUrl\r\n self.processData(baseURL) \r\n self.createFeedItems()\r\n except Exception, ex:\r\n print(\"ERR: failed to process data and create feed item=%s\" % ex)\r\n print \"done\"",
"def main():\n # Construct the feed generator\n f = LogBufferFeed(FEED_DIR)\n f.MAX_AGE = 24 * 60 * 60 # 1 day\n f.FEED_META['feed.title'] = '%s Referrering Links' % SITE_NAME\n f.FEED_META['feed.tagline'] = \\\n 'New referring links from Apache access.log on %s' % SITE_NAME\n \n # Load up tail of access log, parse, and filter\n new_lines = bookmark_tailgrep(ACCESS_LOG, max_initial_lines=100000)\n all_events = parse_access_log(new_lines)\n events = [ x for x in all_events if event_filter(x) ]\n \n # Scan through latest events for new referrers\n referrers_seen = shelve.open(REFER_SEEN)\n new_referrers = []\n for evt in events:\n k = '%(referrer)s -> %(path)s' % evt\n if not referrers_seen.has_key(k):\n referrers_seen[k] = 1\n new_referrers.append( (evt['referrer'], evt['path']) )\n referrers_seen.close()\n \n # If there were new referrers found, insert a new entry.\n if len(new_referrers) > 0:\n \n # Build a list of hyperlinks for referrers\n links_out = [\n LINK_TMPL % {\n 'SITE_ROOT' : SITE_ROOT,\n 'referrer' : x[0],\n 'path' : x[1],\n }\n for x in new_referrers\n ]\n \n # Build a summary for this entry.\n summary = SUMMARY_TMPL % { \n 'count' : len(new_referrers), \n 'links' : \"\\n\".join(links_out)\n }\n \n # Construct and append a new entry\n entry = FeedEntryDict({\n 'title' : '%s new referrers' % len(new_referrers),\n 'link' : '',\n 'summary' : summary\n })\n f.append_entry(entry)\n\n # Output the current feed entries as both RSS and Atom\n open(FEED_NAME_FN % 'rss', 'w').write(f.scrape_rss())\n open(FEED_NAME_FN % 'atom', 'w').write(f.scrape_atom())",
"def __init__(self, bad_report_handle = None):\n self._bad_report_handle = bad_report_handle\n \n #self._b_parser = BlastParser()\n self._scanner = _Scanner()\n self._consumer = _BlastErrorConsumer()",
"def main():\n feed_db, entry_db = openDBs(FEED_DB_FN, ENTRY_DB_FN)\n\n feeds = [ x.strip() for x in open(FEEDS_FN, \"r\").readlines() ]\n \n entries = getNewFeedEntries(feeds, feed_db, entry_db)\n \n if len(entries) > 0:\n out_fn = HTML_FN % time.strftime(\"%Y%m%d-%H%M%S\")\n writeAggregatorPage(entries, out_fn, DATE_HDR_TMPL, FEED_HDR_TMPL, \n ENTRY_TMPL, PAGE_TMPL)\n emailAggregatorPage(FROM_ADDR, TO_ADDR, SUBJECT, SMTP_HOST, out_fn)\n \n closeDBs(feed_db, entry_db)",
"def entrypoint(args=None):\n parser = argparse.ArgumentParser('consumer', description=entrypoint.__doc__)\n parser.add_argument('--push_address', help='address to push messages to',\n default='tcp://127.0.0.1:5555')\n parser.add_argument('--pull-address', help='address to pull messages from',\n default='tcp://127.0.0.1:5556')\n parser.add_argument('--num-fetches', '-n', help='number of fetches', type=int,\n default=100)\n parser.add_argument('--max-messages', '-m', help='maximum number of messages to publish',\n type=int, default=3 * multiprocessing.cpu_count())\n parser.add_argument('--context', '-c', help='context information in the format '\n '`name=type(value)`', action='append', default=[], type=parse_tuple)\n parser.add_argument('fetches', help='names of operations to fetch', nargs='+')\n args = parser.parse_args(args)\n\n context = dict(args.context)\n print(\"Context: %s\" % context)\n\n with pf.Consumer(args.push_address, args.pull_address) as consumer:\n contexts = [context for _ in range(args.num_fetches)]\n iterator = consumer.map(args.fetches, contexts, max_messages=args.max_messages)\n\n times = []\n for _ in tqdm.tqdm(iterator, total=args.num_fetches):\n times.append(time.time())\n\n deltas = np.diff(times) * 1000\n summary = textwrap.dedent(\n \"\"\"\n Summary statistics for %d fetches in ms\n =======================================\n Minimum : %.3f\n 5th percentile : %.3f\n 25th percentile : %.3f\n Median : %.3f\n 75th percentile : %.3f\n 95th percentile : %.3f\n Maximum : %.3f\n ---------------------------------------\n Mean : %.3f\n Standard dev. : %.3f\n ---------------------------------------\n\n Iterations per second: %.1f\n \"\"\" % (\n args.num_fetches, np.min(deltas), np.percentile(deltas, 5),\n np.percentile(deltas, 25), np.median(deltas), np.percentile(deltas, 75),\n np.percentile(deltas, 95), np.max(deltas), np.mean(deltas), np.std(deltas),\n 1000 / np.mean(deltas)\n ))\n print(summary)",
"def feed(self, entry):\r\n pass",
"def download_filings(feedpath,args=None):\n\tlogger.info(\"Processing RSS feed %s\",feedpath)\n\n\tdir = filings_dir(feedpath)\n\tos.makedirs(dir,exist_ok=True)\n\n\tfiling_urls = []\n\tfor filing in feed_tools.read_feed(feedpath):\n\t\tif args:\n\t\t\tif args.company_re and not bool(args.company_re.match(filing['companyName'])):\n\t\t\t\tcontinue\n\t\t\tif args.cik and args.cik != filing['cikNumber']:\n\t\t\t\tcontinue\n\t\t\tif args.sic and args.sic != filing['assignedSic']:\n\t\t\t\tcontinue\n\t\t\tif args.form_type and args.form_type != filing['formType']:\n\t\t\t\tcontinue\n\t\tif 'enclosureUrl' in filing and not exists_filing(dir,filing['enclosureUrl'],filing['enclosureLength']):\n\t\t\tfiling_urls.append(filing['enclosureUrl'])\n\t\tif args and getattr(args,'with_exhibits',False):\n\t\t\tfiling_urls.extend( filing.get( 'exhibitList', [] ) )\n\n\tlogger.info(\"Start downloading %d new filings\",len(filing_urls))\n\twith concurrent.futures.ThreadPoolExecutor(max_workers=args.max_threads) as executor:\n\t\tfutures = [executor.submit(download_filing,dir,url,args.max_retries) for url in filing_urls]\n\t\tfor future in concurrent.futures.as_completed(futures):\n\t\t\ttry:\n\t\t\t\tfuture.result()\n\t\t\texcept Exception as e:\n\t\t\t\tprint(e)",
"def run(self) -> None:\n\n # Track the last timestamp we see. When we fetch_stream() again on the\n # next iteration, we'll start from that timestamp onwards to avoid\n # fetching every single page again. The last event or two will be\n # still be in the response, but our de-duping will ignore those.\n last_timestamp = None\n\n # Keep track of what log entries we've consumed so that we suppress\n # duplicates. Duplicates will arise in our stream due to the way we\n # watch for new entries.\n consumed = set() # type: MutableSet\n\n # How many successful vs failed fetch_stream calls. If we consistently see\n # failures but we never see a successful attempt, we should raise an exception\n # and stop.\n success_count = 0\n failure_count = 0\n\n while not self.stopped.wait(0.2):\n try:\n for entry in fetch_stream(self.stream, start_time = last_timestamp):\n if entry[\"eventId\"] not in consumed:\n consumed.add(entry[\"eventId\"])\n\n last_timestamp = entry[\"timestamp\"]\n\n self.consumer(entry)\n except (ClientError, BotocoreConnectionError):\n failure_count += 1\n if failure_count > MAX_FAILURES and not success_count:\n raise\n else:\n success_count += 1",
"def _scan_scores(self,handle, consumer):\n read_and_call(handle, consumer.scores, start=\"Smith-Waterman\")",
"def do_feed(self, args):\n if self.first_feed:\n self.jay.speak(\"Remember, deletebot gets to eat the emails you delete and keepbot eats the emails you don't delete!\")\n self.jay.speak('Let me know when you want to STOP feeding!')\n self.first_feed = False\n delete = None\n while delete != 'stop' and delete != 'STOP':\n # pull email from database\n mail = self.email_client.get_random_email()\n self.jay.speak('Do you want to delete this email?')\n ec.preview_email(mail)\n delete = raw_input('>')\n if delete.lower().find('no') != -1:\n self.keepbot.feed(mail)\n elif delete != 'stop' and delete != 'STOP':\n self.deletebot.feed(mail)\n else:\n break\n self.jay.speak(\"Done feeding!\")",
"def run_rss(self):\n\n pass",
"def handleStbfReport(fundName, mode):\n\tlogger.debug('handleStbfReport()')\n\tfiles = list(getStbfFilesFromDirectory(getStbfDataDirectory()))\n\tif len(files) == 0:\n\t\tlogger.debug('handleStbfReport(): no input files')\n\t\treturn\n\n\tstatus, message = processStbfInputFiles(fundName, mode, files)\n\tsendNotificationEmail('Short Term Bond Fund', status, message)\n\tmoveFiles(getStbfProcessedDirectory(), files)",
"def download_feed_return_objects(rss_url):\r\n try:\r\n feed_obj = rss_exists(rss_url)\r\n except:\r\n yield None\r\n return\r\n\r\n feed_obj_found = False\r\n feed_parser_results, success = get_rss(rss_url)\r\n\r\n if feed_parser_results is None:\r\n error_reporter.captureMessage(u'Feed Parser results is None', **dict(rss_url=rss_url))\r\n yield None\r\n return\r\n\r\n if feed_obj is None:\r\n feed_obj = create_new_feed(feed_parser_results, rss_url)\r\n else:\r\n feed_obj_found = True\r\n\r\n feed_id = feed_obj.id\r\n feed_obj.title = feed_parser_results.get(\"title\", \"\") or \"\"\r\n max_length_field(feed_obj, 'title', 100)\r\n\r\n feed_obj.status_code = feed_parser_results.get(\"status\", \"\") or 200\r\n feed_obj.status = find_feed_status_from_scode(feed_obj)\r\n\r\n feed_obj.etag = cut_clean_etag(feed_parser_results.get(\"etag\", \"\"))\r\n\r\n updated_date = feed_parser_results.get(\"updated_parsed\")\r\n feed_obj.updated = dt.fromtimestamp(mktime(updated_date)) if updated_date is not None else dt.utcnow()\r\n #\tfeed_obj.published = dt.fromtimestamp(mktime(published_date)) if published_date is not None else None\r\n feed_obj.last_check = dt.utcnow()\r\n\r\n # We could be creating a new feed, or updating the existing one.\r\n yield feed_obj\r\n rss_posts = []\r\n\r\n for feed_article in feed_parser_results.get(\"entries\", []):\r\n ptime = feed_article.get(\"published_parsed\", None)\r\n post_date = dt.fromtimestamp(mktime(ptime)) if ptime is not None else dt.utcnow()\r\n #\t\tprint \"%r\" % post\r\n p = Post(\r\n id=uuid.uuid1(),\r\n title=feed_article.get(\"title\", \"\"),\r\n author=feed_article.get(\"author\", \"\"),\r\n href=feed_article.get(\"href\", \"\"),\r\n post_id=feed_article.get(\"id\", \"\"),\r\n published_at=post_date,\r\n feed_id=feed_id\r\n )\r\n\r\n p.original_title = max_length_field(p, 'title', 200)\r\n p.original_author = max_length_field(p, 'author', 200)\r\n\r\n p.content_html = feed_article.get(\"content\", \"\") or \"\"\r\n\r\n if feed_article.has_key(\"media_content\"):\r\n media_contents = feed_article.get(\"media_content\", []) or []\r\n if media_contents is not None and (not isinstance(media_contents, basestring)) and isinstance(\r\n media_contents, collections.Iterable):\r\n p.media = [media.get(\"url\") for media in media_contents]\r\n\r\n hasHash = False\r\n\r\n if feed_article.has_key(\"feedburner_origlink\"):\r\n p.original_link = feed_article.get(\"feedburner_origlink\", \"\")\r\n if non_empty_str(p.original_link):\r\n p.link_hash = url_hash(safe_str(p.original_link))\r\n hasHash = True\r\n\r\n if feed_article.has_key(\"link\"):\r\n p.href = feed_article.get(\"link\", \"\")\r\n if not hasHash and non_empty_str(p.href):\r\n p.link_hash = url_hash(safe_str(p.href))\r\n hasHash = True\r\n\r\n if not hasHash:\r\n print \"Post don't have any hash\"\r\n\r\n p.title_hash = url_hash(safe_str(p.title)) if non_empty_str(p.title) else \"\"\r\n p.post_id_hash = url_hash(safe_str(p.post_id)) if non_empty_str(p.post_id) else \"\"\r\n\r\n if feed_article.has_key(\"tags\"):\r\n if isinstance(feed_article['tags'], collections.Iterable):\r\n p.tags = [pst.get(\"term\") for pst in feed_article['tags']]\r\n\r\n rss_posts.append(p)\r\n\r\n has_posts = len(rss_posts) > 0\r\n post_id_hashes = [p.post_id_hash for p in rss_posts]\r\n #\tpost_title_hashes = [p.title_hash for p in rss_posts]\r\n post_link_hashes = [p.link_hash for p in rss_posts]\r\n\r\n found_posts_id_hashes = []\r\n found_posts_link_hashes = []\r\n\r\n if feed_obj_found and has_posts:\r\n existing_posts 
= find_existing_posts(feed_id, post_id_hashes, post_link_hashes)\r\n\r\n for ex_post_id_hash, ex_link_hash in existing_posts:\r\n found_posts_id_hashes.append(ex_post_id_hash)\r\n found_posts_link_hashes.append(ex_link_hash)\r\n\r\n has_existing_posts = len(found_posts_id_hashes) > 0 or len(found_posts_link_hashes) > 0\r\n\r\n new_post_count = 0\r\n if has_posts:\r\n for rss_post in rss_posts:\r\n should_skip = False\r\n\r\n if has_existing_posts:\r\n if non_empty_str(rss_post.post_id_hash) and rss_post.post_id_hash in found_posts_id_hashes:\r\n should_skip = True\r\n elif rss_post.link_hash in found_posts_link_hashes:\r\n should_skip = True # \"Link Hash found in existing records\"\r\n\r\n if not should_skip:\r\n new_post_count += 1\r\n yield rss_post\r\n\r\n feed_history = FeedHistory(id=uuid.uuid1(),\r\n feed_id=feed_obj.id,\r\n timestamp=dt.utcnow(),\r\n status=feed_obj.status_code,\r\n post_count=new_post_count,\r\n etag=feed_obj.etag)\r\n yield feed_history",
"def handle(self, *args, **options):\n feeds_data = []\n\n # Initialize facebook graph access tokens\n self.graph.access_token = facebook.get_app_access_token(settings.FACEBOOK_APP_ID, settings.FACEBOOK_SECRET_KEY)\n\n # Case no args - fetch all feeds\n if len(args) == 0:\n for feed in Facebook_Feed_Model.objects.all():\n self.stdout.write('Working on feed: {0}.'.format(feed.pk))\n feeds_data.append(self.get_feed_data(feed))\n self.stdout.write('Successfully fetched all')\n\n # Case arg exists - fetch feed by id supplied\n elif len(args) == 1:\n feed_id = int(args[0])\n\n try:\n feed = Facebook_Feed_Model.objects.get(pk=feed_id)\n self.stdout.write('Successfully fetched feed id {0}'.format(feed_id))\n except Facebook_Feed_Model.DoesNotExist:\n raise CommandError('Feed \"%s\" does not exist' % feed_id)\n\n feeds_data.append(self.get_feed_data(feed))\n\n # Case invalid args\n else:\n raise CommandError('Please enter a valid feed id')\n\n # Update fetched data to feed in database\n for feed_data in feeds_data:\n self.update_feed_data_to_db(feed_data['data'], feed_data['feed_id'])\n\n self.stdout.write('Successfully saved all statuses to db.')",
"def consumer(self, consumer):\n self._consumer = consumer",
"def consume_messages(process_func: Callable[[str], None]):\n consumer = get_consumer()\n\n for message in consumer:\n log.debug(f'Received a message: {message}')\n try:\n process_func(message.value)\n except Exception as e:\n log.error(f'Failed to process a message: {message.value}')\n log.exception(e)",
"def public_market_data_feed(config, state):\n\n # Sleep until the next market event\n while not state.stopper.is_set():\n\n state.lock.acquire()\n while not state.event_queue.empty():\n\n # Get next event\n event = state.event_queue.get()\n\n # TODO: ugly\n if isinstance(event, dict):\n symbol = event['instrument']\n message_type = event['message-type']\n else:\n symbol = event.instrument\n message_type = event.message_type\n\n for client in state.get_market_data_clients():\n if client.handshaken and client.snapshot_sent:\n subscriptions = client.subscriptions\n if symbol in subscriptions:\n topics = client.subscriptions[symbol]\n if message_type in ['A', 'X', 'M']:\n if 'orderBookL2' in topics:\n if not isinstance(event, dict):\n message = event.get_message()\n messaging.send_data(client.socket, message, client.encoding)\n else:\n message = json.dumps(event)\n messaging.send_data(client.socket, message, client.encoding)\n\n elif message_type in ['E']:\n if 'trade' in topics:\n if not isinstance(event, dict):\n message = event.get_message()\n messaging.send_data(client.socket, message, client.encoding)\n else:\n message = json.dumps(event)\n messaging.send_data(client.socket, message, client.encoding)\n\n state.get_current_lob_state(event['instrument']).print()\n\n state.lock.release()\n\n print('Market data dispatching stopped.')",
"def report_handler(bot, new_report):\n event_count = report[2]\n \n # Count events and take report & time\n if event_count == 0:\n event_count = new_report.count(\"|\")\n else:\n event_count += new_report.count(\"|\")\n\n timestamp = datetime.now()\n reporttime = timestamp.strftime(\"[%H:%M]\")\n\n #Console log\n print(timestamp.strftime(\"[%d %b, %H:%M]\") + \" -- \" + report)\n\n update_report(new_report, reporttime, event_count)\n \n bot.say(\"Understood.\")\n \n update_topic(bot, new_report, sopel.tools.target.Channel(CHANNEL))",
"def ingest(self):\n datetime_retrieved = datetime.now()\n prefix = self.prefix_template.format(**self.feed, year=datetime_retrieved.strftime('%Y'), month=datetime_retrieved.strftime('%m'))\n fp = self.generate_fp(\n template='{feedname}_{datetime_retrieved}',\n feedname=self.feed['feedname'],\n datetime_retrieved=datetime_retrieved\n )\n\n url_to_request = self.url_dict[(self.feed['state'],self.feed['feedname'])]\n try:\n r = requests.get(url_to_request)\n if r.status_code == 200:\n data_to_write = r.content\n self.s3helper.write_bytes(data_to_write, self.bucket, key=prefix+fp)\n self.print_func('Raw data ingested from {} to {} at {} UTC'.format(url_to_request, prefix+fp, datetime_retrieved))\n else:\n self.print_func('Received status code {} from {} feed.'.format(r.status_code,self.feed['feedname']))\n self.print_func('Skip triggering ingestion of {} to sandbox.'.format(self.feed['feedname']))\n self.print_func('Skip triggering ingestion of {} to Socrata.'.format(self.feed['feedname']))\n return\n except BaseException as e:\n data_to_write = f'The feed at {datetime_retrieved.isoformat()}.'.encode('utf-8')\n fp += '__FEED_NOT_RETRIEVED'\n self.s3helper.write_bytes(data_to_write, self.bucket, key=prefix+fp)\n self.print_func('We could not ingest data from {} at {} UTC'.format(url_to_request, datetime_retrieved))\n raise e\n\n # trigger semi-parse ingest\n if self.feed['pipedtosandbox'] == True:\n self.print_func('Trigger {} for {}'.format(self.lambda_to_trigger, self.feed['feedname']))\n lambda_client = self.s3helper.session.client('lambda')\n data_to_send = {'feed': self.feed, 'bucket': self.bucket, 'key': prefix+fp}\n response = lambda_client.invoke(\n FunctionName=self.lambda_to_trigger,\n InvocationType='Event',\n LogType='Tail',\n ClientContext='',\n Payload=json.dumps(data_to_send).encode('utf-8')\n )\n self.print_func(response)\n else:\n self.print_func('Skip triggering ingestion of {} to sandbox.'.format(self.feed['feedname']))\n\n # trigger ingest to socrata\n if self.feed['pipedtosocrata'] == True:\n self.print_func('Trigger {} for {}'.format(self.socrata_lambda_to_trigger, self.feed['feedname']))\n lambda_client = self.s3helper.session.client('lambda')\n data_to_send = {'feed': self.feed, 'bucket': self.bucket, 'key': prefix+fp}\n response = lambda_client.invoke(\n FunctionName=self.socrata_lambda_to_trigger,\n InvocationType='Event',\n LogType='Tail',\n ClientContext='',\n Payload=json.dumps(data_to_send).encode('utf-8')\n )\n self.print_func(response)\n else:\n self.print_func('Skip triggering ingestion of {} to Socrata.'.format(self.feed['feedname']))",
"def _scan_bq_data(self, uhandle, consumer):\n \n qual=''\n while 1:\n line=uhandle.readline()\n if is_blank_line(line):\n uhandle.saveline(line)\n break\n qual+=' '+line\n return qual",
"def _process_feeds(self):\n if self._feeds is None:\n return\n try:\n for feed_parser in self._feed_parsers:\n # all of the nested try excepts\n try:\n for article in feed_parser.get_new_articles():\n self._downloader.queue_article(article)\n for article in self._recursive_source.get_new_articles():\n self._downloader.queue_article(article)\n except Exception as e:\n logging.exception(e)\n\n except TypeError:\n raise ValueError(\"'feeds' must be a list of RSS feed URLs to process.\")",
"def cli():\n fire.Fire(fetch_rss_file)",
"def generate_atom_feeds(app):\n if not ablog.builder_support(app):\n return\n blog = Blog(app)\n base_url = blog.blog_baseurl\n if not base_url:\n return\n feeds = [\n (\n blog.posts,\n blog.blog_path,\n os.path.join(app.builder.outdir, blog.blog_path, feed_root + \".xml\"),\n blog.blog_title,\n os_path_join(base_url, blog.blog_path, feed_root + \".xml\"),\n feed_templates,\n )\n for feed_root, feed_templates in blog.blog_feed_templates.items()\n ]\n if blog.blog_feed_archives:\n for header, catalog in [\n (_(\"Posts by\"), blog.author),\n (_(\"Posts from\"), blog.location),\n (_(\"Posts in\"), blog.language),\n (_(\"Posts in\"), blog.category),\n (_(\"Posted in\"), blog.archive),\n (_(\"Posts tagged\"), blog.tags),\n ]:\n for coll in catalog:\n # skip collections containing only drafts\n if not len(coll):\n continue\n folder = os.path.join(app.builder.outdir, coll.path)\n if not os.path.isdir(folder):\n os.makedirs(folder)\n for feed_root, feed_templates in blog.blog_feed_templates.items():\n feeds.append(\n (\n coll,\n coll.path,\n os.path.join(folder, feed_root + \".xml\"),\n blog.blog_title + \" - \" + header + \" \" + str(coll),\n os_path_join(base_url, coll.path, feed_root + \".xml\"),\n feed_templates,\n )\n )\n # Config options\n feed_length = blog.blog_feed_length\n feed_fulltext = blog.blog_feed_fulltext\n for feed_posts, pagename, feed_path, feed_title, feed_url, feed_templates in feeds:\n feed = FeedGenerator()\n feed.id(blog.blog_baseurl)\n feed.title(feed_title)\n feed.link(href=base_url)\n feed.subtitle(blog.blog_feed_subtitle)\n feed.link(href=feed_url, rel=\"self\")\n feed.language(app.config.language)\n feed.generator(\"ABlog\", ablog.__version__, \"https://ablog.readthedocs.io/\")\n sorted_posts_by_date = sorted(feed_posts, key=lambda post: post.date, reverse=True)\n for i, post in enumerate(sorted_posts_by_date):\n if feed_length and i == feed_length:\n break\n post_url = os_path_join(base_url, app.builder.get_target_uri(post.docname))\n if post.section:\n post_url += \"#\" + post.section\n if blog.blog_feed_titles:\n content = None\n else:\n content = post.to_html(pagename, fulltext=feed_fulltext, img_url=True)\n feed_entry = feed.add_entry(order=\"append\")\n feed_entry.id(post_url)\n feed_entry.link(href=post_url)\n feed_entry.author({\"name\": author.name for author in post.author})\n feed_entry.pubDate(post.date.astimezone())\n feed_entry.updated(post.update.astimezone())\n for tag in sorted(post.tags):\n feed_entry.category(\n dict(\n term=tag.name.strip().replace(\" \", \"\"),\n label=tag.label,\n )\n )\n # Entry values that support templates\n title = post.title\n summary = \"\".join(paragraph.astext() for paragraph in post.excerpt)\n template_values = {}\n for element in (\"title\", \"summary\", \"content\"):\n if element in feed_templates:\n template_values[element] = jinja2.Template(feed_templates[element]).render(**locals())\n feed_entry.title(template_values.get(\"title\", title))\n summary = template_values.get(\"summary\", summary)\n if summary:\n feed_entry.summary(summary)\n content = template_values.get(\"content\", content)\n if content:\n feed_entry.content(content=content, type=\"html\")\n parent_dir = os.path.dirname(feed_path)\n if not os.path.isdir(parent_dir):\n os.makedirs(parent_dir)\n with open(feed_path, \"w\", encoding=\"utf-8\") as out:\n feed_str = feed.atom_str(pretty=True)\n out.write(feed_str.decode())\n if 0:\n # this is to make the function a generator\n # and make work for Sphinx 'html-collect-pages'\n yield"
] | [
"0.63534445",
"0.5767998",
"0.5734903",
"0.5599733",
"0.54569304",
"0.5154365",
"0.5127441",
"0.50028217",
"0.49828988",
"0.4976283",
"0.49430162",
"0.4887362",
"0.4875427",
"0.4865234",
"0.4799642",
"0.47539532",
"0.4745771",
"0.47228912",
"0.4705688",
"0.46607345",
"0.46272725",
"0.45850143",
"0.45816946",
"0.45795465",
"0.45522445",
"0.45274812",
"0.45187366",
"0.45161363",
"0.4478108",
"0.44740698"
] | 0.79035497 | 0 |
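Note on the scanner/consumer API in the entry above: feed(handle, consumer) is the event-driven core of Biopython's legacy plain-text BLAST parser (Bio.Blast.NCBIStandalone), which was later deprecated and removed in favour of Bio.Blast.NCBIXML. A minimal sketch of how that scanner was typically driven indirectly, through the module's high-level parser, on an old Biopython release; the report file name is hypothetical and the snippet assumes a release that still ships NCBIStandalone:

from Bio.Blast import NCBIStandalone  # legacy module; absent from modern Biopython

parser = NCBIStandalone.BlastParser()        # wraps a _Scanner plus a consumer internally
with open("blast_report.txt") as handle:     # hypothetical plain-text BLAST report
    record = parser.parse(handle)            # parse() hands the handle to scanner.feed(handle, consumer)
print(record.query)                          # the consumer assembled the scan events into a Record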
next(self) > object Return the next Blast record from the file. If no more records, return None. | def next(self):
lines = []
query = False
while 1:
line = self._uhandle.readline()
if not line:
break
# If I've reached the next one, then put the line back and stop.
if lines and (line.startswith('BLAST')
or line.startswith('BLAST', 1)
or line.startswith('<?xml ')):
self._uhandle.saveline(line)
break
# New style files ommit the BLAST line to mark a new query:
if line.startswith("Query="):
if not query:
if not self._header:
self._header = lines[:]
query = True
else:
#Start of another record
self._uhandle.saveline(line)
break
lines.append(line)
if query and "BLAST" not in lines[0]:
#Cheat and re-insert the header
#print "-"*50
#print "".join(self._header)
#print "-"*50
#print "".join(lines)
#print "-"*50
lines = self._header + lines
if not lines:
return None
data = ''.join(lines)
if self._parser is not None:
return self._parser.parse(File.StringHandle(data))
return data | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _next(self, filename):\n try:\n return self.tmp_read[filename]['reader'].__next__()\n except StopIteration:\n return None",
"def __next__(self):\n try:\n next_record = next(self.records)\n self.all_records.append(next_record)\n return next_record\n except StopIteration:\n self.pending = False\n raise StopIteration('At the end of the result set.')",
"def next(self):\n nxt = self.readentry()\n if nxt is None:\n raise StopIteration\n return nxt",
"def next(self):\n result = None\n while result is None:\n if self._filehandle is None:\n if self.compressed:\n self._filehandle = gzip.GzipFile(self._filename, \"r\")\n else:\n self._filehandle = open(self._filename, \"r\")\n line = self._filehandle.next()\n line = line.rstrip()\n self._totallines += 1\n result = self.decodeline(line)\n return result",
"def _get_next_record(self):\n # Read next record from xlsx file\n row = self.ds_reader.next()\n if len(row) == 0:\n raise StopIteration\n \n # build record\n record = {'__row':row[0].row}\n for cell in row:\n record[str(cell.column)] = cell.internal_value\n #if cell.internal_value != None:\n # self.ds_processedBytes += len(cell.internal_value)\n \n # finished\n return record",
"def _next_record(self, next_line):\n record = self.loader.parse_record_stream(self.reader,\n next_line,\n self.known_format)\n\n self.member_info = None\n\n # Track known format for faster parsing of other records\n self.known_format = record.format\n\n return record",
"def _load_next_file(self):\n\n if self._file_ptr == len(self.files):\n raise pipeline.PipelineStopIteration\n\n # Collect garbage to remove any prior data objects\n gc.collect()\n\n # Fetch and remove the next item in the list\n file_ = self.files[self._file_ptr]\n self._file_ptr += 1\n\n # Set up a Reader class\n rd = self._acqtype_reader[self.acqtype](file_)\n\n self.log.info(f\"Reading file {self._file_ptr} of {len(self.files)}. ({file_})\")\n data = rd.read()\n\n return data",
"def deserialize_next_from_file(self):\n # We cannot read from a file unless the user provides it in the\n # constructor.\n if not self._input_file:\n raise Exception(\"No input file provided to deserialize from.\")\n\n with quickavro.FileReader(self._input_file) as reader:\n for record in reader.records():\n yield record",
"def next(self) -> object:\n return self._next",
"def next(self):\n return self._next",
"def next(self):\n return self.__next",
"def get_next(self):\n return self.next",
"def get_next(self):\n return self.next",
"def getNext(self):\n\t\t\treturn self.next",
"def getrecord():\n global nrecord, totrecords, EOF\n \n nrecord = nrecord + 1\n if nrecord > totrecords:\n EOF = True\n return ()\n return records[nrecord-1]",
"def next(self):\r\n return self.__next",
"def next(self):\r\n self._collect()\r\n if not self._heads and not self._refresh:\r\n return Stream.EOF\r\n minimum = self._pop()\r\n if minimum:\r\n line, stream = minimum\r\n self._refresh.add(stream)\r\n return (self._labels[stream], line)",
"def next(self):\n\n lines = []\n while 1: \n # if at beginning, skip the AS and look for first CO command\n line=self._uhandle.readline()\n if not line: # empty or corrupt file\n return None\n if line[:2]=='CO':\n lines.append(line)\n break\n while 1:\n line = self._uhandle.readline()\n if not line:\n break\n # If a new record, then put the line back and stop.\n if lines and line[:2] == 'CO':\n self._uhandle.saveline(line)\n break\n lines.append(line)\n\n if not lines:\n return None\n\n data = ''.join(lines)\n if self._parser is not None:\n return self._parser.parse(File.StringHandle(data))\n return data",
"def next_file(self):\n raise NotImplementedError()",
"def peek(self) -> t.Optional[Record]:\n self._buffer(1)\n if self._record_buffer:\n return self._record_buffer[0]\n return None",
"def next_object(self):\n if not self._buffer_size():\n return None\n return next(self.delegate)",
"def next(self):\n self.record_offset += 2 ** self.blockettes[1000]['Data Record Length']\n self._parseHeader()",
"def next(self):\n return self.my_next",
"def getNext(self):\n return self.__next",
"def next(self):\n return self.__next__()",
"def next(self):\n return self.__next__()",
"def next(self):\n return self.__next__()",
"def next(self):\n return self.__next__()",
"def next(self):\n return self.__next__()",
"def next(self):\n return self.__next__()"
] | [
"0.69588554",
"0.6904435",
"0.6851931",
"0.66342896",
"0.652028",
"0.64878345",
"0.6429976",
"0.6358846",
"0.62886494",
"0.62753236",
"0.62223315",
"0.62091005",
"0.62091005",
"0.6207237",
"0.61904114",
"0.6156381",
"0.6149515",
"0.6137225",
"0.6125767",
"0.61171055",
"0.61122704",
"0.60998684",
"0.60388756",
"0.6034655",
"0.60051316",
"0.60051316",
"0.60051316",
"0.60051316",
"0.60051316",
"0.60051316"
] | 0.73920125 | 0 |
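Note on the record iterator in the entry above: returning None rather than raising StopIteration is how the legacy Bio.Blast.NCBIStandalone iterator signalled that a multi-query plain-text report was exhausted, so callers looped explicitly until None came back. A minimal sketch under that assumption (legacy API, hypothetical file name; modern Biopython reads XML output with Bio.Blast.NCBIXML.parse instead):

from Bio.Blast import NCBIStandalone  # legacy module; absent from modern Biopython

parser = NCBIStandalone.BlastParser()
handle = open("multi_query_report.txt")      # hypothetical report holding several queries
iterator = NCBIStandalone.Iterator(handle, parser)
while True:
    record = iterator.next()                 # a parsed Record, or None once no records remain
    if record is None:
        break
    print(record.query, len(record.alignments))
handle.close()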
Execute and retrieve data from standalone BLASTPALL as handles (OBSOLETE). NOTE This function is obsolete, you are encouraged to the command line wrapper Bio.Blast.Applications.BlastallCommandline instead. Execute and retrieve data from blastall. blastcmd is the command used to launch the 'blastall' executable. program is the blast program to use, e.g. 'blastp', 'blastn', etc. database is the path to the database to search against. infile is the path to the file containing the sequence to search with. The return values are two handles, for standard output and standard error. You may pass more parameters to keywds to change the behavior of the search. Otherwise, optional values will be chosen by blastall. The Blast output is by default in XML format. Use the align_view keyword for output in a different format. Scoring matrix Matrix to use. gap_open Gap open penalty. gap_extend Gap extension penalty. nuc_match Nucleotide match reward. (BLASTN) nuc_mismatch Nucleotide mismatch penalty. (BLASTN) query_genetic_code Genetic code for Query. db_genetic_code Genetic code for database. (TBLAST[NX]) Algorithm gapped Whether to do a gapped alignment. T/F (not for TBLASTX) expectation Expectation value cutoff. wordsize Word size. strands Query strands to search against database.([T]BLAST[NX]) keep_hits Number of best hits from a region to keep. xdrop Dropoff value (bits) for gapped alignments. hit_extend Threshold for extending hits. region_length Length of region used to judge hits. db_length Effective database length. search_length Effective length of search space. Processing filter Filter query sequence for low complexity (with SEG)? T/F believe_query Believe the query defline. T/F restrict_gi Restrict search to these GI's. nprocessors Number of processors to use. oldengine Force use of old engine T/F Formatting html Produce HTML output? T/F descriptions Number of oneline descriptions. alignments Number of alignments. align_view Alignment view. Integer 011, passed as a string or integer. show_gi Show GI's in deflines? T/F seqalign_file seqalign file to output. outfile Output file for report. Filename to write to, if ommitted standard output is used (which you can access from the returned handles). | def blastall(blastcmd, program, database, infile, align_view='7', **keywds):
_security_check_parameters(keywds)
att2param = {
'matrix' : '-M',
'gap_open' : '-G',
'gap_extend' : '-E',
'nuc_match' : '-r',
'nuc_mismatch' : '-q',
'query_genetic_code' : '-Q',
'db_genetic_code' : '-D',
'gapped' : '-g',
'expectation' : '-e',
'wordsize' : '-W',
'strands' : '-S',
'keep_hits' : '-K',
'xdrop' : '-X',
'hit_extend' : '-f',
'region_length' : '-L',
'db_length' : '-z',
'search_length' : '-Y',
'program' : '-p',
'database' : '-d',
'infile' : '-i',
'filter' : '-F',
'believe_query' : '-J',
'restrict_gi' : '-l',
'nprocessors' : '-a',
'oldengine' : '-V',
'html' : '-T',
'descriptions' : '-v',
'alignments' : '-b',
'align_view' : '-m',
'show_gi' : '-I',
'seqalign_file' : '-O',
'outfile' : '-o',
}
import warnings
warnings.warn("This function is obsolete, you are encouraged to the command line wrapper Bio.Blast.Applications.BlastallCommandline instead.", PendingDeprecationWarning)
from Applications import BlastallCommandline
cline = BlastallCommandline(blastcmd)
cline.set_parameter(att2param['program'], program)
cline.set_parameter(att2param['database'], database)
cline.set_parameter(att2param['infile'], infile)
cline.set_parameter(att2param['align_view'], str(align_view))
for key, value in keywds.iteritems():
cline.set_parameter(att2param[key], str(value))
return _invoke_blast(cline) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def rpsblast(blastcmd, database, infile, align_view=\"7\", **keywds):\n\n import warnings\n warnings.warn(\"This function is obsolete, you are encouraged to the command line wrapper Bio.Blast.Applications.BlastrpsCommandline instead.\", PendingDeprecationWarning)\n _security_check_parameters(keywds)\n \n att2param = {\n 'multihit' : '-P',\n 'gapped' : '-g',\n 'expectation' : '-e',\n 'range_restriction' : '-L',\n 'xdrop' : '-X',\n 'xdrop_final' : '-Z',\n 'xdrop_extension' : '-y',\n 'search_length' : '-Y',\n 'nbits_gapping' : '-N',\n 'protein' : '-p',\n 'db_length' : '-z',\n\n 'database' : '-d',\n 'infile' : '-i',\n 'filter' : '-F',\n 'case_filter' : '-U',\n 'believe_query' : '-J',\n 'nprocessors' : '-a',\n 'logfile' : '-l',\n\n 'html' : '-T',\n 'descriptions' : '-v',\n 'alignments' : '-b',\n 'align_view' : '-m',\n 'show_gi' : '-I',\n 'seqalign_file' : '-O',\n 'align_outfile' : '-o',\n }\n\n from Applications import RpsBlastCommandline\n cline = RpsBlastCommandline(blastcmd)\n cline.set_parameter(att2param['database'], database)\n cline.set_parameter(att2param['infile'], infile)\n cline.set_parameter(att2param['align_view'], str(align_view))\n for key, value in keywds.iteritems():\n cline.set_parameter(att2param[key], str(value))\n return _invoke_blast(cline)",
"def blastpgp(blastcmd, database, infile, align_view='7', **keywds):\n\n import warnings\n warnings.warn(\"This function is obsolete, you are encouraged to the command line wrapper Bio.Blast.Applications.BlastpgpCommandline instead.\", PendingDeprecationWarning)\n _security_check_parameters(keywds)\n\n att2param = {\n 'matrix' : '-M',\n 'gap_open' : '-G',\n 'gap_extend' : '-E',\n 'window_size' : '-A',\n 'npasses' : '-j',\n 'passes' : '-P',\n\n 'gapped' : '-g',\n 'expectation' : '-e',\n 'wordsize' : '-W',\n 'keep_hits' : '-K',\n 'xdrop' : '-X',\n 'hit_extend' : '-f',\n 'region_length' : '-L',\n 'db_length' : '-Z',\n 'search_length' : '-Y',\n 'nbits_gapping' : '-N',\n 'pseudocounts' : '-c',\n 'xdrop_final' : '-Z',\n 'xdrop_extension' : '-y',\n 'model_threshold' : '-h',\n 'required_start' : '-S',\n 'required_end' : '-H',\n\n 'program' : '-p',\n 'database' : '-d',\n 'infile' : '-i',\n 'filter' : '-F',\n 'believe_query' : '-J',\n 'nprocessors' : '-a',\n\n 'html' : '-T',\n 'descriptions' : '-v',\n 'alignments' : '-b',\n 'align_view' : '-m',\n 'show_gi' : '-I',\n 'seqalign_file' : '-O',\n 'align_outfile' : '-o',\n 'checkpoint_outfile' : '-C',\n 'restart_infile' : '-R',\n 'hit_infile' : '-k',\n 'matrix_outfile' : '-Q',\n 'align_infile' : '-B',\n }\n from Applications import BlastpgpCommandline\n cline = BlastpgpCommandline(blastcmd)\n cline.set_parameter(att2param['database'], database)\n cline.set_parameter(att2param['infile'], infile)\n cline.set_parameter(att2param['align_view'], str(align_view))\n for key, value in keywds.iteritems():\n cline.set_parameter(att2param[key], str(value))\n return _invoke_blast(cline)",
"def run_blast(inputfile, input_type, outputfile, database, args=None, verbose=True):\n\n assert (input_type in ['protein', 'dna']), \"Input type must be either 'protein' or 'dna'\"\n\n cmd = ['diamond']\n\n if input_type == 'protein':\n cmd += ['blastp']\n elif input_type == 'dna':\n cmd += ['blastx']\n\n cmd += ['-d', database]\n cmd += ['-q', inputfile]\n cmd += ['-o', outputfile]\n\n if not args:\n args = \"--more-sensitive --top 10 --quiet\"\n\n cmd += args.split()\n\n if verbose:\n print(' '.join(cmd))\n\n with open(os.devnull, 'w') as devnull:\n try:\n exit_code = call(cmd, stdout=devnull)\n except OSError:\n exit_code = None\n\n return exit_code",
"def run_blast(self, metadata, analysistype, program, outfmt, evalue='1E-5', num_threads=12, num_alignments=1000000,\n perc_identity=70, task='blastn'):\n with progressbar(metadata) as bar:\n for sample in bar:\n # Run the BioPython BLASTn module with the genome as query, fasta (target gene) as db.\n make_path(sample[analysistype].reportdir)\n # Set the name and path of the BLAST report as reportdir/samplename_blastprogram.tsv\n sample[analysistype].report = os.path.join(\n sample[analysistype].reportdir, '{name}_{program}_{at}.tsv'.format(name=sample.name,\n program=program,\n at=analysistype))\n # Check the size of the report (if it exists). If it has size 0, something went wrong on a previous\n # iteration of the script. Delete the empty file in preparation for another try\n try:\n size = os.path.getsize(sample[analysistype].report)\n # If a report was created, but no results entered - program crashed, or no sequences passed\n # thresholds, remove the report, and run the blast analyses again\n if size == 0:\n os.remove(sample[analysistype].report)\n except FileNotFoundError:\n pass\n # Split the extension from the file path\n db = os.path.splitext(sample[analysistype].combinedtargets)[0]\n # Create the command line argument using the appropriate BioPython BLAST wrapper\n if program == 'blastn':\n blast = self.blastn_commandline(sample=sample,\n analysistype=analysistype,\n db=db,\n evalue=evalue,\n num_alignments=num_alignments,\n num_threads=num_threads,\n outfmt=outfmt,\n perc_identity=perc_identity,\n task=task)\n elif program == 'blastp':\n blast = self.blastp_commandline(sample=sample,\n analysistype=analysistype,\n db=db,\n evalue=evalue,\n num_alignments=num_alignments,\n num_threads=num_threads,\n outfmt=outfmt)\n elif program == 'blastx':\n blast = self.blastx_commandline(sample=sample,\n analysistype=analysistype,\n db=db,\n evalue=evalue,\n num_alignments=num_alignments,\n num_threads=num_threads,\n outfmt=outfmt)\n elif program == 'tblastn':\n blast = self.tblastn_commandline(sample=sample,\n analysistype=analysistype,\n db=db,\n evalue=evalue,\n num_alignments=num_alignments,\n num_threads=num_threads,\n outfmt=outfmt)\n elif program == 'tblastx':\n blast = self.tblastx_commandline(sample=sample,\n analysistype=analysistype,\n db=db,\n evalue=evalue,\n num_alignments=num_alignments,\n num_threads=num_threads,\n outfmt=outfmt)\n else:\n blast = str()\n assert blast, 'Something went wrong, the BLAST program you provided ({program}) isn\\'t supported'\\\n .format(program=program)\n # Save the blast command in the metadata\n sample[analysistype].blastcommand = str(blast)\n # Only run blast if the report doesn't exist\n if not os.path.isfile(sample[analysistype].report):\n try:\n blast()\n except ApplicationError as e:\n logging.debug(e)\n try:\n os.remove(sample[analysistype].report)\n except (IOError, ApplicationError):\n pass\n # Return the updated metadata object\n return metadata",
"def blast_reads(number_hits, ncbi_database, organism):\n #blast_reads(number_hits, ncbi_database, entrez_query)\n print(\"Searching for BLAST hits...\")\n fasta_string = open(\"Log_Directory/blast_queries.fasta\").read()\n print (\"The ncbi database being searched is:\", ncbi_database)\n if len(organism) > 0:\n print (\"The organism being searched is: \", organism)\n query ='\"txid'+str(organism)+'\"'\n result_handle = NCBIWWW.qblast(\"blastn\", ncbi_database, fasta_string, entrez_query=query, hitlist_size=number_hits,\n expect=10.0, nucl_penalty=-2, nucl_reward=1, megablast=True, word_size=28, expect_low=True, gapcosts='0 2')\n else:\n print (\"No organism is designated\")\n result_handle = NCBIWWW.qblast(\"blastn\", ncbi_database, fasta_string, hitlist_size=number_hits)\n blast_result = open(\"Log_Directory/blast_results.xml\", \"w\")\n blast_result.write(result_handle.read())\n blast_result.close()\n result_handle.close()",
"def exec_blast(infile, config_file, out_name):\n\tdb, evalue = parse_config(config_file, \"blast\")\n\tfasta_string = SeqIO.read(infile, format=\"fasta\")\n\tresult_handle = NCBIWWW.qblast(\"blastp\", \"nr\", fasta_string.seq)\n\toutput= out_name + \".xml\"\n\tsave_file = open(output, \"w\")\n\tsave_file.write(result_handle.read())\n\tsave_file.close()\n\tresult_handle.close()\n\treturn (output)",
"def exec_blast(infile, config_file, out_name):\n\tdb, evalue = parse_config(config_file, \"blast\")\n\ttry:\n\t\tinput_open = open(infile, \"r\")\n\t\tinput_open.close()\n\texcept:\n\t\traise IOError(\"Cannot open input file %s\" %infile)\n\n\tif is_fasta(infile) == False:\n\t\t\traise TypeError(\"Input file %s must be in fasta format\" %infile)\n\telse:\n\t\tfasta_string = SeqIO.read(infile, format=\"fasta\")\n\t\tresult_handle = NCBIWWW.qblast(\"blastp\", db, fasta_string.seq)\n\t\toutput= out_name + \".xml\"\n\t\tsave_file = open(output, \"w\")\n\t\tsave_file.write(result_handle.read())\n\t\tsave_file.close()\n\t\tresult_handle.close()\n\treturn (output)",
"def qiime_blast_seqs(seqs,\r\n blast_constructor=Blastall,\r\n blast_program='blastn',\r\n blast_db=None,\r\n refseqs=None,\r\n refseqs_fp=None,\r\n blast_mat_root=None,\r\n params=None,\r\n WorkingDir=None,\r\n seqs_per_blast_run=1000,\r\n is_protein=False,\r\n HALT_EXEC=False):\r\n\r\n assert blast_db or refseqs_fp or refseqs, \\\r\n 'Must provide either a blast_db or a fasta ' +\\\r\n 'filepath containing sequences to build one.'\r\n\r\n if refseqs_fp:\r\n blast_db, db_files_to_remove =\\\r\n build_blast_db_from_fasta_path(refseqs_fp,\r\n output_dir=WorkingDir,\r\n is_protein=is_protein)\r\n elif refseqs:\r\n blast_db, db_files_to_remove =\\\r\n build_blast_db_from_fasta_file(refseqs,\r\n output_dir=WorkingDir,\r\n is_protein=is_protein)\r\n else:\r\n db_files_to_remove = []\r\n\r\n if params is None:\r\n params = {}\r\n params[\"-d\"] = blast_db\r\n params[\"-p\"] = blast_program\r\n\r\n blast_app = blast_constructor(\r\n params=params,\r\n blast_mat_root=blast_mat_root,\r\n InputHandler='_input_as_seq_id_seq_pairs',\r\n WorkingDir=WorkingDir,\r\n SuppressStderr=True,\r\n HALT_EXEC=HALT_EXEC)\r\n\r\n current_seqs = []\r\n blast_results = BlastResult([])\r\n for seq in seqs:\r\n current_seqs.append(seq)\r\n if len(current_seqs) % seqs_per_blast_run == 0:\r\n if blast_results:\r\n blast_results.update(\r\n BlastResult(blast_app(current_seqs)['StdOut']))\r\n else:\r\n blast_results = BlastResult(blast_app(current_seqs)['StdOut'])\r\n current_seqs = []\r\n\r\n # clean-up run: blast the remaining sequences\r\n blast_results.update(\r\n BlastResult(blast_app(current_seqs)['StdOut']))\r\n\r\n remove_files(db_files_to_remove)\r\n\r\n return blast_results",
"def blastn_commandline(cls):\n command = generate_path(\"../../blast/ncbi-blast*/bin/blastn\")\n fasta = generate_path(\"tmp/validate.fasta\")\n db = generate_path(\"data/blast/ValidationDB\")\n results = generate_path(\"tmp/validate.xml\")\n\n subprocess.call(\n '%s -query %s -db %s -outfmt 5 -out %s -best_hit_score_edge 0.05 '\n '-best_hit_overhang 0.1' % (\n command, fasta, db, results\n ), shell=True\n )",
"def makeblastdb(fasta, program='blastn', returncmd=False, **kwargs):\n # Convert the options dictionary to a string\n options = kwargs_to_string(kwargs)\n # Set the dbtype appropriately\n if program == 'blastn' or program == 'tblastn' or program == 'tblastx':\n dbtype = 'nucl'\n else:\n dbtype = 'prot'\n # Remove the file extension from the file name\n output = os.path.splitext(fasta)[0]\n cmd = 'makeblastdb -in {fasta} -parse_seqids -max_file_sz 2GB -dbtype {dbtype} -out {output}{options}' \\\n .format(fasta=fasta,\n dbtype=dbtype,\n output=output,\n options=options)\n # Check if database already exists\n if not os.path.isfile('{output}.nhr'.format(output=output)):\n out, err = run_subprocess(cmd)\n else:\n out = str()\n err = str()\n if returncmd:\n return out, err, cmd\n else:\n return out, err",
"def blast(self, analysis_id, organism_id, input, blastdb=None, blastdb_id=None,\n re_name=None, query_type=\"polypeptide\", match_on_name=False, skip_missing=False):\n\n if blastdb_id:\n found_db = self.session.query(self.model.db).filter_by(db_id=blastdb_id)\n if not found_db:\n raise Exception(\"Invalid db ID\")\n elif blastdb:\n found_db = self.session.query(self.model.db).filter_by(name=blastdb)\n if not found_db:\n raise Exception(\"Invalid db name\")\n blastdb_id = found_db.one().db_id\n\n if not blastdb_id:\n raise Exception(\"Either blastdb or blastdb_id is required\")\n\n res = self.session.query(self.model.analysis).filter_by(analysis_id=analysis_id)\n if not res.count():\n raise Exception(\"Analysis with the id {} was not found\".format(analysis_id))\n\n # Cache many things to speed up loading\n self._reset_cache()\n seqterm = self.ci.get_cvterm_id(query_type, 'sequence')\n self._init_feature_cache(organism_id, seqterm, match_on_name)\n\n self._init_analysisfeature_cache(analysis_id)\n\n self._init_analysisprop_cache()\n\n self._hit_details_cache = None\n\n if not os.path.exists(input):\n raise Exception(\"{} was not found\".format(input))\n\n self._setup_tables(\"blast\")\n\n count_ins = self._parse_blast_xml(analysis_id, blastdb_id, input, re_name, query_type, True, organism_id, skip_missing)\n\n blastdb_ap = self.ci.get_cvterm_id('analysis_blast_blastdb', 'tripal')\n self._add_analysisprop(analysis_id, type_id=blastdb_ap, value=blastdb_id)\n\n self.session.commit()\n\n self._reset_cache()\n\n return {'inserted': count_ins}",
"def blastp(database, query, output_to_file = False, output_file = None,\n overwrite = False, outfmt = 7):\n if output_to_file:\n if os.path.exists(output_file) and not overwrite:\n return output_file\n cmd = 'blastp -db {} -query {} -outfmt {} -out {} -num_alignments 1'.\\\n format(database, query, outfmt, output_file)\n else:\n cmd = 'blastp -db {} -query {} -outfmt {} -num_alignments 1'.format(\n database, query, outfmt)\n\n printed_output = subprocess.check_output(cmd, shell=True)\n if output_to_file:\n return output_file\n return printed_output",
"def _blast(query, output_pssm, output, blastdb):\n psiblast_command = \"psiblast -db {:} -query {:} -out_ascii_pssm {:} \" + \\\n \"-save_pssm_after_last_round -out {:}\"\n log_out = \"{}.out\".format(output)\n log_err = \"{}.err\".format(output)\n with open(log_out, 'a') as f_out:\n with open(log_err, 'a') as f_err:\n command = psiblast_command.format(\n blastdb, query, output_pssm, output)\n f_out.write('=================== CALL ===================\\n')\n f_out.write(command + '\\n')\n subprocess.check_call(\n command, shell=True, stderr=f_err, stdout=f_out)\n f_out.write('================= END CALL =================\\n')",
"def blaster(protSeq, orgnID = \"Mus musculus\"):\n \n from Bio.Blast.NCBIWWW import qblast\n from Bio.Blast import NCBIXML\n from sys import exit\n \n print(\"\\nconnecting to BLAST server. this will take some time...\")\n i = 1\n while i < 4: # BLAST sometimes returns empty results. if so, try once more, it happens quite rarely and resending the query seems to fix it.\n print(\"attempt number \" + str(i))\n i += 1\n resX = qblast(\"blastp\",\"refseq_protein\", protSeq, entrez_query= orgnID + \"[organism]\")\n resO = NCBIXML.read(resX)\n if resO.descriptions != []: break \n if resO.descriptions == []: \n print(\"connection unsuccessful. The BLAST server is acting up. Try again later.\")\n exit(0)\n \n else: print(\"connection successful\")\n \n print(resO.descriptions[0])\n descO = resO.descriptions[0]\n if descO.e < 0.01: \n try:\n descID = descO.title.split(\"|\")[3] # not sure why I picked element 3 here\n except IndexError:\n descID = descO.title.split(\"|\")[1]\n \n if \".\" in descID: return descID.split(\".\")[0]\n else: return descID\n \n else: return \"-\"",
"def _invoke_blast(cline):\n import subprocess, sys\n blast_cmd = cline.program_name\n if not os.path.exists(blast_cmd):\n raise ValueError(\"BLAST executable does not exist at %s\" % blast_cmd)\n #We don't need to supply any piped input, but we setup the\n #standard input pipe anyway as a work around for a python\n #bug if this is called from a Windows GUI program. For\n #details, see http://bugs.python.org/issue1124861\n blast_process = subprocess.Popen(str(cline),\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n universal_newlines=True,\n shell=(sys.platform!=\"win32\"))\n blast_process.stdin.close()\n return blast_process.stdout, blast_process.stderr",
"def main():\n count = 0\n\n # Read in the required files and filenames.\n predicted_proteins, protein_db, output_file_aug_to_fasta, \\\n output_file_proteins_to_db, blastp_output, output_to_file, \\\n overwrite = call_files()\n\n # Write all entries in the AUGUSTUS output to a FASTA file\n for record in split_records_aug(predicted_proteins):\n if count == 0:\n mode = 'w'\n else:\n mode = 'a'\n write_fasta(record, output_file_aug_to_fasta, mode)\n count += 1\n\n # Create a blast database and carry out a blastp search\n blast_db = blast_database(protein_db, 'prot', True,\n output_file_proteins_to_db, overwrite)\n\n blastp_file = blastp(output_file_proteins_to_db, output_file_aug_to_fasta,\n True, blastp_output, overwrite, 7)\n\n # Parse the blastp results for the desired information\n blast_results = parse_blastp_output(blastp_output)\n\n # Print the results\n print_output(blast_results)",
"def get_ncbi_pdb_blast(sequence, file_name=None, blast_type=\"blastp\",\n expect=0.01):\n assert (blast_type in [\"blastp\", \"blastn\"])\n if (sequence[-1] == '*'):\n sequence = sequence[:-1]\n if (not sequence.isalpha()):\n raise Sorry(\"The sequence contains non-alphabetical characters; in \"+\n \"addition to A-Z, only an asterisk denoting a stop codon is permitted.\")\n assert (expect >= 0)\n try :\n from Bio.Blast import NCBIWWW\n except ImportError :\n raise Sorry(\"You need to have BioPython installed to use this function.\")\n # FIXME will this use the HTTP proxy if defined?\n blast = NCBIWWW.qblast(blast_type, \"pdb\", sequence, expect=expect)\n blast_out = blast.read()\n if (file_name is not None):\n f = open(file_name, \"w\")\n f.write(blast_out)\n f.close()\n return blast_out",
"def do_blast(self,arg):\n try:\n argumentos=arg.strip(\"\\n\").split(\" \")\n if len(argumentos)==3:\n Blast=My_Blast(argumentos[0], argumentos[1], argumentos[2])\n TRESH=input('Qual é o valor do e-value Tresh: ')\n Blast.blast(TRESH)\n \n else:\n print(\"Número de argumentos errados!\")\n except:\n print(\"Erro a executar o blast!\")",
"def parse_blast(metadata, analysistype, fieldnames, cutoff, program):\n for sample in metadata:\n # Initialise a list to store the BLAST outputs\n sample[analysistype].blastlist = list()\n # Initialise a dictionary to store all the target sequences\n sample[analysistype].targetsequence = dict()\n try:\n # Open the sequence profile file as a dictionary\n blastdict = DictReader(open(sample[analysistype].report), fieldnames=fieldnames, dialect='excel-tab')\n resultdict = dict()\n # Go through each BLAST result\n for row in blastdict:\n # Ignore the headers\n if row['query_id'].startswith(fieldnames[0]):\n pass\n else:\n # Create the subject length variable - if the sequences are DNA (e.g. blastn), use the subject\n # length as usual; if the sequences are protein (e.g. tblastx), use the subject length / 3\n if program == 'blastn' or program == 'blastp' or program == 'blastx':\n subject_length = float(row['subject_length'])\n\n else:\n subject_length = float(row['subject_length']) / 3\n # Calculate the percent identity and extract the bitscore from the row\n # Percent identity is the (length of the alignment - num mismatches) / total subject length\n percentidentity = float('{:0.2f}'.format((float(row['positives']) - float(row['gaps'])) /\n subject_length * 100))\n # Create a percent_match dictionary entry\n row['percent_match'] = percentidentity\n # Remove unwanted pipes added to the name\n target = row['subject_id'].lstrip('gb|').rstrip('|') if '|' in row['subject_id'] else \\\n row['subject_id']\n row['subject_id'] = row['subject_id'].lstrip('gb|').rstrip('|') if '|' in row['subject_id'] \\\n else row['subject_id']\n # If the percent identity is greater than the cutoff\n if percentidentity >= cutoff:\n # Append the hit dictionary to the list\n sample[analysistype].blastlist.append(row)\n # Update the dictionary with the target and percent identity\n resultdict.update({target: percentidentity})\n # Determine if the orientation of the sequence is reversed compared to the reference\n if int(row['subject_end']) < int(row['subject_start']):\n # Create a sequence object using Biopython\n seq = Seq(row['query_sequence'])\n # Calculate the reverse complement of the sequence\n querysequence = str(seq.reverse_complement())\n # If the sequence is not reversed, use the sequence as it is in the output\n else:\n querysequence = row['query_sequence']\n # Add the sequence in the correct orientation to the sample\n try:\n sample[analysistype].targetsequence[target].append(querysequence)\n except (AttributeError, KeyError):\n sample[analysistype].targetsequence[target] = list()\n sample[analysistype].targetsequence[target].append(querysequence)\n # Add the percent identity to the object\n sample[analysistype].blastresults = resultdict\n # Populate missing results with 'NA' values\n if len(resultdict) == 0:\n sample[analysistype].blastresults = 'NA'\n except FileNotFoundError:\n sample[analysistype].blastresults = 'NA'\n return metadata",
"def sequence_BLAST(processedBLAST, inputFile, database, BLASTLoc, SEG, cores): \n\n # Setup the parameters for the BLASTing.\n outputLoc = inputFile.split('.')[0] + '.tmp' \n query = ' -query ' + inputFile\n out = ' -out ' + outputLoc\n evalue = ' -evalue 1'\n inclusionEThresh = ' -inclusion_ethresh 0.0001'\n numIterations = ' -num_iterations 3'\n gapTrigger = ' -gap_trigger 18'\n numDescriptions = ' -num_descriptions 10000'\n numAlignments = ' -num_alignments 10000'\n dbsize = ' -dbsize 0'\n db = ' -db ' + database\n outputFormat = ' -outfmt \"7 qseqid sseqid pident length evalue\"'\n if SEG:\n seg = ' -seg yes'\n else:\n seg = ' -seg no'\n numThreads = ' -num_threads ' + str(cores)\n argsPSI = (query + out + evalue + inclusionEThresh + numIterations + gapTrigger + numDescriptions +\n numAlignments + dbsize + db + outputFormat + seg + numThreads\n )\n # Perform the BLASTing.\n subprocess.call(BLASTLoc + argsPSI, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n # Process the BLAST output.\n processPSIoutput.main(outputLoc, processedBLAST)",
"def create_blast_db(self):\n print(\"Creating blast db\")\n if self.mask:\n command = 'dustmasker -in ' + self.seq_file + ' -infmt fasta '\n command += '-outfmt maskinfo_asn1_bin -out ' + self.seq_file + '_dust.asnb'\n subprocess.check_output(command, shell=True) # identifying low-complexity regions.\n\n command = 'makeblastdb -in ' + self.seq_file + ' -input_type fasta -dbtype nucl '\n command += '-mask_data ' + self.seq_file + '_dust.asnb '\n command += '-out ' + self.seq_file + ' -title \"Whole Genome without low-complexity regions\"'\n subprocess.check_output(command, shell=True) # Overwriting the genome file.\n else:\n command = 'makeblastdb -in ' + self.seq_file + ' -input_type fasta -dbtype nucl '\n command += '-out ' + self.seq_file + ' -title \"Whole Genome unmasked\"'\n subprocess.check_output(command, shell=True)",
"def build_blastscreen_cmd(queryfile, blastexe, blastdb, outdir=None):\n if outdir is None:\n stem = os.path.splitext(queryfile)[0]\n else:\n filestem = os.path.splitext(os.path.split(queryfile)[-1])[0]\n stem = os.path.join(outdir, filestem)\n return NcbiblastnCommandline(\n query=queryfile,\n cmd=blastexe,\n db=blastdb,\n out=stem + \".blasttab\",\n task=\"blastn-short\",\n max_target_seqs=1,\n outfmt=6,\n perc_identity=90,\n ungapped=True,\n )",
"def blast_genome(seqs, blast_db, e_value, max_hits, word_size, working_dir,\r\n blast_mat_root, extra_params=[], DEBUG=True):\r\n\r\n # set up params to use with blastp or\r\n params = {\r\n # matrix\r\n \"-M\": \"BLOSUM62\",\r\n\r\n # max procs\r\n \"-a\": \"1\",\r\n\r\n # expectation\r\n \"-e\": e_value,\r\n\r\n # max seqs to show\r\n \"-b\": max_hits,\r\n\r\n # Word size\r\n \"-W\": word_size,\r\n\r\n # max one line descriptions\r\n \"-v\": max_hits,\r\n\r\n # tabular output\r\n \"-m\": \"9\",\r\n\r\n # program\r\n \"-p\": \"blastn\"\r\n }\r\n params.update(extra_params)\r\n\r\n output = blast_seqs(seqs,\r\n Blastall,\r\n blast_db=blast_db,\r\n params=params,\r\n WorkingDir=working_dir,\r\n add_seq_names=False,\r\n blast_mat_root=blast_mat_root)\r\n\r\n raw_output = [x for x in output['StdOut']]\r\n return raw_output",
"def blast_ncbi(geneseq, blasttype='blastp', db='nr', queryterms='(\"formicidae\"[Organism]) OR (\"drosophila\"[Organism]) OR (\"caenorhabditis elegans\"[Organism])'):\n\n return ncbi.qblast(blasttype, db, geneseq, expect=2, hitlist_size=10, entrez_query=queryterms)",
"def test_blast_genome(self):\r\n\r\n formatdb_cmd = 'formatdb -p F -o T -i %s' % self.subjectdb_fp\r\n system(formatdb_cmd)\r\n self._paths_to_clean_up.append(\"formatdb.log\")\r\n for suffix in [\"nhr\", \"nin\", \"nsd\", \"nsi\", \"nsq\"]:\r\n self._paths_to_clean_up.append(\".\".join(\r\n [self.subjectdb_fp, suffix]))\r\n\r\n raw_output = blast_genome(TEST_BLAST_DB_LINES, self.subjectdb_fp,\r\n e_value=1e-4, max_hits=100, word_size=28,\r\n working_dir=\"./\", blast_mat_root=None)\r\n\r\n i = 0\r\n for line in raw_output:\r\n\r\n if line.startswith(\"#\"):\r\n i += 1\r\n continue # comments depend on tmpfilename, BLAST version\r\n self.assertEqual(raw_output[i], EXP_BLAST_OUTPUT[i])\r\n i += 1",
"def run_blastn(blastn_path, db, input_fasta, blast_threads=1):\n chunk_hits = mkstempfname('.hits.txt.gz')\n\n blastnCmd = [\n blastn_path, '-db', db, '-word_size', '16', '-num_threads', str(blast_threads), '-evalue', '1e-6', '-outfmt',\n '6', '-max_target_seqs', '1', '-query', input_fasta,\n ]\n log.debug(' '.join(blastnCmd))\n blast_pipe = subprocess.Popen(blastnCmd, stdout=subprocess.PIPE)\n\n with util.file.open_or_gzopen(chunk_hits, 'wt') as outf:\n # strip tab output to just query read ID names and emit\n last_read_id = None\n for line in blast_pipe.stdout:\n line = line.decode('UTF-8').rstrip('\\n\\r')\n read_id = line.split('\\t')[0]\n # only emit if it is not a duplicate of the previous read ID\n if read_id != last_read_id:\n last_read_id = read_id\n outf.write(read_id + '\\n')\n\n if blast_pipe.poll():\n raise CalledProcessError()\n os.unlink(input_fasta)\n\n return chunk_hits",
"def unique_parse_blast(metadata, analysistype, fieldnames, cutoff, program):\n for sample in metadata:\n # Initialise a dictionary to store all the target sequences\n sample[analysistype].targetsequence = dict()\n sample[analysistype].queryranges = dict()\n sample[analysistype].querypercent = dict()\n sample[analysistype].queryscore = dict()\n sample[analysistype].results = dict()\n try:\n # Encountering the following error: # _csv.Error: field larger than field limit (131072)\n # According to https://stackoverflow.com/a/15063941, increasing the field limit should fix the issue\n csv.field_size_limit(sys.maxsize)\n # Open the sequence profile file as a dictionary\n blastdict = DictReader(open(sample[analysistype].report), fieldnames=fieldnames, dialect='excel-tab')\n # Go through each BLAST result\n for row in blastdict:\n # Ignore the headers\n if row['query_id'].startswith(fieldnames[0]):\n pass\n else:\n # Create the subject length variable - if the sequences are DNA (e.g. blastn), use the subject\n # length as usual; if the sequences are protein (e.g. tblastx), use the subject length / 3\n if program == 'blastn' or program == 'blastp' or program == 'blastx':\n subject_length = float(row['subject_length'])\n else:\n subject_length = float(row['subject_length']) / 3\n # Calculate the percent identity\n # Percent identity is: (# matches - # mismatches - # gaps) / total subject length\n percentidentity = float('{:0.2f}'.format((float(row['positives']) - float(row['gaps'])) /\n subject_length * 100))\n target = row['subject_id'].lstrip('gb|').rstrip('|') if '|' in row['subject_id'] else \\\n row['subject_id']\n contig = row['query_id']\n high = max([int(row['query_start']), int(row['query_end'])])\n low = min([int(row['query_start']), int(row['query_end'])])\n score = row['bit_score']\n # Create new entries in the blast results dictionaries with the calculated variables\n row['percentidentity'] = percentidentity\n row['percent_match'] = percentidentity\n row['low'] = low\n row['high'] = high\n row['alignment_fraction'] = float('{:0.2f}'.format(float(float(row['alignment_length']) /\n subject_length * 100)))\n # If the percent identity is greater than the cutoff\n if percentidentity >= cutoff:\n try:\n sample[analysistype].results[contig].append(row)\n # Boolean to store whether the list needs to be updated\n append = True\n # Iterate through all the ranges. If the new range is different than any of the ranges\n # seen before, append it. Otherwise, update the previous ranges with the longer range as\n # necessary e.g. 
[2494, 3296] will be updated to [2493, 3296] with [2493, 3293], and\n # [2494, 3296] will become [[2493, 3296], [3296, 4132]] with [3296, 4132]\n for spot in sample[analysistype].queryranges[contig]:\n # Update the low value if the new low value is slightly lower than before\n if 1 <= (spot[0] - low) <= 100:\n # Update the low value\n spot[0] = low\n # It is not necessary to append\n append = False\n # Update the previous high value if the new high value is higher than before\n elif 1 <= (high - spot[1]) <= 100:\n # Update the high value in the list\n spot[1] = high\n # It is not necessary to append\n append = False\n # Do not append if the new low is slightly larger than before\n elif 1 <= (low - spot[0]) <= 100:\n append = False\n # Do not append if the new high is slightly smaller than before\n elif 1 <= (spot[1] - high) <= 100:\n append = False\n # Do not append if the high and low are the same as the previously recorded values\n elif low == spot[0] and high == spot[1]:\n append = False\n # If the result appears to be in a new location, add the data to the object\n if append:\n sample[analysistype].queryranges[contig].append([low, high])\n sample[analysistype].querypercent[contig] = percentidentity\n sample[analysistype].queryscore[contig] = score\n # Initialise and populate the dictionary for each contig\n except KeyError:\n sample[analysistype].queryranges[contig] = list()\n sample[analysistype].queryranges[contig].append([low, high])\n sample[analysistype].querypercent[contig] = percentidentity\n sample[analysistype].queryscore[contig] = score\n sample[analysistype].results[contig] = list()\n sample[analysistype].results[contig].append(row)\n sample[analysistype].targetsequence[target] = list()\n # Determine if the query sequence is in a different frame than the subject, and correct\n # by setting the query sequence to be the reverse complement\n if int(row['subject_end']) < int(row['subject_start']):\n # Create a sequence object using Biopython\n seq = Seq(row['query_sequence'])\n # Calculate the reverse complement of the sequence\n querysequence = str(seq.reverse_complement())\n # If the sequence is not reversed, use the sequence as it is in the output\n else:\n querysequence = row['query_sequence']\n # Add the sequence in the correct orientation to the sample\n try:\n sample[analysistype].targetsequence[target].append(querysequence)\n except (AttributeError, KeyError):\n sample[analysistype].targetsequence[target] = list()\n sample[analysistype].targetsequence[target].append(querysequence)\n except FileNotFoundError:\n pass\n # Return the updated metadata object\n return metadata",
"def test_parallel_blaster(self):\r\n params = {'refseqs_path': self.reference_seqs_file.name,\r\n 'disable_low_complexity_filter': False,\r\n 'e_value': 0.001,\r\n 'num_hits': 1,\r\n 'word_size': 30,\r\n 'suppress_format_blastdb': False,\r\n 'blastmat_dir': None\r\n }\r\n\r\n app = ParallelBlaster()\r\n r = app(self.tmp_seq_filepath,\r\n self.test_out,\r\n params,\r\n job_prefix='BLASTTEST',\r\n poll_directly=True,\r\n suppress_submit_jobs=False)\r\n\r\n # Basic sanity checks: we should get two blast hits (lines). We ignore\r\n # all of the comments in the file. Each line should have 12 fields\r\n # separated by tabs.\r\n results = [line for line in open(glob(\r\n join(self.test_out, '*_blast_out.txt'))[0], 'U') if not\r\n line.startswith('#')]\r\n self.assertEqual(len(results), 2)\r\n self.assertEqual(len(results[0].split('\\t')), 12)\r\n self.assertEqual(len(results[1].split('\\t')), 12)",
"def main():\n\n #Getthefiles\n all_fna_file_path = []\n path_to_all_info = '/Users/gustavotamasco/mdrkrp/project_MDR_KRPgenomes_parsnp'\n #path_to_all_info = argv[1]\n dirpath=os.getcwd()\n os.chdir(path_to_all_info)\n genome_files = list_directories(path_to_all_info)\n os.chdir(\"/Users/gustavotamasco/mdrkrp/plasmids\")\n plasmid_files = list_directories(\"/Users/gustavotamasco/mdrkrp/plasmids\")\n\n\n '''Genomes'''\n #for genome in genome_files:\n #if \"fna\" in genome:\n #print(genome)\n #run_plasflow(genome)\n\n '''Eval Plasmids'''\n for organism in plasmid_files:\n if \"plasflow_plasmids\" in organism:\n run_plasclass(organism)\n run_blastn(organism)\n\n '''Mining info'''\n data = {}\n blast_info_path = \"/Users/gustavotamasco/mdrkrp/plasmids/plasmid_blast\"\n blast_files = list_files_new_source(blast_info_path)\n for org_b in blast_files:\n if \"genome\" not in org_b:\n parse_blast(org_b, blast_info_path, data)",
"def main(inputFile, databaseFile, blastOperationID, SEG=False, cores=2, minAlignLength=20, maxEValue=1.0, verboseOutput=False):\n \n # Get the location of the BLAST executables.\n srcLocation = os.path.abspath(__file__)\n srcLocation = '\\\\'.join(srcLocation.split('\\\\')[:-1])\n BLASTExecutables = srcLocation + '\\\\BLASTExecutables'\n cwd = os.getcwd()\n outputLocation = cwd + '\\\\' + blastOperationID\n if os.path.exists(outputLocation):\n shutil.rmtree(outputLocation)\n os.mkdir(outputLocation)\n \n # Make a BLASTable database from the database file.\n if verboseOutput:\n print 'Creating the BLASTable database.'\n databaseDir = outputLocation + '\\\\TempDatabase'\n os.mkdir(databaseDir)\n os.mkdir(databaseDir + '\\\\TempDB')\n makeDBArgs = BLASTExecutables + '\\\\makeblastdb.exe -in ' + databaseFile + ' -out ' + databaseDir + '\\\\TempDB -dbtype prot'\n subprocess.call(makeDBArgs, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n \n # Loop through the input file and create a FASTA format file for each individual protein.\n if verboseOutput:\n print 'Generating a FASTA file of each sequence.'\n proteinDir = outputLocation + '\\\\TempProteins'\n os.mkdir(proteinDir)\n fullFASTA = open(inputFile, 'r')\n protCount = 0\n for line in fullFASTA:\n if line[0] == '>':\n # If the line starts a new protein definition.\n if protCount == 0:\n # If this is the first protein definition found.\n proteinWrite = open(proteinDir + '\\Prot' + str(protCount) + '.fasta', 'w')\n proteinWrite.write(line)\n else:\n # If this is not the first protein definition found.\n proteinWrite.close()\n proteinWrite = open(proteinDir + '\\Prot' + str(protCount) + '.fasta', 'w')\n proteinWrite.write(line)\n protCount += 1\n else:\n # Otherwise the line is a protein sequence.\n proteinWrite.write(line)\n \n proteinWrite.close()\n fullFASTA.close()\n \n # BLAST each of the individual protein FASTA files just made against the database generated from databaseFile.\n if verboseOutput:\n print 'Starting to BLAST each file.'\n fileCount = 1\n processedBLAST = outputLocation + '\\\\Processed.txt'\n proteinFiles = os.listdir(proteinDir)\n for file in proteinFiles:\n if verboseOutput:\n if fileCount % 100 == 0:\n print 'Currently BLASTing file ', fileCount, ' out of ', len(proteinFiles), '...'\n fileCount += 1\n sequence_BLAST(processedBLAST, proteinDir + '\\\\' + file, databaseDir + '\\\\TempDB', BLASTExecutables + '\\\\psiblast.exe',\n SEG, cores)\n \n # Parse the processed BLAST output, and record the similarities between the different proteins.\n if verboseOutput:\n print 'Now parsing the processed BLAST output.'\n similarities = {}\n readProcessedBLAST = open(processedBLAST, 'r')\n for line in readProcessedBLAST:\n chunks = line.split('\\t')\n key = tuple(sorted([chunks[0], chunks[1]]))\n identity = float(chunks[2])\n alignLength = int(chunks[3])\n if alignLength <= minAlignLength:\n # If the alignment length is too short, then ignore the alignment.\n continue\n evalue = float(chunks[4])\n if evalue >= maxEValue:\n # If the EValue is too great, then ignore the alignment.\n continue\n if similarities.has_key(key):\n oldSimilarity = similarities[key]['Identity']\n if identity > oldSimilarity:\n similarities[key] = {'Identity' : identity, 'Length' : alignLength, 'EValue' : evalue}\n else:\n similarities[key] = {'Identity' : identity, 'Length' : alignLength, 'EValue' : evalue}\n readProcessedBLAST.close()\n\n # Remove the temporary directory used for manipulating and processing the BLAST output.\n try:\n 
shutil.rmtree(outputLocation)\n except:\n time.sleep(60)\n shutil.rmtree(outputLocation)\n \n return similarities"
] | [
"0.6584838",
"0.6572283",
"0.650212",
"0.6392259",
"0.61772364",
"0.6139482",
"0.6132713",
"0.6076423",
"0.59617484",
"0.58901364",
"0.5859545",
"0.5780147",
"0.5600362",
"0.5597818",
"0.5547912",
"0.54921067",
"0.54734504",
"0.54669",
"0.5431532",
"0.5394897",
"0.5355753",
"0.53033864",
"0.52945405",
"0.5245142",
"0.5210821",
"0.520421",
"0.5196089",
"0.5176983",
"0.5133492",
"0.50537014"
] | 0.7728994 | 0 |
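Most of the records in this stretch of the dataset center on driving NCBI BLAST from Python, several of them through Biopython's command-line wrappers in Bio.Blast.Applications. As a minimal, illustrative sketch of that pattern only (the query FASTA, database name, and output path below are placeholders rather than values taken from any record here, and newer Biopython releases deprecate these wrappers in favor of calling the BLAST+ binaries directly), a tabular blastn search can be assembled and executed like this:

from Bio.Blast.Applications import NcbiblastnCommandline

# Placeholder inputs: a query FASTA file and a nucleotide database already built with makeblastdb.
cline = NcbiblastnCommandline(query="sample_genome.fasta",
                              db="targets_db",
                              evalue=1e-5,
                              outfmt=6,                # tabular report
                              out="sample_blastn.tsv",
                              num_threads=4)

# Calling the wrapper object runs blastn; (stdout, stderr) are returned and the report lands in 'out'.
stdout, stderr = cline()

The same construction applies to the other wrappers (NcbiblastpCommandline, NcbitblastnCommandline, and so on), which is the family of classes the run_blast record above dispatches over.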
Execute and retrieve data from standalone BLASTPGP as handles (OBSOLETE). NOTE This function is obsolete, you are encouraged to the command line wrapper Bio.Blast.Applications.BlastpgpCommandline instead. Execute and retrieve data from blastpgp. blastcmd is the command used to launch the 'blastpgp' executable. database is the path to the database to search against. infile is the path to the file containing the sequence to search with. The return values are two handles, for standard output and standard error. You may pass more parameters to keywds to change the behavior of the search. Otherwise, optional values will be chosen by blastpgp. The Blast output is by default in XML format. Use the align_view keyword for output in a different format. Scoring matrix Matrix to use. gap_open Gap open penalty. gap_extend Gap extension penalty. window_size Multiple hits window size. npasses Number of passes. passes Hits/passes. Integer 02. Algorithm gapped Whether to do a gapped alignment. T/F expectation Expectation value cutoff. wordsize Word size. keep_hits Number of beset hits from a region to keep. xdrop Dropoff value (bits) for gapped alignments. hit_extend Threshold for extending hits. region_length Length of region used to judge hits. db_length Effective database length. search_length Effective length of search space. nbits_gapping Number of bits to trigger gapping. pseudocounts Pseudocounts constants for multiple passes. xdrop_final X dropoff for final gapped alignment. xdrop_extension Dropoff for blast extensions. model_threshold Evalue threshold to include in multipass model. required_start Start of required region in query. required_end End of required region in query. Processing XXX should document default values program The blast program to use. (PHIBLAST) filter Filter query sequence for low complexity (with SEG)? T/F believe_query Believe the query defline? T/F nprocessors Number of processors to use. Formatting html Produce HTML output? T/F descriptions Number of oneline descriptions. alignments Number of alignments. align_view Alignment view. Integer 011, passed as a string or integer. show_gi Show GI's in deflines? T/F seqalign_file seqalign file to output. align_outfile Output file for alignment. checkpoint_outfile Output file for PSIBLAST checkpointing. restart_infile Input file for PSIBLAST restart. hit_infile Hit file for PHIBLAST. matrix_outfile Output file for PSIBLAST matrix in ASCII. align_outfile Output file for alignment. Filename to write to, if ommitted standard output is used (which you can access from the returned handles). align_infile Input alignment file for PSIBLAST restart. | def blastpgp(blastcmd, database, infile, align_view='7', **keywds):
import warnings
warnings.warn("This function is obsolete, you are encouraged to the command line wrapper Bio.Blast.Applications.BlastpgpCommandline instead.", PendingDeprecationWarning)
_security_check_parameters(keywds)
att2param = {
'matrix' : '-M',
'gap_open' : '-G',
'gap_extend' : '-E',
'window_size' : '-A',
'npasses' : '-j',
'passes' : '-P',
'gapped' : '-g',
'expectation' : '-e',
'wordsize' : '-W',
'keep_hits' : '-K',
'xdrop' : '-X',
'hit_extend' : '-f',
'region_length' : '-L',
'db_length' : '-Z',
'search_length' : '-Y',
'nbits_gapping' : '-N',
'pseudocounts' : '-c',
'xdrop_final' : '-Z',
'xdrop_extension' : '-y',
'model_threshold' : '-h',
'required_start' : '-S',
'required_end' : '-H',
'program' : '-p',
'database' : '-d',
'infile' : '-i',
'filter' : '-F',
'believe_query' : '-J',
'nprocessors' : '-a',
'html' : '-T',
'descriptions' : '-v',
'alignments' : '-b',
'align_view' : '-m',
'show_gi' : '-I',
'seqalign_file' : '-O',
'align_outfile' : '-o',
'checkpoint_outfile' : '-C',
'restart_infile' : '-R',
'hit_infile' : '-k',
'matrix_outfile' : '-Q',
'align_infile' : '-B',
}
from Applications import BlastpgpCommandline
cline = BlastpgpCommandline(blastcmd)
cline.set_parameter(att2param['database'], database)
cline.set_parameter(att2param['infile'], infile)
cline.set_parameter(att2param['align_view'], str(align_view))
for key, value in keywds.iteritems():
cline.set_parameter(att2param[key], str(value))
return _invoke_blast(cline) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def rpsblast(blastcmd, database, infile, align_view=\"7\", **keywds):\n\n import warnings\n warnings.warn(\"This function is obsolete, you are encouraged to the command line wrapper Bio.Blast.Applications.BlastrpsCommandline instead.\", PendingDeprecationWarning)\n _security_check_parameters(keywds)\n \n att2param = {\n 'multihit' : '-P',\n 'gapped' : '-g',\n 'expectation' : '-e',\n 'range_restriction' : '-L',\n 'xdrop' : '-X',\n 'xdrop_final' : '-Z',\n 'xdrop_extension' : '-y',\n 'search_length' : '-Y',\n 'nbits_gapping' : '-N',\n 'protein' : '-p',\n 'db_length' : '-z',\n\n 'database' : '-d',\n 'infile' : '-i',\n 'filter' : '-F',\n 'case_filter' : '-U',\n 'believe_query' : '-J',\n 'nprocessors' : '-a',\n 'logfile' : '-l',\n\n 'html' : '-T',\n 'descriptions' : '-v',\n 'alignments' : '-b',\n 'align_view' : '-m',\n 'show_gi' : '-I',\n 'seqalign_file' : '-O',\n 'align_outfile' : '-o',\n }\n\n from Applications import RpsBlastCommandline\n cline = RpsBlastCommandline(blastcmd)\n cline.set_parameter(att2param['database'], database)\n cline.set_parameter(att2param['infile'], infile)\n cline.set_parameter(att2param['align_view'], str(align_view))\n for key, value in keywds.iteritems():\n cline.set_parameter(att2param[key], str(value))\n return _invoke_blast(cline)",
"def blastall(blastcmd, program, database, infile, align_view='7', **keywds):\n\n _security_check_parameters(keywds)\n\n att2param = {\n 'matrix' : '-M',\n 'gap_open' : '-G',\n 'gap_extend' : '-E',\n 'nuc_match' : '-r',\n 'nuc_mismatch' : '-q',\n 'query_genetic_code' : '-Q',\n 'db_genetic_code' : '-D',\n\n 'gapped' : '-g',\n 'expectation' : '-e',\n 'wordsize' : '-W',\n 'strands' : '-S',\n 'keep_hits' : '-K',\n 'xdrop' : '-X',\n 'hit_extend' : '-f',\n 'region_length' : '-L',\n 'db_length' : '-z',\n 'search_length' : '-Y',\n \n 'program' : '-p',\n 'database' : '-d',\n 'infile' : '-i',\n 'filter' : '-F',\n 'believe_query' : '-J',\n 'restrict_gi' : '-l',\n 'nprocessors' : '-a',\n 'oldengine' : '-V',\n\n 'html' : '-T',\n 'descriptions' : '-v',\n 'alignments' : '-b',\n 'align_view' : '-m',\n 'show_gi' : '-I',\n 'seqalign_file' : '-O',\n 'outfile' : '-o',\n }\n import warnings\n warnings.warn(\"This function is obsolete, you are encouraged to the command line wrapper Bio.Blast.Applications.BlastallCommandline instead.\", PendingDeprecationWarning)\n from Applications import BlastallCommandline\n cline = BlastallCommandline(blastcmd)\n cline.set_parameter(att2param['program'], program)\n cline.set_parameter(att2param['database'], database)\n cline.set_parameter(att2param['infile'], infile)\n cline.set_parameter(att2param['align_view'], str(align_view))\n for key, value in keywds.iteritems():\n cline.set_parameter(att2param[key], str(value))\n return _invoke_blast(cline)",
"def run_blast(inputfile, input_type, outputfile, database, args=None, verbose=True):\n\n assert (input_type in ['protein', 'dna']), \"Input type must be either 'protein' or 'dna'\"\n\n cmd = ['diamond']\n\n if input_type == 'protein':\n cmd += ['blastp']\n elif input_type == 'dna':\n cmd += ['blastx']\n\n cmd += ['-d', database]\n cmd += ['-q', inputfile]\n cmd += ['-o', outputfile]\n\n if not args:\n args = \"--more-sensitive --top 10 --quiet\"\n\n cmd += args.split()\n\n if verbose:\n print(' '.join(cmd))\n\n with open(os.devnull, 'w') as devnull:\n try:\n exit_code = call(cmd, stdout=devnull)\n except OSError:\n exit_code = None\n\n return exit_code",
"def blastp(database, query, output_to_file = False, output_file = None,\n overwrite = False, outfmt = 7):\n if output_to_file:\n if os.path.exists(output_file) and not overwrite:\n return output_file\n cmd = 'blastp -db {} -query {} -outfmt {} -out {} -num_alignments 1'.\\\n format(database, query, outfmt, output_file)\n else:\n cmd = 'blastp -db {} -query {} -outfmt {} -num_alignments 1'.format(\n database, query, outfmt)\n\n printed_output = subprocess.check_output(cmd, shell=True)\n if output_to_file:\n return output_file\n return printed_output",
"def makeblastdb(fasta, program='blastn', returncmd=False, **kwargs):\n # Convert the options dictionary to a string\n options = kwargs_to_string(kwargs)\n # Set the dbtype appropriately\n if program == 'blastn' or program == 'tblastn' or program == 'tblastx':\n dbtype = 'nucl'\n else:\n dbtype = 'prot'\n # Remove the file extension from the file name\n output = os.path.splitext(fasta)[0]\n cmd = 'makeblastdb -in {fasta} -parse_seqids -max_file_sz 2GB -dbtype {dbtype} -out {output}{options}' \\\n .format(fasta=fasta,\n dbtype=dbtype,\n output=output,\n options=options)\n # Check if database already exists\n if not os.path.isfile('{output}.nhr'.format(output=output)):\n out, err = run_subprocess(cmd)\n else:\n out = str()\n err = str()\n if returncmd:\n return out, err, cmd\n else:\n return out, err",
"def exec_blast(infile, config_file, out_name):\n\tdb, evalue = parse_config(config_file, \"blast\")\n\ttry:\n\t\tinput_open = open(infile, \"r\")\n\t\tinput_open.close()\n\texcept:\n\t\traise IOError(\"Cannot open input file %s\" %infile)\n\n\tif is_fasta(infile) == False:\n\t\t\traise TypeError(\"Input file %s must be in fasta format\" %infile)\n\telse:\n\t\tfasta_string = SeqIO.read(infile, format=\"fasta\")\n\t\tresult_handle = NCBIWWW.qblast(\"blastp\", db, fasta_string.seq)\n\t\toutput= out_name + \".xml\"\n\t\tsave_file = open(output, \"w\")\n\t\tsave_file.write(result_handle.read())\n\t\tsave_file.close()\n\t\tresult_handle.close()\n\treturn (output)",
"def exec_blast(infile, config_file, out_name):\n\tdb, evalue = parse_config(config_file, \"blast\")\n\tfasta_string = SeqIO.read(infile, format=\"fasta\")\n\tresult_handle = NCBIWWW.qblast(\"blastp\", \"nr\", fasta_string.seq)\n\toutput= out_name + \".xml\"\n\tsave_file = open(output, \"w\")\n\tsave_file.write(result_handle.read())\n\tsave_file.close()\n\tresult_handle.close()\n\treturn (output)",
"def get_ncbi_pdb_blast(sequence, file_name=None, blast_type=\"blastp\",\n expect=0.01):\n assert (blast_type in [\"blastp\", \"blastn\"])\n if (sequence[-1] == '*'):\n sequence = sequence[:-1]\n if (not sequence.isalpha()):\n raise Sorry(\"The sequence contains non-alphabetical characters; in \"+\n \"addition to A-Z, only an asterisk denoting a stop codon is permitted.\")\n assert (expect >= 0)\n try :\n from Bio.Blast import NCBIWWW\n except ImportError :\n raise Sorry(\"You need to have BioPython installed to use this function.\")\n # FIXME will this use the HTTP proxy if defined?\n blast = NCBIWWW.qblast(blast_type, \"pdb\", sequence, expect=expect)\n blast_out = blast.read()\n if (file_name is not None):\n f = open(file_name, \"w\")\n f.write(blast_out)\n f.close()\n return blast_out",
"def _blast(query, output_pssm, output, blastdb):\n psiblast_command = \"psiblast -db {:} -query {:} -out_ascii_pssm {:} \" + \\\n \"-save_pssm_after_last_round -out {:}\"\n log_out = \"{}.out\".format(output)\n log_err = \"{}.err\".format(output)\n with open(log_out, 'a') as f_out:\n with open(log_err, 'a') as f_err:\n command = psiblast_command.format(\n blastdb, query, output_pssm, output)\n f_out.write('=================== CALL ===================\\n')\n f_out.write(command + '\\n')\n subprocess.check_call(\n command, shell=True, stderr=f_err, stdout=f_out)\n f_out.write('================= END CALL =================\\n')",
"def qiime_blast_seqs(seqs,\r\n blast_constructor=Blastall,\r\n blast_program='blastn',\r\n blast_db=None,\r\n refseqs=None,\r\n refseqs_fp=None,\r\n blast_mat_root=None,\r\n params=None,\r\n WorkingDir=None,\r\n seqs_per_blast_run=1000,\r\n is_protein=False,\r\n HALT_EXEC=False):\r\n\r\n assert blast_db or refseqs_fp or refseqs, \\\r\n 'Must provide either a blast_db or a fasta ' +\\\r\n 'filepath containing sequences to build one.'\r\n\r\n if refseqs_fp:\r\n blast_db, db_files_to_remove =\\\r\n build_blast_db_from_fasta_path(refseqs_fp,\r\n output_dir=WorkingDir,\r\n is_protein=is_protein)\r\n elif refseqs:\r\n blast_db, db_files_to_remove =\\\r\n build_blast_db_from_fasta_file(refseqs,\r\n output_dir=WorkingDir,\r\n is_protein=is_protein)\r\n else:\r\n db_files_to_remove = []\r\n\r\n if params is None:\r\n params = {}\r\n params[\"-d\"] = blast_db\r\n params[\"-p\"] = blast_program\r\n\r\n blast_app = blast_constructor(\r\n params=params,\r\n blast_mat_root=blast_mat_root,\r\n InputHandler='_input_as_seq_id_seq_pairs',\r\n WorkingDir=WorkingDir,\r\n SuppressStderr=True,\r\n HALT_EXEC=HALT_EXEC)\r\n\r\n current_seqs = []\r\n blast_results = BlastResult([])\r\n for seq in seqs:\r\n current_seqs.append(seq)\r\n if len(current_seqs) % seqs_per_blast_run == 0:\r\n if blast_results:\r\n blast_results.update(\r\n BlastResult(blast_app(current_seqs)['StdOut']))\r\n else:\r\n blast_results = BlastResult(blast_app(current_seqs)['StdOut'])\r\n current_seqs = []\r\n\r\n # clean-up run: blast the remaining sequences\r\n blast_results.update(\r\n BlastResult(blast_app(current_seqs)['StdOut']))\r\n\r\n remove_files(db_files_to_remove)\r\n\r\n return blast_results",
"def sequence_BLAST(processedBLAST, inputFile, database, BLASTLoc, SEG, cores): \n\n # Setup the parameters for the BLASTing.\n outputLoc = inputFile.split('.')[0] + '.tmp' \n query = ' -query ' + inputFile\n out = ' -out ' + outputLoc\n evalue = ' -evalue 1'\n inclusionEThresh = ' -inclusion_ethresh 0.0001'\n numIterations = ' -num_iterations 3'\n gapTrigger = ' -gap_trigger 18'\n numDescriptions = ' -num_descriptions 10000'\n numAlignments = ' -num_alignments 10000'\n dbsize = ' -dbsize 0'\n db = ' -db ' + database\n outputFormat = ' -outfmt \"7 qseqid sseqid pident length evalue\"'\n if SEG:\n seg = ' -seg yes'\n else:\n seg = ' -seg no'\n numThreads = ' -num_threads ' + str(cores)\n argsPSI = (query + out + evalue + inclusionEThresh + numIterations + gapTrigger + numDescriptions +\n numAlignments + dbsize + db + outputFormat + seg + numThreads\n )\n # Perform the BLASTing.\n subprocess.call(BLASTLoc + argsPSI, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n # Process the BLAST output.\n processPSIoutput.main(outputLoc, processedBLAST)",
"def blast(self, analysis_id, organism_id, input, blastdb=None, blastdb_id=None,\n re_name=None, query_type=\"polypeptide\", match_on_name=False, skip_missing=False):\n\n if blastdb_id:\n found_db = self.session.query(self.model.db).filter_by(db_id=blastdb_id)\n if not found_db:\n raise Exception(\"Invalid db ID\")\n elif blastdb:\n found_db = self.session.query(self.model.db).filter_by(name=blastdb)\n if not found_db:\n raise Exception(\"Invalid db name\")\n blastdb_id = found_db.one().db_id\n\n if not blastdb_id:\n raise Exception(\"Either blastdb or blastdb_id is required\")\n\n res = self.session.query(self.model.analysis).filter_by(analysis_id=analysis_id)\n if not res.count():\n raise Exception(\"Analysis with the id {} was not found\".format(analysis_id))\n\n # Cache many things to speed up loading\n self._reset_cache()\n seqterm = self.ci.get_cvterm_id(query_type, 'sequence')\n self._init_feature_cache(organism_id, seqterm, match_on_name)\n\n self._init_analysisfeature_cache(analysis_id)\n\n self._init_analysisprop_cache()\n\n self._hit_details_cache = None\n\n if not os.path.exists(input):\n raise Exception(\"{} was not found\".format(input))\n\n self._setup_tables(\"blast\")\n\n count_ins = self._parse_blast_xml(analysis_id, blastdb_id, input, re_name, query_type, True, organism_id, skip_missing)\n\n blastdb_ap = self.ci.get_cvterm_id('analysis_blast_blastdb', 'tripal')\n self._add_analysisprop(analysis_id, type_id=blastdb_ap, value=blastdb_id)\n\n self.session.commit()\n\n self._reset_cache()\n\n return {'inserted': count_ins}",
"def rbpdb_data_load(rna_info, out=None):\n del out # this function doesn't emit progress status (yet)!\n rbpdb_protein_file_path = (\n \"./website/data/RBPDB_v1.3.1_proteins_human_2012-11-21.tdt\"\n )\n letter_strength = RBPDB_MOTIF_PWM_LETTER_STRENGTH\n n_repeat_req = RBPDB_MOTIF_N_REPEAT_REQ\n rna_seq = get_human_seq(rna_info)\n\n experiment_id_to_pwm_dict = (\n picklify(\n generate_rbpdb_experimental_to_pwm, letter_strength, n_repeat_req\n )\n )\n protein_id_to_experimental_ids_dict = (\n picklify(generate_rbpdb_protein_to_experiment_id)\n )\n experiment_id_to_columns_dict = (\n picklify(generate_rbpdb_experiment_to_columns)\n )\n with open(rbpdb_protein_file_path) as handle:\n _ = handle.readline().strip().split('\\t')\n # columns here is expected to have the following information in the\n # following order:\n # protein_id, annotation_id, creation_date, update_date, gene_name,\n # gene_description, species, taxID, domains, aliases, flag, flag_notes,\n # some_other_id\n protein_columns = handle.readline().replace(\"\\n\", \"\").split('\\t')\n while protein_columns != ['']:\n assert len(protein_columns) == 13\n # We only care about human RBPs for now.\n if protein_columns[10] == \"0\":\n protein_columns = (\n handle.readline().replace(\"\\n\", \"\").split('\\t')\n )\n continue\n rbp = protein_columns[4]\n protein_id = protein_columns[0]\n\n if protein_id not in protein_id_to_experimental_ids_dict:\n # No experiments associated. So no data to be had\n protein_columns = (\n handle.readline().replace(\"\\n\", \"\").split('\\t')\n )\n continue\n\n for experiment_id in (\n protein_id_to_experimental_ids_dict[protein_id]\n ):\n assert (\n experiment_id in experiment_id_to_pwm_dict\n or experiment_id == \"410\"\n )\n if experiment_id == \"410\":\n continue\n pwms = experiment_id_to_pwm_dict[experiment_id]\n for pwm in pwms:\n assert len(pwm[\"A\"]) > 0\n experimental_columns = (\n experiment_id_to_columns_dict[experiment_id]\n )\n assert len(experimental_columns) == 15\n total_columns = protein_columns + experimental_columns\n annotation = (\n ANNOTATION_COLUMN_DELIMITER.join(\n [\n total_columns[i]\n for i in rbpdb_columns_of_interest\n ]\n )\n )\n\n if pwm_degree_of_freedom(pwm) >= 2048:\n # experimentally shown that by this point naive brute\n # force is faster. Bound could be\n # reduced.\n sites = pwm_scan_naive_brute_force(rna_seq, pwm)\n else:\n sites = pwm_scan(rna_seq, pwm)\n\n if not sites:\n continue\n\n for start, end in sites:\n yield rbp, start, end, annotation\n\n protein_columns = handle.readline().replace(\"\\n\", \"\").split('\\t')",
"def parse_first_database(db, percentage_ids, alignment_lengths):\n #@@@ Try blast parser object\n results = MinimalBlastParser9(db)\n\n #@@@ cogent.util.transform.cartesian_product\n options = [(p,a) for p in percentage_ids for a in alignment_lengths]\n\n best_hits = {}\n for total_queries, (metadata, hits) in enumerate(results):\n fields = [i.strip() for i in metadata['FIELDS'].split(',')]\n name = metadata['QUERY']\n percentage_id = fields.index('% identity')\n bit_score = fields.index('bit score')\n alg_length = fields.index('alignment length')\n evalue = fields.index('e-value')\n subject_id = fields.index('Subject id')\n\n if not hits: \n continue\n\n best_hits[name] = []\n for p,a in options:\n # best bit score\n bbs = 0\n result = None\n\n for h in hits:\n h[percentage_id] = float(h[percentage_id])\n h[alg_length] = float(h[alg_length])\n h[bit_score] = float(h[bit_score])\n\n if h[percentage_id]>=p and h[alg_length]>=a and h[bit_score]>bbs:\n result = { 'a': { 'subject_id': h[subject_id],\n 'percentage_id': h[percentage_id],\n 'bit_score': h[bit_score],\n 'alg_length': int(h[alg_length]),\n 'evalue': float(h[evalue]) },\n 'b': { 'subject_id': None, \n 'bit_score': -1 } }\n bbs = h[bit_score]\n best_hits[name].append(result)\n\n return total_queries+1, best_hits",
"def blast_reads(number_hits, ncbi_database, organism):\n #blast_reads(number_hits, ncbi_database, entrez_query)\n print(\"Searching for BLAST hits...\")\n fasta_string = open(\"Log_Directory/blast_queries.fasta\").read()\n print (\"The ncbi database being searched is:\", ncbi_database)\n if len(organism) > 0:\n print (\"The organism being searched is: \", organism)\n query ='\"txid'+str(organism)+'\"'\n result_handle = NCBIWWW.qblast(\"blastn\", ncbi_database, fasta_string, entrez_query=query, hitlist_size=number_hits,\n expect=10.0, nucl_penalty=-2, nucl_reward=1, megablast=True, word_size=28, expect_low=True, gapcosts='0 2')\n else:\n print (\"No organism is designated\")\n result_handle = NCBIWWW.qblast(\"blastn\", ncbi_database, fasta_string, hitlist_size=number_hits)\n blast_result = open(\"Log_Directory/blast_results.xml\", \"w\")\n blast_result.write(result_handle.read())\n blast_result.close()\n result_handle.close()",
"def blast_genome(seqs, blast_db, e_value, max_hits, word_size, working_dir,\r\n blast_mat_root, extra_params=[], DEBUG=True):\r\n\r\n # set up params to use with blastp or\r\n params = {\r\n # matrix\r\n \"-M\": \"BLOSUM62\",\r\n\r\n # max procs\r\n \"-a\": \"1\",\r\n\r\n # expectation\r\n \"-e\": e_value,\r\n\r\n # max seqs to show\r\n \"-b\": max_hits,\r\n\r\n # Word size\r\n \"-W\": word_size,\r\n\r\n # max one line descriptions\r\n \"-v\": max_hits,\r\n\r\n # tabular output\r\n \"-m\": \"9\",\r\n\r\n # program\r\n \"-p\": \"blastn\"\r\n }\r\n params.update(extra_params)\r\n\r\n output = blast_seqs(seqs,\r\n Blastall,\r\n blast_db=blast_db,\r\n params=params,\r\n WorkingDir=working_dir,\r\n add_seq_names=False,\r\n blast_mat_root=blast_mat_root)\r\n\r\n raw_output = [x for x in output['StdOut']]\r\n return raw_output",
"def create_blast_db(self):\n print(\"Creating blast db\")\n if self.mask:\n command = 'dustmasker -in ' + self.seq_file + ' -infmt fasta '\n command += '-outfmt maskinfo_asn1_bin -out ' + self.seq_file + '_dust.asnb'\n subprocess.check_output(command, shell=True) # identifying low-complexity regions.\n\n command = 'makeblastdb -in ' + self.seq_file + ' -input_type fasta -dbtype nucl '\n command += '-mask_data ' + self.seq_file + '_dust.asnb '\n command += '-out ' + self.seq_file + ' -title \"Whole Genome without low-complexity regions\"'\n subprocess.check_output(command, shell=True) # Overwriting the genome file.\n else:\n command = 'makeblastdb -in ' + self.seq_file + ' -input_type fasta -dbtype nucl '\n command += '-out ' + self.seq_file + ' -title \"Whole Genome unmasked\"'\n subprocess.check_output(command, shell=True)",
"def blaster(protSeq, orgnID = \"Mus musculus\"):\n \n from Bio.Blast.NCBIWWW import qblast\n from Bio.Blast import NCBIXML\n from sys import exit\n \n print(\"\\nconnecting to BLAST server. this will take some time...\")\n i = 1\n while i < 4: # BLAST sometimes returns empty results. if so, try once more, it happens quite rarely and resending the query seems to fix it.\n print(\"attempt number \" + str(i))\n i += 1\n resX = qblast(\"blastp\",\"refseq_protein\", protSeq, entrez_query= orgnID + \"[organism]\")\n resO = NCBIXML.read(resX)\n if resO.descriptions != []: break \n if resO.descriptions == []: \n print(\"connection unsuccessful. The BLAST server is acting up. Try again later.\")\n exit(0)\n \n else: print(\"connection successful\")\n \n print(resO.descriptions[0])\n descO = resO.descriptions[0]\n if descO.e < 0.01: \n try:\n descID = descO.title.split(\"|\")[3] # not sure why I picked element 3 here\n except IndexError:\n descID = descO.title.split(\"|\")[1]\n \n if \".\" in descID: return descID.split(\".\")[0]\n else: return descID\n \n else: return \"-\"",
"def main():\n count = 0\n\n # Read in the required files and filenames.\n predicted_proteins, protein_db, output_file_aug_to_fasta, \\\n output_file_proteins_to_db, blastp_output, output_to_file, \\\n overwrite = call_files()\n\n # Write all entries in the AUGUSTUS output to a FASTA file\n for record in split_records_aug(predicted_proteins):\n if count == 0:\n mode = 'w'\n else:\n mode = 'a'\n write_fasta(record, output_file_aug_to_fasta, mode)\n count += 1\n\n # Create a blast database and carry out a blastp search\n blast_db = blast_database(protein_db, 'prot', True,\n output_file_proteins_to_db, overwrite)\n\n blastp_file = blastp(output_file_proteins_to_db, output_file_aug_to_fasta,\n True, blastp_output, overwrite, 7)\n\n # Parse the blastp results for the desired information\n blast_results = parse_blastp_output(blastp_output)\n\n # Print the results\n print_output(blast_results)",
"def format_blast(makeblastdb_path, fname):\n # The script is written in shell, so this function just calls it and\n # checks the output\n # Build the shell command\n cmd = ['bash', DBFORMAT_SCRIPT, makeblastdb_path, fname]\n # Execute the script\n # shell=False to ensure that we aren't executing commands from untrusted\n # sources\n p = subprocess.Popen(\n cmd,\n shell=False,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n out, err = p.communicate()\n return (out, err)",
"def build_db(db_fasta, out_db, input_type='fasta'):\n subprocess.run(\n 'makeblastdb -dbtype nucl -in %s -input_type %s -parse_seqids -out %s'\n % (db_fasta, input_type, out_db),\n shell=True,\n env={'PATH': BLAST_PATH}\n )",
"def main(inputFile, databaseFile, blastOperationID, SEG=False, cores=2, minAlignLength=20, maxEValue=1.0, verboseOutput=False):\n \n # Get the location of the BLAST executables.\n srcLocation = os.path.abspath(__file__)\n srcLocation = '\\\\'.join(srcLocation.split('\\\\')[:-1])\n BLASTExecutables = srcLocation + '\\\\BLASTExecutables'\n cwd = os.getcwd()\n outputLocation = cwd + '\\\\' + blastOperationID\n if os.path.exists(outputLocation):\n shutil.rmtree(outputLocation)\n os.mkdir(outputLocation)\n \n # Make a BLASTable database from the database file.\n if verboseOutput:\n print 'Creating the BLASTable database.'\n databaseDir = outputLocation + '\\\\TempDatabase'\n os.mkdir(databaseDir)\n os.mkdir(databaseDir + '\\\\TempDB')\n makeDBArgs = BLASTExecutables + '\\\\makeblastdb.exe -in ' + databaseFile + ' -out ' + databaseDir + '\\\\TempDB -dbtype prot'\n subprocess.call(makeDBArgs, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n \n # Loop through the input file and create a FASTA format file for each individual protein.\n if verboseOutput:\n print 'Generating a FASTA file of each sequence.'\n proteinDir = outputLocation + '\\\\TempProteins'\n os.mkdir(proteinDir)\n fullFASTA = open(inputFile, 'r')\n protCount = 0\n for line in fullFASTA:\n if line[0] == '>':\n # If the line starts a new protein definition.\n if protCount == 0:\n # If this is the first protein definition found.\n proteinWrite = open(proteinDir + '\\Prot' + str(protCount) + '.fasta', 'w')\n proteinWrite.write(line)\n else:\n # If this is not the first protein definition found.\n proteinWrite.close()\n proteinWrite = open(proteinDir + '\\Prot' + str(protCount) + '.fasta', 'w')\n proteinWrite.write(line)\n protCount += 1\n else:\n # Otherwise the line is a protein sequence.\n proteinWrite.write(line)\n \n proteinWrite.close()\n fullFASTA.close()\n \n # BLAST each of the individual protein FASTA files just made against the database generated from databaseFile.\n if verboseOutput:\n print 'Starting to BLAST each file.'\n fileCount = 1\n processedBLAST = outputLocation + '\\\\Processed.txt'\n proteinFiles = os.listdir(proteinDir)\n for file in proteinFiles:\n if verboseOutput:\n if fileCount % 100 == 0:\n print 'Currently BLASTing file ', fileCount, ' out of ', len(proteinFiles), '...'\n fileCount += 1\n sequence_BLAST(processedBLAST, proteinDir + '\\\\' + file, databaseDir + '\\\\TempDB', BLASTExecutables + '\\\\psiblast.exe',\n SEG, cores)\n \n # Parse the processed BLAST output, and record the similarities between the different proteins.\n if verboseOutput:\n print 'Now parsing the processed BLAST output.'\n similarities = {}\n readProcessedBLAST = open(processedBLAST, 'r')\n for line in readProcessedBLAST:\n chunks = line.split('\\t')\n key = tuple(sorted([chunks[0], chunks[1]]))\n identity = float(chunks[2])\n alignLength = int(chunks[3])\n if alignLength <= minAlignLength:\n # If the alignment length is too short, then ignore the alignment.\n continue\n evalue = float(chunks[4])\n if evalue >= maxEValue:\n # If the EValue is too great, then ignore the alignment.\n continue\n if similarities.has_key(key):\n oldSimilarity = similarities[key]['Identity']\n if identity > oldSimilarity:\n similarities[key] = {'Identity' : identity, 'Length' : alignLength, 'EValue' : evalue}\n else:\n similarities[key] = {'Identity' : identity, 'Length' : alignLength, 'EValue' : evalue}\n readProcessedBLAST.close()\n\n # Remove the temporary directory used for manipulating and processing the BLAST output.\n try:\n 
shutil.rmtree(outputLocation)\n except:\n time.sleep(60)\n shutil.rmtree(outputLocation)\n \n return similarities",
"def BLAST_alignment(species, index_query, index_alignment, index_identity, prot):\n alignments = {}\n seq_id = []\n boo = True\n with open(blastpPath + '/BLAST_%s_mouse' % species) as f:\n for line in f:\n if boo:\n if line[0] != '#':\n query = re.split(\"\\||\\t\", line)[index_query]\n iden = float(re.split(\"\\||\\t\", line)[index_identity])\n if query in prot:\n seq_id.append(iden)\n boo = False\n if line[0] == '#':\n boo = True\n\n return np.array(seq_id)",
"def makeblastdb(files, db_name, db_type):\n with open(db_name + \".pin\", \"w\") as f:\n f.write(\"\\n\".join(db_name))\n return subprocess.run([\"makeblastdb\", \"-in\", db_name + \".pin\", \"-dbtype\", db_type)",
"def test_w_preexising_blastdb(self):\r\n # pre-existing blast db\r\n inseqs = parse_fasta(self.inseqs1)\r\n actual = qiime_blast_seqs(inseqs, blast_db=self.blast_db)\r\n self.assertEqual(len(actual), 5)\r\n\r\n # couple of sanity checks against command line blast\r\n self.assertEqual(actual['s2_like_seq'][0][0]['SUBJECT ID'], 's2')\r\n self.assertEqual(actual['s105'][0][2]['SUBJECT ID'], 's1')",
"def get_blast_hits(seqs,\r\n blast_db,\r\n max_e_value=1e-10,\r\n min_pct_identity=0.75,\r\n min_aligned_percent=0.50,\r\n blast_program='blastn'):\r\n max_evalue = max_e_value\r\n min_percent_identity = min_pct_identity\r\n seq_ids = [s[0] for s in seqs]\r\n result = {}\r\n\r\n blast_result = blast_seqs(\r\n seqs, Blastall, blast_db=blast_db,\r\n params={'-p': blast_program, '-n': 'F'},\r\n add_seq_names=False)\r\n\r\n if blast_result['StdOut']:\r\n lines = [x for x in blast_result['StdOut']]\r\n blast_result = BlastResult(lines)\r\n else:\r\n return {}.fromkeys(seq_ids, [])\r\n\r\n for seq_id, seq in seqs:\r\n blast_result_id = seq_id.split()[0]\r\n max_alignment_length = len(seq)\r\n if blast_program == 'blastx':\r\n # if this is a translated blast search, the max alignment\r\n # length is the number of 3mers in seq\r\n max_alignment_length /= 3\r\n min_alignment_length = max_alignment_length * min_aligned_percent\r\n result[seq_id] = []\r\n if blast_result_id in blast_result:\r\n for e in blast_result[blast_result_id][0]:\r\n if (float(e['E-VALUE']) <= max_evalue and\r\n float(e['% IDENTITY']) / 100. >= min_percent_identity and\r\n int(e['ALIGNMENT LENGTH']) >= min_alignment_length):\r\n result[seq_id].append(e)\r\n\r\n return result",
"def run_blast(self, metadata, analysistype, program, outfmt, evalue='1E-5', num_threads=12, num_alignments=1000000,\n perc_identity=70, task='blastn'):\n with progressbar(metadata) as bar:\n for sample in bar:\n # Run the BioPython BLASTn module with the genome as query, fasta (target gene) as db.\n make_path(sample[analysistype].reportdir)\n # Set the name and path of the BLAST report as reportdir/samplename_blastprogram.tsv\n sample[analysistype].report = os.path.join(\n sample[analysistype].reportdir, '{name}_{program}_{at}.tsv'.format(name=sample.name,\n program=program,\n at=analysistype))\n # Check the size of the report (if it exists). If it has size 0, something went wrong on a previous\n # iteration of the script. Delete the empty file in preparation for another try\n try:\n size = os.path.getsize(sample[analysistype].report)\n # If a report was created, but no results entered - program crashed, or no sequences passed\n # thresholds, remove the report, and run the blast analyses again\n if size == 0:\n os.remove(sample[analysistype].report)\n except FileNotFoundError:\n pass\n # Split the extension from the file path\n db = os.path.splitext(sample[analysistype].combinedtargets)[0]\n # Create the command line argument using the appropriate BioPython BLAST wrapper\n if program == 'blastn':\n blast = self.blastn_commandline(sample=sample,\n analysistype=analysistype,\n db=db,\n evalue=evalue,\n num_alignments=num_alignments,\n num_threads=num_threads,\n outfmt=outfmt,\n perc_identity=perc_identity,\n task=task)\n elif program == 'blastp':\n blast = self.blastp_commandline(sample=sample,\n analysistype=analysistype,\n db=db,\n evalue=evalue,\n num_alignments=num_alignments,\n num_threads=num_threads,\n outfmt=outfmt)\n elif program == 'blastx':\n blast = self.blastx_commandline(sample=sample,\n analysistype=analysistype,\n db=db,\n evalue=evalue,\n num_alignments=num_alignments,\n num_threads=num_threads,\n outfmt=outfmt)\n elif program == 'tblastn':\n blast = self.tblastn_commandline(sample=sample,\n analysistype=analysistype,\n db=db,\n evalue=evalue,\n num_alignments=num_alignments,\n num_threads=num_threads,\n outfmt=outfmt)\n elif program == 'tblastx':\n blast = self.tblastx_commandline(sample=sample,\n analysistype=analysistype,\n db=db,\n evalue=evalue,\n num_alignments=num_alignments,\n num_threads=num_threads,\n outfmt=outfmt)\n else:\n blast = str()\n assert blast, 'Something went wrong, the BLAST program you provided ({program}) isn\\'t supported'\\\n .format(program=program)\n # Save the blast command in the metadata\n sample[analysistype].blastcommand = str(blast)\n # Only run blast if the report doesn't exist\n if not os.path.isfile(sample[analysistype].report):\n try:\n blast()\n except ApplicationError as e:\n logging.debug(e)\n try:\n os.remove(sample[analysistype].report)\n except (IOError, ApplicationError):\n pass\n # Return the updated metadata object\n return metadata",
"def _create_execute_blastdbcmd(execute_command):\n\n def execute_blastdbcmd(input_file: str, sequence_file: str, database: str):\n cmd = \"{} -db {} -entry_batch {} > {}\".format(\n BLASTDBCMD_CMD, database, input_file, sequence_file)\n execute_command(cmd)\n\n return execute_blastdbcmd",
"def main():\n\n #Getthefiles\n all_fna_file_path = []\n path_to_all_info = '/Users/gustavotamasco/mdrkrp/project_MDR_KRPgenomes_parsnp'\n #path_to_all_info = argv[1]\n dirpath=os.getcwd()\n os.chdir(path_to_all_info)\n genome_files = list_directories(path_to_all_info)\n os.chdir(\"/Users/gustavotamasco/mdrkrp/plasmids\")\n plasmid_files = list_directories(\"/Users/gustavotamasco/mdrkrp/plasmids\")\n\n\n '''Genomes'''\n #for genome in genome_files:\n #if \"fna\" in genome:\n #print(genome)\n #run_plasflow(genome)\n\n '''Eval Plasmids'''\n for organism in plasmid_files:\n if \"plasflow_plasmids\" in organism:\n run_plasclass(organism)\n run_blastn(organism)\n\n '''Mining info'''\n data = {}\n blast_info_path = \"/Users/gustavotamasco/mdrkrp/plasmids/plasmid_blast\"\n blast_files = list_files_new_source(blast_info_path)\n for org_b in blast_files:\n if \"genome\" not in org_b:\n parse_blast(org_b, blast_info_path, data)",
"def command_gtf2db(raw_args, prog=None):\n\n if prog:\n parser = argparse.ArgumentParser(prog=prog, add_help=False)\n else:\n parser = argparse.ArgumentParser(add_help=False)\n\n def print_message(message):\n if message:\n sys.stderr.write(message)\n else:\n sys.stderr.write(command_gtf2db.__doc__)\n sys.stderr.write('\\n')\n sys.exit(1)\n\n parser.error = print_message\n\n # required\n parser.add_argument(\"-i\", \"--input\", dest=\"input\", metavar=\"GTF_file\")\n parser.add_argument(\"-o\", \"--output\", dest=\"output\", metavar=\"DB_file\")\n\n # debugging and help\n parser.add_argument(\"-h\", \"--help\", dest=\"help\", action='store_true')\n parser.add_argument(\"-d\", \"--debug\", dest=\"debug\", action=\"count\", default=0)\n\n args = parser.parse_args(raw_args)\n\n g2g.configure_logging(args.debug)\n\n if args.help:\n g2g.exit(\"\", parser)\n\n if not args.input:\n g2g.exit(\"No GTF file was specified.\", parser)\n\n if not args.output:\n g2g.exit(\"No output GTG DB file was specified.\", parser)\n\n try:\n gtf_db.gtf2db(args.input, args.output)\n except KeyboardInterrupt as ki:\n LOG.debug(ki)\n except exceptions.G2GValueError as e:\n g2g.exit(e, parser)\n except exceptions.G2GError as e:\n g2g.exit(e, parser)"
] | [
"0.69132483",
"0.63498545",
"0.6059055",
"0.5686107",
"0.5529276",
"0.5422737",
"0.54194593",
"0.54177874",
"0.54152125",
"0.5406426",
"0.53167474",
"0.5235951",
"0.52318496",
"0.52102786",
"0.5169427",
"0.5141408",
"0.5137818",
"0.5122259",
"0.5119625",
"0.50635177",
"0.49775708",
"0.49679065",
"0.4962047",
"0.49588674",
"0.49158356",
"0.4903266",
"0.48933208",
"0.4873001",
"0.48499388",
"0.48449993"
] | 0.821216 | 0 |
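A minimal sketch (hypothetical file names; makeblastdb assumed to be on the PATH) of the database-formatting step that the build_db()/makeblastdb() snippets above wrap: turning a nucleotide FASTA file into a searchable BLAST database.

import subprocess

subprocess.run(
    [
        "makeblastdb",
        "-in", "sequences.fasta",      # input FASTA file (hypothetical name)
        "-input_type", "fasta",
        "-dbtype", "nucl",             # nucleotide database; use "prot" for proteins
        "-parse_seqids",               # keep sequence IDs so entries can be retrieved later
        "-out", "my_blast_db",         # basename of the generated database files
    ],
    check=True,                        # raise CalledProcessError if makeblastdb fails
)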
Execute and retrieve data from standalone RPSBLAST as handles (OBSOLETE). NOTE: This function is obsolete; you are encouraged to use the command line wrapper Bio.Blast.Applications.RpsBlastCommandline instead. Execute and retrieve data from standalone RPSBLAST. blastcmd is the command used to launch the 'rpsblast' executable. database is the path to the database to search against. infile is the path to the file containing the sequence to search with. The return values are two handles, for standard output and standard error. You may pass more parameters to keywds to change the behavior of the search. Otherwise, optional values will be chosen by rpsblast. Please note that this function will give XML output by default, by setting align_view to seven (i.e. command line option -m 7). You should use the NCBIXML.parse() function to read the resulting output. This is because NCBIStandalone.BlastParser() does not understand the plain text output format from rpsblast. WARNING: The following text and associated parameter handling have not received extensive testing. Please report any errors we might have made... Algorithm/Scoring parameters: gapped: whether to do a gapped alignment (T/F). multihit: 0 for multiple hit (default), 1 for single hit. expectation: expectation value cutoff. | def rpsblast(blastcmd, database, infile, align_view="7", **keywds):
import warnings
    warnings.warn("This function is obsolete, you are encouraged to use the command line wrapper Bio.Blast.Applications.RpsBlastCommandline instead.", PendingDeprecationWarning)
_security_check_parameters(keywds)
att2param = {
'multihit' : '-P',
'gapped' : '-g',
'expectation' : '-e',
'range_restriction' : '-L',
'xdrop' : '-X',
'xdrop_final' : '-Z',
'xdrop_extension' : '-y',
'search_length' : '-Y',
'nbits_gapping' : '-N',
'protein' : '-p',
'db_length' : '-z',
'database' : '-d',
'infile' : '-i',
'filter' : '-F',
'case_filter' : '-U',
'believe_query' : '-J',
'nprocessors' : '-a',
'logfile' : '-l',
'html' : '-T',
'descriptions' : '-v',
'alignments' : '-b',
'align_view' : '-m',
'show_gi' : '-I',
'seqalign_file' : '-O',
'align_outfile' : '-o',
}
from Applications import RpsBlastCommandline
cline = RpsBlastCommandline(blastcmd)
cline.set_parameter(att2param['database'], database)
cline.set_parameter(att2param['infile'], infile)
cline.set_parameter(att2param['align_view'], str(align_view))
for key, value in keywds.iteritems():
cline.set_parameter(att2param[key], str(value))
return _invoke_blast(cline) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def blastall(blastcmd, program, database, infile, align_view='7', **keywds):\n\n _security_check_parameters(keywds)\n\n att2param = {\n 'matrix' : '-M',\n 'gap_open' : '-G',\n 'gap_extend' : '-E',\n 'nuc_match' : '-r',\n 'nuc_mismatch' : '-q',\n 'query_genetic_code' : '-Q',\n 'db_genetic_code' : '-D',\n\n 'gapped' : '-g',\n 'expectation' : '-e',\n 'wordsize' : '-W',\n 'strands' : '-S',\n 'keep_hits' : '-K',\n 'xdrop' : '-X',\n 'hit_extend' : '-f',\n 'region_length' : '-L',\n 'db_length' : '-z',\n 'search_length' : '-Y',\n \n 'program' : '-p',\n 'database' : '-d',\n 'infile' : '-i',\n 'filter' : '-F',\n 'believe_query' : '-J',\n 'restrict_gi' : '-l',\n 'nprocessors' : '-a',\n 'oldengine' : '-V',\n\n 'html' : '-T',\n 'descriptions' : '-v',\n 'alignments' : '-b',\n 'align_view' : '-m',\n 'show_gi' : '-I',\n 'seqalign_file' : '-O',\n 'outfile' : '-o',\n }\n import warnings\n warnings.warn(\"This function is obsolete, you are encouraged to the command line wrapper Bio.Blast.Applications.BlastallCommandline instead.\", PendingDeprecationWarning)\n from Applications import BlastallCommandline\n cline = BlastallCommandline(blastcmd)\n cline.set_parameter(att2param['program'], program)\n cline.set_parameter(att2param['database'], database)\n cline.set_parameter(att2param['infile'], infile)\n cline.set_parameter(att2param['align_view'], str(align_view))\n for key, value in keywds.iteritems():\n cline.set_parameter(att2param[key], str(value))\n return _invoke_blast(cline)",
"def blastpgp(blastcmd, database, infile, align_view='7', **keywds):\n\n import warnings\n warnings.warn(\"This function is obsolete, you are encouraged to the command line wrapper Bio.Blast.Applications.BlastpgpCommandline instead.\", PendingDeprecationWarning)\n _security_check_parameters(keywds)\n\n att2param = {\n 'matrix' : '-M',\n 'gap_open' : '-G',\n 'gap_extend' : '-E',\n 'window_size' : '-A',\n 'npasses' : '-j',\n 'passes' : '-P',\n\n 'gapped' : '-g',\n 'expectation' : '-e',\n 'wordsize' : '-W',\n 'keep_hits' : '-K',\n 'xdrop' : '-X',\n 'hit_extend' : '-f',\n 'region_length' : '-L',\n 'db_length' : '-Z',\n 'search_length' : '-Y',\n 'nbits_gapping' : '-N',\n 'pseudocounts' : '-c',\n 'xdrop_final' : '-Z',\n 'xdrop_extension' : '-y',\n 'model_threshold' : '-h',\n 'required_start' : '-S',\n 'required_end' : '-H',\n\n 'program' : '-p',\n 'database' : '-d',\n 'infile' : '-i',\n 'filter' : '-F',\n 'believe_query' : '-J',\n 'nprocessors' : '-a',\n\n 'html' : '-T',\n 'descriptions' : '-v',\n 'alignments' : '-b',\n 'align_view' : '-m',\n 'show_gi' : '-I',\n 'seqalign_file' : '-O',\n 'align_outfile' : '-o',\n 'checkpoint_outfile' : '-C',\n 'restart_infile' : '-R',\n 'hit_infile' : '-k',\n 'matrix_outfile' : '-Q',\n 'align_infile' : '-B',\n }\n from Applications import BlastpgpCommandline\n cline = BlastpgpCommandline(blastcmd)\n cline.set_parameter(att2param['database'], database)\n cline.set_parameter(att2param['infile'], infile)\n cline.set_parameter(att2param['align_view'], str(align_view))\n for key, value in keywds.iteritems():\n cline.set_parameter(att2param[key], str(value))\n return _invoke_blast(cline)",
"def exec_blast(infile, config_file, out_name):\n\tdb, evalue = parse_config(config_file, \"blast\")\n\tfasta_string = SeqIO.read(infile, format=\"fasta\")\n\tresult_handle = NCBIWWW.qblast(\"blastp\", \"nr\", fasta_string.seq)\n\toutput= out_name + \".xml\"\n\tsave_file = open(output, \"w\")\n\tsave_file.write(result_handle.read())\n\tsave_file.close()\n\tresult_handle.close()\n\treturn (output)",
"def exec_blast(infile, config_file, out_name):\n\tdb, evalue = parse_config(config_file, \"blast\")\n\ttry:\n\t\tinput_open = open(infile, \"r\")\n\t\tinput_open.close()\n\texcept:\n\t\traise IOError(\"Cannot open input file %s\" %infile)\n\n\tif is_fasta(infile) == False:\n\t\t\traise TypeError(\"Input file %s must be in fasta format\" %infile)\n\telse:\n\t\tfasta_string = SeqIO.read(infile, format=\"fasta\")\n\t\tresult_handle = NCBIWWW.qblast(\"blastp\", db, fasta_string.seq)\n\t\toutput= out_name + \".xml\"\n\t\tsave_file = open(output, \"w\")\n\t\tsave_file.write(result_handle.read())\n\t\tsave_file.close()\n\t\tresult_handle.close()\n\treturn (output)",
"def run_blast(inputfile, input_type, outputfile, database, args=None, verbose=True):\n\n assert (input_type in ['protein', 'dna']), \"Input type must be either 'protein' or 'dna'\"\n\n cmd = ['diamond']\n\n if input_type == 'protein':\n cmd += ['blastp']\n elif input_type == 'dna':\n cmd += ['blastx']\n\n cmd += ['-d', database]\n cmd += ['-q', inputfile]\n cmd += ['-o', outputfile]\n\n if not args:\n args = \"--more-sensitive --top 10 --quiet\"\n\n cmd += args.split()\n\n if verbose:\n print(' '.join(cmd))\n\n with open(os.devnull, 'w') as devnull:\n try:\n exit_code = call(cmd, stdout=devnull)\n except OSError:\n exit_code = None\n\n return exit_code",
"def blast_reads(number_hits, ncbi_database, organism):\n #blast_reads(number_hits, ncbi_database, entrez_query)\n print(\"Searching for BLAST hits...\")\n fasta_string = open(\"Log_Directory/blast_queries.fasta\").read()\n print (\"The ncbi database being searched is:\", ncbi_database)\n if len(organism) > 0:\n print (\"The organism being searched is: \", organism)\n query ='\"txid'+str(organism)+'\"'\n result_handle = NCBIWWW.qblast(\"blastn\", ncbi_database, fasta_string, entrez_query=query, hitlist_size=number_hits,\n expect=10.0, nucl_penalty=-2, nucl_reward=1, megablast=True, word_size=28, expect_low=True, gapcosts='0 2')\n else:\n print (\"No organism is designated\")\n result_handle = NCBIWWW.qblast(\"blastn\", ncbi_database, fasta_string, hitlist_size=number_hits)\n blast_result = open(\"Log_Directory/blast_results.xml\", \"w\")\n blast_result.write(result_handle.read())\n blast_result.close()\n result_handle.close()",
"def _blast(query, output_pssm, output, blastdb):\n psiblast_command = \"psiblast -db {:} -query {:} -out_ascii_pssm {:} \" + \\\n \"-save_pssm_after_last_round -out {:}\"\n log_out = \"{}.out\".format(output)\n log_err = \"{}.err\".format(output)\n with open(log_out, 'a') as f_out:\n with open(log_err, 'a') as f_err:\n command = psiblast_command.format(\n blastdb, query, output_pssm, output)\n f_out.write('=================== CALL ===================\\n')\n f_out.write(command + '\\n')\n subprocess.check_call(\n command, shell=True, stderr=f_err, stdout=f_out)\n f_out.write('================= END CALL =================\\n')",
"def _invoke_blast(cline):\n import subprocess, sys\n blast_cmd = cline.program_name\n if not os.path.exists(blast_cmd):\n raise ValueError(\"BLAST executable does not exist at %s\" % blast_cmd)\n #We don't need to supply any piped input, but we setup the\n #standard input pipe anyway as a work around for a python\n #bug if this is called from a Windows GUI program. For\n #details, see http://bugs.python.org/issue1124861\n blast_process = subprocess.Popen(str(cline),\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n universal_newlines=True,\n shell=(sys.platform!=\"win32\"))\n blast_process.stdin.close()\n return blast_process.stdout, blast_process.stderr",
"def blastn_commandline(cls):\n command = generate_path(\"../../blast/ncbi-blast*/bin/blastn\")\n fasta = generate_path(\"tmp/validate.fasta\")\n db = generate_path(\"data/blast/ValidationDB\")\n results = generate_path(\"tmp/validate.xml\")\n\n subprocess.call(\n '%s -query %s -db %s -outfmt 5 -out %s -best_hit_score_edge 0.05 '\n '-best_hit_overhang 0.1' % (\n command, fasta, db, results\n ), shell=True\n )",
"def blastp(database, query, output_to_file = False, output_file = None,\n overwrite = False, outfmt = 7):\n if output_to_file:\n if os.path.exists(output_file) and not overwrite:\n return output_file\n cmd = 'blastp -db {} -query {} -outfmt {} -out {} -num_alignments 1'.\\\n format(database, query, outfmt, output_file)\n else:\n cmd = 'blastp -db {} -query {} -outfmt {} -num_alignments 1'.format(\n database, query, outfmt)\n\n printed_output = subprocess.check_output(cmd, shell=True)\n if output_to_file:\n return output_file\n return printed_output",
"def qiime_blast_seqs(seqs,\r\n blast_constructor=Blastall,\r\n blast_program='blastn',\r\n blast_db=None,\r\n refseqs=None,\r\n refseqs_fp=None,\r\n blast_mat_root=None,\r\n params=None,\r\n WorkingDir=None,\r\n seqs_per_blast_run=1000,\r\n is_protein=False,\r\n HALT_EXEC=False):\r\n\r\n assert blast_db or refseqs_fp or refseqs, \\\r\n 'Must provide either a blast_db or a fasta ' +\\\r\n 'filepath containing sequences to build one.'\r\n\r\n if refseqs_fp:\r\n blast_db, db_files_to_remove =\\\r\n build_blast_db_from_fasta_path(refseqs_fp,\r\n output_dir=WorkingDir,\r\n is_protein=is_protein)\r\n elif refseqs:\r\n blast_db, db_files_to_remove =\\\r\n build_blast_db_from_fasta_file(refseqs,\r\n output_dir=WorkingDir,\r\n is_protein=is_protein)\r\n else:\r\n db_files_to_remove = []\r\n\r\n if params is None:\r\n params = {}\r\n params[\"-d\"] = blast_db\r\n params[\"-p\"] = blast_program\r\n\r\n blast_app = blast_constructor(\r\n params=params,\r\n blast_mat_root=blast_mat_root,\r\n InputHandler='_input_as_seq_id_seq_pairs',\r\n WorkingDir=WorkingDir,\r\n SuppressStderr=True,\r\n HALT_EXEC=HALT_EXEC)\r\n\r\n current_seqs = []\r\n blast_results = BlastResult([])\r\n for seq in seqs:\r\n current_seqs.append(seq)\r\n if len(current_seqs) % seqs_per_blast_run == 0:\r\n if blast_results:\r\n blast_results.update(\r\n BlastResult(blast_app(current_seqs)['StdOut']))\r\n else:\r\n blast_results = BlastResult(blast_app(current_seqs)['StdOut'])\r\n current_seqs = []\r\n\r\n # clean-up run: blast the remaining sequences\r\n blast_results.update(\r\n BlastResult(blast_app(current_seqs)['StdOut']))\r\n\r\n remove_files(db_files_to_remove)\r\n\r\n return blast_results",
"def get_ncbi_pdb_blast(sequence, file_name=None, blast_type=\"blastp\",\n expect=0.01):\n assert (blast_type in [\"blastp\", \"blastn\"])\n if (sequence[-1] == '*'):\n sequence = sequence[:-1]\n if (not sequence.isalpha()):\n raise Sorry(\"The sequence contains non-alphabetical characters; in \"+\n \"addition to A-Z, only an asterisk denoting a stop codon is permitted.\")\n assert (expect >= 0)\n try :\n from Bio.Blast import NCBIWWW\n except ImportError :\n raise Sorry(\"You need to have BioPython installed to use this function.\")\n # FIXME will this use the HTTP proxy if defined?\n blast = NCBIWWW.qblast(blast_type, \"pdb\", sequence, expect=expect)\n blast_out = blast.read()\n if (file_name is not None):\n f = open(file_name, \"w\")\n f.write(blast_out)\n f.close()\n return blast_out",
"def blast(self, analysis_id, organism_id, input, blastdb=None, blastdb_id=None,\n re_name=None, query_type=\"polypeptide\", match_on_name=False, skip_missing=False):\n\n if blastdb_id:\n found_db = self.session.query(self.model.db).filter_by(db_id=blastdb_id)\n if not found_db:\n raise Exception(\"Invalid db ID\")\n elif blastdb:\n found_db = self.session.query(self.model.db).filter_by(name=blastdb)\n if not found_db:\n raise Exception(\"Invalid db name\")\n blastdb_id = found_db.one().db_id\n\n if not blastdb_id:\n raise Exception(\"Either blastdb or blastdb_id is required\")\n\n res = self.session.query(self.model.analysis).filter_by(analysis_id=analysis_id)\n if not res.count():\n raise Exception(\"Analysis with the id {} was not found\".format(analysis_id))\n\n # Cache many things to speed up loading\n self._reset_cache()\n seqterm = self.ci.get_cvterm_id(query_type, 'sequence')\n self._init_feature_cache(organism_id, seqterm, match_on_name)\n\n self._init_analysisfeature_cache(analysis_id)\n\n self._init_analysisprop_cache()\n\n self._hit_details_cache = None\n\n if not os.path.exists(input):\n raise Exception(\"{} was not found\".format(input))\n\n self._setup_tables(\"blast\")\n\n count_ins = self._parse_blast_xml(analysis_id, blastdb_id, input, re_name, query_type, True, organism_id, skip_missing)\n\n blastdb_ap = self.ci.get_cvterm_id('analysis_blast_blastdb', 'tripal')\n self._add_analysisprop(analysis_id, type_id=blastdb_ap, value=blastdb_id)\n\n self.session.commit()\n\n self._reset_cache()\n\n return {'inserted': count_ins}",
"def sequence_BLAST(processedBLAST, inputFile, database, BLASTLoc, SEG, cores): \n\n # Setup the parameters for the BLASTing.\n outputLoc = inputFile.split('.')[0] + '.tmp' \n query = ' -query ' + inputFile\n out = ' -out ' + outputLoc\n evalue = ' -evalue 1'\n inclusionEThresh = ' -inclusion_ethresh 0.0001'\n numIterations = ' -num_iterations 3'\n gapTrigger = ' -gap_trigger 18'\n numDescriptions = ' -num_descriptions 10000'\n numAlignments = ' -num_alignments 10000'\n dbsize = ' -dbsize 0'\n db = ' -db ' + database\n outputFormat = ' -outfmt \"7 qseqid sseqid pident length evalue\"'\n if SEG:\n seg = ' -seg yes'\n else:\n seg = ' -seg no'\n numThreads = ' -num_threads ' + str(cores)\n argsPSI = (query + out + evalue + inclusionEThresh + numIterations + gapTrigger + numDescriptions +\n numAlignments + dbsize + db + outputFormat + seg + numThreads\n )\n # Perform the BLASTing.\n subprocess.call(BLASTLoc + argsPSI, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n # Process the BLAST output.\n processPSIoutput.main(outputLoc, processedBLAST)",
"def run_blast(self, metadata, analysistype, program, outfmt, evalue='1E-5', num_threads=12, num_alignments=1000000,\n perc_identity=70, task='blastn'):\n with progressbar(metadata) as bar:\n for sample in bar:\n # Run the BioPython BLASTn module with the genome as query, fasta (target gene) as db.\n make_path(sample[analysistype].reportdir)\n # Set the name and path of the BLAST report as reportdir/samplename_blastprogram.tsv\n sample[analysistype].report = os.path.join(\n sample[analysistype].reportdir, '{name}_{program}_{at}.tsv'.format(name=sample.name,\n program=program,\n at=analysistype))\n # Check the size of the report (if it exists). If it has size 0, something went wrong on a previous\n # iteration of the script. Delete the empty file in preparation for another try\n try:\n size = os.path.getsize(sample[analysistype].report)\n # If a report was created, but no results entered - program crashed, or no sequences passed\n # thresholds, remove the report, and run the blast analyses again\n if size == 0:\n os.remove(sample[analysistype].report)\n except FileNotFoundError:\n pass\n # Split the extension from the file path\n db = os.path.splitext(sample[analysistype].combinedtargets)[0]\n # Create the command line argument using the appropriate BioPython BLAST wrapper\n if program == 'blastn':\n blast = self.blastn_commandline(sample=sample,\n analysistype=analysistype,\n db=db,\n evalue=evalue,\n num_alignments=num_alignments,\n num_threads=num_threads,\n outfmt=outfmt,\n perc_identity=perc_identity,\n task=task)\n elif program == 'blastp':\n blast = self.blastp_commandline(sample=sample,\n analysistype=analysistype,\n db=db,\n evalue=evalue,\n num_alignments=num_alignments,\n num_threads=num_threads,\n outfmt=outfmt)\n elif program == 'blastx':\n blast = self.blastx_commandline(sample=sample,\n analysistype=analysistype,\n db=db,\n evalue=evalue,\n num_alignments=num_alignments,\n num_threads=num_threads,\n outfmt=outfmt)\n elif program == 'tblastn':\n blast = self.tblastn_commandline(sample=sample,\n analysistype=analysistype,\n db=db,\n evalue=evalue,\n num_alignments=num_alignments,\n num_threads=num_threads,\n outfmt=outfmt)\n elif program == 'tblastx':\n blast = self.tblastx_commandline(sample=sample,\n analysistype=analysistype,\n db=db,\n evalue=evalue,\n num_alignments=num_alignments,\n num_threads=num_threads,\n outfmt=outfmt)\n else:\n blast = str()\n assert blast, 'Something went wrong, the BLAST program you provided ({program}) isn\\'t supported'\\\n .format(program=program)\n # Save the blast command in the metadata\n sample[analysistype].blastcommand = str(blast)\n # Only run blast if the report doesn't exist\n if not os.path.isfile(sample[analysistype].report):\n try:\n blast()\n except ApplicationError as e:\n logging.debug(e)\n try:\n os.remove(sample[analysistype].report)\n except (IOError, ApplicationError):\n pass\n # Return the updated metadata object\n return metadata",
"def do_blast(self,arg):\n try:\n argumentos=arg.strip(\"\\n\").split(\" \")\n if len(argumentos)==3:\n Blast=My_Blast(argumentos[0], argumentos[1], argumentos[2])\n TRESH=input('Qual é o valor do e-value Tresh: ')\n Blast.blast(TRESH)\n \n else:\n print(\"Número de argumentos errados!\")\n except:\n print(\"Erro a executar o blast!\")",
"def parse_blast(metadata, analysistype, fieldnames, cutoff, program):\n for sample in metadata:\n # Initialise a list to store the BLAST outputs\n sample[analysistype].blastlist = list()\n # Initialise a dictionary to store all the target sequences\n sample[analysistype].targetsequence = dict()\n try:\n # Open the sequence profile file as a dictionary\n blastdict = DictReader(open(sample[analysistype].report), fieldnames=fieldnames, dialect='excel-tab')\n resultdict = dict()\n # Go through each BLAST result\n for row in blastdict:\n # Ignore the headers\n if row['query_id'].startswith(fieldnames[0]):\n pass\n else:\n # Create the subject length variable - if the sequences are DNA (e.g. blastn), use the subject\n # length as usual; if the sequences are protein (e.g. tblastx), use the subject length / 3\n if program == 'blastn' or program == 'blastp' or program == 'blastx':\n subject_length = float(row['subject_length'])\n\n else:\n subject_length = float(row['subject_length']) / 3\n # Calculate the percent identity and extract the bitscore from the row\n # Percent identity is the (length of the alignment - num mismatches) / total subject length\n percentidentity = float('{:0.2f}'.format((float(row['positives']) - float(row['gaps'])) /\n subject_length * 100))\n # Create a percent_match dictionary entry\n row['percent_match'] = percentidentity\n # Remove unwanted pipes added to the name\n target = row['subject_id'].lstrip('gb|').rstrip('|') if '|' in row['subject_id'] else \\\n row['subject_id']\n row['subject_id'] = row['subject_id'].lstrip('gb|').rstrip('|') if '|' in row['subject_id'] \\\n else row['subject_id']\n # If the percent identity is greater than the cutoff\n if percentidentity >= cutoff:\n # Append the hit dictionary to the list\n sample[analysistype].blastlist.append(row)\n # Update the dictionary with the target and percent identity\n resultdict.update({target: percentidentity})\n # Determine if the orientation of the sequence is reversed compared to the reference\n if int(row['subject_end']) < int(row['subject_start']):\n # Create a sequence object using Biopython\n seq = Seq(row['query_sequence'])\n # Calculate the reverse complement of the sequence\n querysequence = str(seq.reverse_complement())\n # If the sequence is not reversed, use the sequence as it is in the output\n else:\n querysequence = row['query_sequence']\n # Add the sequence in the correct orientation to the sample\n try:\n sample[analysistype].targetsequence[target].append(querysequence)\n except (AttributeError, KeyError):\n sample[analysistype].targetsequence[target] = list()\n sample[analysistype].targetsequence[target].append(querysequence)\n # Add the percent identity to the object\n sample[analysistype].blastresults = resultdict\n # Populate missing results with 'NA' values\n if len(resultdict) == 0:\n sample[analysistype].blastresults = 'NA'\n except FileNotFoundError:\n sample[analysistype].blastresults = 'NA'\n return metadata",
"def rbpdb_data_load(rna_info, out=None):\n del out # this function doesn't emit progress status (yet)!\n rbpdb_protein_file_path = (\n \"./website/data/RBPDB_v1.3.1_proteins_human_2012-11-21.tdt\"\n )\n letter_strength = RBPDB_MOTIF_PWM_LETTER_STRENGTH\n n_repeat_req = RBPDB_MOTIF_N_REPEAT_REQ\n rna_seq = get_human_seq(rna_info)\n\n experiment_id_to_pwm_dict = (\n picklify(\n generate_rbpdb_experimental_to_pwm, letter_strength, n_repeat_req\n )\n )\n protein_id_to_experimental_ids_dict = (\n picklify(generate_rbpdb_protein_to_experiment_id)\n )\n experiment_id_to_columns_dict = (\n picklify(generate_rbpdb_experiment_to_columns)\n )\n with open(rbpdb_protein_file_path) as handle:\n _ = handle.readline().strip().split('\\t')\n # columns here is expected to have the following information in the\n # following order:\n # protein_id, annotation_id, creation_date, update_date, gene_name,\n # gene_description, species, taxID, domains, aliases, flag, flag_notes,\n # some_other_id\n protein_columns = handle.readline().replace(\"\\n\", \"\").split('\\t')\n while protein_columns != ['']:\n assert len(protein_columns) == 13\n # We only care about human RBPs for now.\n if protein_columns[10] == \"0\":\n protein_columns = (\n handle.readline().replace(\"\\n\", \"\").split('\\t')\n )\n continue\n rbp = protein_columns[4]\n protein_id = protein_columns[0]\n\n if protein_id not in protein_id_to_experimental_ids_dict:\n # No experiments associated. So no data to be had\n protein_columns = (\n handle.readline().replace(\"\\n\", \"\").split('\\t')\n )\n continue\n\n for experiment_id in (\n protein_id_to_experimental_ids_dict[protein_id]\n ):\n assert (\n experiment_id in experiment_id_to_pwm_dict\n or experiment_id == \"410\"\n )\n if experiment_id == \"410\":\n continue\n pwms = experiment_id_to_pwm_dict[experiment_id]\n for pwm in pwms:\n assert len(pwm[\"A\"]) > 0\n experimental_columns = (\n experiment_id_to_columns_dict[experiment_id]\n )\n assert len(experimental_columns) == 15\n total_columns = protein_columns + experimental_columns\n annotation = (\n ANNOTATION_COLUMN_DELIMITER.join(\n [\n total_columns[i]\n for i in rbpdb_columns_of_interest\n ]\n )\n )\n\n if pwm_degree_of_freedom(pwm) >= 2048:\n # experimentally shown that by this point naive brute\n # force is faster. Bound could be\n # reduced.\n sites = pwm_scan_naive_brute_force(rna_seq, pwm)\n else:\n sites = pwm_scan(rna_seq, pwm)\n\n if not sites:\n continue\n\n for start, end in sites:\n yield rbp, start, end, annotation\n\n protein_columns = handle.readline().replace(\"\\n\", \"\").split('\\t')",
"def summarize_blast_output(blast_out=None, blast_file=None,\n min_identity=None, expect=None, stop_if_no_alignment=True):\n assert ([blast_out, blast_file].count(None) == 1)\n from Bio.Blast import NCBIXML\n import iotbx.pdb.fetch\n if (blast_out is not None):\n blast_in = StringIO(blast_out)\n else :\n assert os.path.isfile(blast_file)\n blast_in = open(blast_file)\n parsed = NCBIXML.parse(blast_in)\n blast = next(parsed)\n if (len(blast.alignments) == 0):\n if stop_if_no_alignment:\n raise Sorry(\"No matching sequences!\")\n else: return list()\n results = []\n for i_hit, hit in enumerate(blast.alignments):\n pdb_chain_id = str(hit.accession)\n #hit.accession may only have pdb_id, e.g. 1EMB\n if len(pdb_chain_id.split(\"_\")) > 1:\n pdb_id, chain_id = pdb_chain_id.split(\"_\")\n else:\n pdb_id = pdb_chain_id\n chain_id = None\n #\n hsp = hit.hsps[0]\n assert (hsp.align_length > 0)\n identity = 100 * hsp.identities / hsp.align_length\n if (min_identity is not None) and (identity < min_identity):\n continue\n # XXX this is really appalling, but the NCBI groups together identical\n # sequences in its BLAST output, so I need to parse the accession code\n # strings to extract the individual PDB IDs\n hit_def_fields = hit.hit_def.split(\"|\")\n all_ids = []\n all_ids.append([pdb_id,chain_id])\n for i_field, field in enumerate(hit_def_fields):\n if (field == \"pdb\") and (i_field < len(hit_def_fields) -1):\n next_pdb_id = hit_def_fields[i_field + 1]\n if \"Chain\" in hit_def_fields[i_field + 2]:\n next_chain_id = hit_def_fields[i_field + 2].split()[0]\n else:\n next_chain_id = None\n if (iotbx.pdb.fetch.looks_like_pdb_id(next_pdb_id)):\n all_ids.append([next_pdb_id,next_chain_id])\n summary = blast_hit(\n hit_num=i_hit+1,\n pdb_id=pdb_id,\n chain_id=chain_id,\n evalue=hsp.expect,\n length=hsp.align_length,\n identity=identity,\n positives=100*hsp.positives/hsp.align_length,\n hsp = hsp,\n all_ids=all_ids)\n results.append(summary)\n return results",
"def parseBlastOutput(blast_path):\r\n\t\t#unpruned_read_objects = {}\r\n\t\t#ref_pruned_reads = {}\r\n\r\n\t\tunpruned_read_objects = {key:[] for key in COMMON_NAME.keys()}\r\n\t\tref_pruned_reads = {key:[] for key in COMMON_NAME.keys()}\r\n\t\twith open(blast_path,\"r\") as f:\r\n\t\t\t\tfor line in f:\r\n\r\n\t\t\t\t\t\tline = line.rstrip()\r\n\t\t\t\t\t\tline = line.rsplit()\r\n\t\t\t\t\t\t# print(line, file=sys.stderr,flush=True)\r\n\t\t\t\t\t\tif len(line) > 1:\r\n\t\t\t\t\t\t\t\tread_name = line[0]\r\n\t\t\t\t\t\t\t\tsubject_hit = line[1]\r\n\t\t\t\t\t\t\t\tlength = int(line[3])\r\n\t\t\t\t\t\t\t\t# sstart = int(line[6])\r\n\t\t\t\t\t\t\t\t# send = int(line[7])\r\n\t\t\t\t\t\t\t\tsstart = int(line[8])\r\n\t\t\t\t\t\t\t\tsend = int(line[9])\r\n\t\t\t\t\t\t\t\te_score = float(line[10])\r\n\r\n\t\t\t\t\t\t\t\t# CREATE A READ OBJECT FOR EACH OF THESE SIGNIFICANT HITS TO WOLBACHIA ENDOSYMBIONT.\r\n\t\t\t\t\t\t\t\t# IF A READ HITS THE SAME SUBJECT MORE THAN ONCE,\r\n\t\t\t\t\t\t\t\t# SAVE ONLY THE MOST SIGNIFICANT HIT (LOWEST E-SCORE).\r\n\t\t\t\t\t\t\t\tif e_score < 1e-10 and length > 40:\r\n\t\t\t\t\t\t\t\t\t\t# if subject_hit in ENDOSYMBIONT_IDS:\r\n\t\t\t\t\t\t\t\t\t\t# wol_host = ENDOSYMBIONT_IDS[subject_hit]\r\n\t\t\t\t\t\t\t\t\t\tcurrent_read = Read(read_name,subject_hit,length,sstart,send,e_score)\r\n\t\t\t\t\t\t\t\t\t\tif subject_hit in unpruned_read_objects:\r\n\t\t\t\t\t\t\t\t\t\t\t\tunpruned_read_objects[subject_hit].append(current_read)\r\n\t\t\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\t\t\t\tunpruned_read_objects[subject_hit] = [current_read]\r\n\t\tif len(unpruned_read_objects) > 0:\r\n\t\t\t\tfor ref in unpruned_read_objects.keys():\r\n\t\t\t\t\t\tpruned_reads_ref = prune(unpruned_read_objects[ref])\r\n\t\t\t\t\t\tref_pruned_reads[ref] = pruned_reads_ref\r\n\r\n\t\t\t\treturn unpruned_read_objects, ref_pruned_reads\r\n\t\telse:\r\n\t\t\t\treturn None, None",
"def blast_genome(seqs, blast_db, e_value, max_hits, word_size, working_dir,\r\n blast_mat_root, extra_params=[], DEBUG=True):\r\n\r\n # set up params to use with blastp or\r\n params = {\r\n # matrix\r\n \"-M\": \"BLOSUM62\",\r\n\r\n # max procs\r\n \"-a\": \"1\",\r\n\r\n # expectation\r\n \"-e\": e_value,\r\n\r\n # max seqs to show\r\n \"-b\": max_hits,\r\n\r\n # Word size\r\n \"-W\": word_size,\r\n\r\n # max one line descriptions\r\n \"-v\": max_hits,\r\n\r\n # tabular output\r\n \"-m\": \"9\",\r\n\r\n # program\r\n \"-p\": \"blastn\"\r\n }\r\n params.update(extra_params)\r\n\r\n output = blast_seqs(seqs,\r\n Blastall,\r\n blast_db=blast_db,\r\n params=params,\r\n WorkingDir=working_dir,\r\n add_seq_names=False,\r\n blast_mat_root=blast_mat_root)\r\n\r\n raw_output = [x for x in output['StdOut']]\r\n return raw_output",
"def test_blast_genome(self):\r\n\r\n formatdb_cmd = 'formatdb -p F -o T -i %s' % self.subjectdb_fp\r\n system(formatdb_cmd)\r\n self._paths_to_clean_up.append(\"formatdb.log\")\r\n for suffix in [\"nhr\", \"nin\", \"nsd\", \"nsi\", \"nsq\"]:\r\n self._paths_to_clean_up.append(\".\".join(\r\n [self.subjectdb_fp, suffix]))\r\n\r\n raw_output = blast_genome(TEST_BLAST_DB_LINES, self.subjectdb_fp,\r\n e_value=1e-4, max_hits=100, word_size=28,\r\n working_dir=\"./\", blast_mat_root=None)\r\n\r\n i = 0\r\n for line in raw_output:\r\n\r\n if line.startswith(\"#\"):\r\n i += 1\r\n continue # comments depend on tmpfilename, BLAST version\r\n self.assertEqual(raw_output[i], EXP_BLAST_OUTPUT[i])\r\n i += 1",
"def build_blastscreen_cmd(queryfile, blastexe, blastdb, outdir=None):\n if outdir is None:\n stem = os.path.splitext(queryfile)[0]\n else:\n filestem = os.path.splitext(os.path.split(queryfile)[-1])[0]\n stem = os.path.join(outdir, filestem)\n return NcbiblastnCommandline(\n query=queryfile,\n cmd=blastexe,\n db=blastdb,\n out=stem + \".blasttab\",\n task=\"blastn-short\",\n max_target_seqs=1,\n outfmt=6,\n perc_identity=90,\n ungapped=True,\n )",
"def makeblastdb(fasta, program='blastn', returncmd=False, **kwargs):\n # Convert the options dictionary to a string\n options = kwargs_to_string(kwargs)\n # Set the dbtype appropriately\n if program == 'blastn' or program == 'tblastn' or program == 'tblastx':\n dbtype = 'nucl'\n else:\n dbtype = 'prot'\n # Remove the file extension from the file name\n output = os.path.splitext(fasta)[0]\n cmd = 'makeblastdb -in {fasta} -parse_seqids -max_file_sz 2GB -dbtype {dbtype} -out {output}{options}' \\\n .format(fasta=fasta,\n dbtype=dbtype,\n output=output,\n options=options)\n # Check if database already exists\n if not os.path.isfile('{output}.nhr'.format(output=output)):\n out, err = run_subprocess(cmd)\n else:\n out = str()\n err = str()\n if returncmd:\n return out, err, cmd\n else:\n return out, err",
"def format_blast(makeblastdb_path, fname):\n # The script is written in shell, so this function just calls it and\n # checks the output\n # Build the shell command\n cmd = ['bash', DBFORMAT_SCRIPT, makeblastdb_path, fname]\n # Execute the script\n # shell=False to ensure that we aren't executing commands from untrusted\n # sources\n p = subprocess.Popen(\n cmd,\n shell=False,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n out, err = p.communicate()\n return (out, err)",
"def BlastSeq(Query, Subject, Out, BlastDir):\n print Out\n print Out.split('.')\n if len(Out.split('.'))==1:\n MakeDir(Out)\n OutPath='.'.join(Out.split('.'))\n print (OutPath)\n OutFile=OutPath+'/output.csv'\n errlog=open(OutPath+'/_err.log', 'a')\n else:\n OutFile=Out\n errfile='.'.join( Out.split('.')[:1])+'_err.log'\n errlog=open(errfile, 'a')\n\n\n## column_spec='10 qseqid sseqid pident length mismatch gapopen qstart qend sstart send evalue btop'\n column_spec='10 qseqid sseqid pident length mismatch gapopen qstart qend sstart send evalue qcovs btop'\n BLAST=subprocess.Popen([BlastDir, '-query',Query, '-subject',Subject, '-outfmt', column_spec, '-out', OutFile], stderr=errlog)\n BLAST.communicate()\n errlog.close()\n return OutFile",
"def run_blastn(blastn_path, db, input_fasta, blast_threads=1):\n chunk_hits = mkstempfname('.hits.txt.gz')\n\n blastnCmd = [\n blastn_path, '-db', db, '-word_size', '16', '-num_threads', str(blast_threads), '-evalue', '1e-6', '-outfmt',\n '6', '-max_target_seqs', '1', '-query', input_fasta,\n ]\n log.debug(' '.join(blastnCmd))\n blast_pipe = subprocess.Popen(blastnCmd, stdout=subprocess.PIPE)\n\n with util.file.open_or_gzopen(chunk_hits, 'wt') as outf:\n # strip tab output to just query read ID names and emit\n last_read_id = None\n for line in blast_pipe.stdout:\n line = line.decode('UTF-8').rstrip('\\n\\r')\n read_id = line.split('\\t')[0]\n # only emit if it is not a duplicate of the previous read ID\n if read_id != last_read_id:\n last_read_id = read_id\n outf.write(read_id + '\\n')\n\n if blast_pipe.poll():\n raise CalledProcessError()\n os.unlink(input_fasta)\n\n return chunk_hits",
"def test_w_preexising_blastdb(self):\r\n # pre-existing blast db\r\n inseqs = parse_fasta(self.inseqs1)\r\n actual = qiime_blast_seqs(inseqs, blast_db=self.blast_db)\r\n self.assertEqual(len(actual), 5)\r\n\r\n # couple of sanity checks against command line blast\r\n self.assertEqual(actual['s2_like_seq'][0][0]['SUBJECT ID'], 's2')\r\n self.assertEqual(actual['s105'][0][2]['SUBJECT ID'], 's1')",
"def parse_blast_XML(blast_xml, config_file):\n\tblast_xml_op = open (blast_xml, 'r')\n\tEntrez.email = parse_config(config_file, \"email\")\n\tdb, evalue = parse_config(config_file, \"blast\")\n\n\tfor record in NCBIXML.parse(blast_xml_op):\n\t\tfor align in record.alignments:\n\t\t\thit_id = align.hit_id.split(\"|\")\n\t\t\tprev_eval = 1\n\t\t\tfor hsp in align.hsps:\n\t\t\t\tif hsp.expect < prev_eval:\n\t\t\t\t\tprev_eval = hsp.expect\n\t\t\tefetch = Entrez.efetch(db=\"protein\", id=hit_id, rettype=\"fasta\")\n\t\t\tfor line in efetch:\n\t\t\t\tline = line.rstrip()\n\t\t\t\tif line.startswith(\">\"):\n\t\t\t\t\tid_info = line\n\t\t\t\t\tsequence = \"\"\n\t\t\t\telse:\n\t\t\t\t\tsequence += line\n\t\t\tsequence += line\n\n\t\t\torganism = id_info[id_info.find(\"[\") + 1:id_info.find(\"]\")]\n\t\t\torganism = organism.split()\n\t\t\tif len(organism) != 1:\n\t\t\t\tspecies = str(organism[0] + \"_\" + organism[1])\n\n\t\t\tif prev_eval <= float(evalue):\n\t\t\t\tyield BlastResult(hit_id[1], species, sequence, prev_eval)",
"def qiime_blastx_seqs(seqs,\r\n blast_constructor=Blastall,\r\n blast_db=None,\r\n refseqs=None,\r\n refseqs_fp=None,\r\n blast_mat_root=None,\r\n params={},\r\n WorkingDir=None,\r\n seqs_per_blast_run=1000,\r\n HALT_EXEC=False):\r\n return qiime_blast_seqs(seqs,\r\n blast_constructor=blast_constructor,\r\n blast_program='blastx',\r\n blast_db=blast_db,\r\n refseqs=refseqs,\r\n refseqs_fp=refseqs_fp,\r\n blast_mat_root=blast_mat_root,\r\n params={},\r\n WorkingDir=WorkingDir,\r\n seqs_per_blast_run=seqs_per_blast_run,\r\n is_protein=True,\r\n HALT_EXEC=HALT_EXEC)"
] | [
"0.6567743",
"0.6469183",
"0.62038195",
"0.6169676",
"0.5998038",
"0.5889682",
"0.57250804",
"0.5698743",
"0.55632794",
"0.55214614",
"0.5490336",
"0.53914905",
"0.52392304",
"0.5195957",
"0.51946354",
"0.51559067",
"0.5152533",
"0.5110173",
"0.5094674",
"0.50776154",
"0.50564665",
"0.5022634",
"0.50104266",
"0.4983607",
"0.4973502",
"0.49350655",
"0.4912491",
"0.4887762",
"0.4881815",
"0.48568428"
] | 0.74148107 | 0 |
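A minimal usage sketch for the obsolete rpsblast() wrapper above, assuming a local rpsblast executable at /usr/bin/rpsblast, a formatted RPS-BLAST profile database named Cdd, and a query file query.fasta (all hypothetical); new code should build a Bio.Blast.Applications.RpsBlastCommandline directly instead.

from Bio.Blast import NCBIXML

# blastcmd must be the full path to the executable: the helper that launches it
# checks os.path.exists() on the command before running anything.
result_handle, error_handle = rpsblast(
    "/usr/bin/rpsblast", "Cdd", "query.fasta", expectation=0.001
)

# align_view defaults to "7" (XML), so the stdout handle can be fed straight
# into Biopython's XML parser.
for record in NCBIXML.parse(result_handle):
    for alignment in record.alignments:
        print(alignment.title)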
Start BLAST and return handles for stdout and stderr (PRIVATE). Expects a command line wrapper object from Bio.Blast.Applications. | def _invoke_blast(cline):
import subprocess, sys
blast_cmd = cline.program_name
if not os.path.exists(blast_cmd):
raise ValueError("BLAST executable does not exist at %s" % blast_cmd)
    #We don't need to supply any piped input, but we set up the
    #standard input pipe anyway as a workaround for a Python
#bug if this is called from a Windows GUI program. For
#details, see http://bugs.python.org/issue1124861
blast_process = subprocess.Popen(str(cline),
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
shell=(sys.platform!="win32"))
blast_process.stdin.close()
return blast_process.stdout, blast_process.stderr | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def main():\n\targs = sys.argv[1:]\n\t# If stdin is not empty (being piped to)\n\tif not sys.stdin.isatty():\n\t\targs += sys.stdin.readlines()\n\tcommand = Main()\n\tcatch = lnk.errors.Catch(1)\n\tcatch.catch(command.main, args, standalone_mode=False)",
"def blastn_commandline(cls):\n command = generate_path(\"../../blast/ncbi-blast*/bin/blastn\")\n fasta = generate_path(\"tmp/validate.fasta\")\n db = generate_path(\"data/blast/ValidationDB\")\n results = generate_path(\"tmp/validate.xml\")\n\n subprocess.call(\n '%s -query %s -db %s -outfmt 5 -out %s -best_hit_score_edge 0.05 '\n '-best_hit_overhang 0.1' % (\n command, fasta, db, results\n ), shell=True\n )",
"def start(self):\n cmd = self.doCommand(self.args)\n if cmd is not None:\n cmd.join()\n else:\n self.out = self.error",
"def call_mallet(cmd, classpath=None, stdin=None, stdout=None, stderr=None,\n blocking=True):\n if _mallet_classpath is None:\n config_mallet()\n \n # Set up the classpath\n if classpath is None:\n classpath = _mallet_classpath\n else:\n classpath += ':' + _mallet_classpath\n # Delegate to java()\n return java(cmd, classpath, stdin, stdout, stderr, blocking)",
"def main():\n sys.exit(RBExt().run(sys.argv[1:]))",
"def start(self):\n self.start_time = dt.datetime.now()\n self.call = ' '.join(sys.argv)\n self.commands = []",
"def run_blast(inputfile, input_type, outputfile, database, args=None, verbose=True):\n\n assert (input_type in ['protein', 'dna']), \"Input type must be either 'protein' or 'dna'\"\n\n cmd = ['diamond']\n\n if input_type == 'protein':\n cmd += ['blastp']\n elif input_type == 'dna':\n cmd += ['blastx']\n\n cmd += ['-d', database]\n cmd += ['-q', inputfile]\n cmd += ['-o', outputfile]\n\n if not args:\n args = \"--more-sensitive --top 10 --quiet\"\n\n cmd += args.split()\n\n if verbose:\n print(' '.join(cmd))\n\n with open(os.devnull, 'w') as devnull:\n try:\n exit_code = call(cmd, stdout=devnull)\n except OSError:\n exit_code = None\n\n return exit_code",
"def start_bot(self):\n self.proc = subprocess.Popen(\"./start\", stdin=subprocess.PIPE,\n\t\t\t\t\t\t\t\t\t stdout=subprocess.PIPE,\n\t\t\t\t\t\t\t\t\t cwd=os.path.abspath(self.path))",
"def start(self):\n self._proc = self._get_subprocess()\n self._pid = self._proc.pid\n self._return_code = None",
"def entry_point() -> int:\n return run(argv=sys.argv[1:], stdout=sys.stdout, stderr=sys.stderr)",
"def run_starter(self, expect_to_fail=False):\n logging.info(\"running starter \" + self.name)\n args = [self.cfg.bin_dir / \"arangodb\"] + self.hotbackup_args + self.default_starter_args + self.arguments\n\n lh.log_cmd(args)\n self.instance = psutil.Popen(args)\n logging.info(\"my starter has PID:\" + str(self.instance.pid))\n if not expect_to_fail:\n self.wait_for_logfile()\n self.wait_for_port_bind()",
"def start(self):\r\n return self.start_subprocess()",
"def run(self, stdout=None, stderr=None):",
"def executable():\n\n if len(sys.argv) == 1:\n arguments.get_help()\n sys.exit('\\nGive me something to do and I will do it\\n')\n else:\n # Parse the Arguments that have been provided\n args = arguments.get_args()\n\n # Load The System Logger\n log = logger.load_in(log_level=args.get('log_level', 'info'))\n log.debug('Used Arguments %s', args)\n const(log_method=log)\n\n # Begin Work\n start(set_args=args)",
"def main_wrapper(argv):\n try:\n #scribus.statusMessage(\"Running script...\")\n scribus.progressReset()\n main(argv)\n finally:\n # Exit neatly even if the script terminated with an exception,\n # so we leave the progress bar and status bar blank and make sure\n # drawing is enabled.\n if scribus.haveDoc() > 0:\n scribus.setRedraw(True)\n scribus.statusMessage(\"\")\n scribus.progressReset()",
"def run (self, bioseqs, *clargs):\t\t\n\t\t## Preconditions:\n\t\tassert (2 <= len (bioseqs))\n\t\t## Main:\n\t\tself._inseqs = bioseqs\n\t\tself.call_cmdline (*clargs)",
"def main(argv=sys.argv):\n logging.basicConfig()\n exit_code = 1\n try:\n app = Application(argv)\n app.run()\n exit_code = 0\n except KeyboardInterrupt:\n exit_code = 0\n except Exception as exc:\n LOG.exception(exc)\n sys.exit(exit_code)",
"def _launch(self):\n annotators = ['tokenize', 'ssplit']\n if 'ner' in self.annotators:\n annotators.extend(['pos', 'lemma', 'ner'])\n elif 'lemma' in self.annotators:\n annotators.extend(['pos', 'lemma'])\n elif 'pos' in self.annotators:\n annotators.extend(['pos'])\n annotators = ','.join(annotators)\n options = ','.join(['untokenizable=noneDelete',\n 'invertible=true'])\n # if you work on English, use this this command\n cmd = ['java', '-mx' + self.mem, '-cp', '\"%s\"' % self.classpath,\n 'edu.stanford.nlp.pipeline.StanfordCoreNLP', '-annotators',\n annotators, '-tokenize.options', options,\n '-outputFormat', 'json', '-prettyPrint', 'false']\n \n # if you work on arabic, use this this command\n \n # cmd = ['java', '-mx' + self.mem, '-cp', '\"%s\"' % self.classpath,\n # # 'edu.stanford.nlp.pipeline.StanfordCoreNLP','-annotators',\n # 'edu.stanford.nlp.pipeline.StanfordCoreNLP', '-props', 'StanfordCoreNLP-arabic.properties','-annotators',\n # annotators, '-tokenize.options', options, #'-tokenize.whitespace', 'true',\n # '-outputFormat', 'json', '-prettyPrint', 'false']\n print(' '.join(cmd))\n\n # We use pexpect to keep the subprocess alive and feed it commands.\n # Because we don't want to get hit by the max terminal buffer size,\n # we turn off canonical input processing to have unlimited bytes.\n self.corenlp = pexpect.spawn('/bin/bash', maxread=100000, timeout=60)\n self.corenlp.setecho(False)\n self.corenlp.sendline('stty -icanon')\n self.corenlp.sendline(' '.join(cmd))\n self.corenlp.delaybeforesend = 0\n self.corenlp.delayafterread = 0\n self.corenlp.expect_exact('NLP>', searchwindowsize=100)",
"def cli():\n config, auth, execute_now = read_command_line_arguments()\n main(config, auth, execute_now)",
"def run():\n\n call_args = sys.argv[1:]\n main(call_args)",
"def main():\n count = 0\n\n # Read in the required files and filenames.\n predicted_proteins, protein_db, output_file_aug_to_fasta, \\\n output_file_proteins_to_db, blastp_output, output_to_file, \\\n overwrite = call_files()\n\n # Write all entries in the AUGUSTUS output to a FASTA file\n for record in split_records_aug(predicted_proteins):\n if count == 0:\n mode = 'w'\n else:\n mode = 'a'\n write_fasta(record, output_file_aug_to_fasta, mode)\n count += 1\n\n # Create a blast database and carry out a blastp search\n blast_db = blast_database(protein_db, 'prot', True,\n output_file_proteins_to_db, overwrite)\n\n blastp_file = blastp(output_file_proteins_to_db, output_file_aug_to_fasta,\n True, blastp_output, overwrite, 7)\n\n # Parse the blastp results for the desired information\n blast_results = parse_blastp_output(blastp_output)\n\n # Print the results\n print_output(blast_results)",
"def Start(self):\n\n\n\n assert not self._process, 'Start() can only be called once'\n self._process = subprocess.Popen(self._args)",
"def start_comp(command_line, log='', env='', foreground='no', no_stdin = 'yes'):\n proc_title_argv = command_line.split()\n\n if proc_title_argv[0] == 'taskset':\n real_program = proc_title_argv[3]\n else:\n real_program = proc_title_argv[0]\n\n # first test shared library link\n try:\n can_find_all_shared_libs(real_program)\n except IOError, e:\n print e;\n raise\n\n my_stdout = None\n my_stderr = None\n my_stdin = None\n if (no_stdin == 'yes'):\n my_stdin = open('/dev/null', 'r')\n\n if log:\n dir = os.path.dirname(log)\n if dir:\n try:\n exist_ok_makedirs(dir, 0777)\n except OSError, (errno, strerror):\n sys.stderr.write('%s: %s\\n' % (dir, strerror))\n raise\n try:\n log_fd = open(log, \"w\")\n except IOError, (errno, strerror):\n print 'cannot open %s: %s' % (log, strerror)\n raise\n else:\n my_stdout = log_fd\n my_stderr = subprocess.STDOUT\n\n #command = [ path ]\n #if options:\n # command += options\n\n my_env = {}\n if env != '':\n env_list = env.split('\\t')\n env_val = '%s:%s' % (env_list[1], env_list[2])\n my_env[env_list[0]] = env_val\n\n try:\n p = subprocess.Popen(proc_title_argv, shell = False,\n # stdin = subprocess.PIPE,\\\n #stdin = None,\n stdin = my_stdin,\n stdout = my_stdout,\n stderr = my_stderr,#)\n env = my_env)\n except OSError, (errno, strerror):\n #sys.exit('cannot execute %s: %s' % (path, strerror))\n print 'cannot execute %s: %s' % (real_program, strerror)\n raise\n except ValueError, strerror:\n #sys.exit('subprocess.Popen value error: %s' %strerror)\n print 'subprocess.Popen value error: %s' % (strerror)\n raise\n\n #proc_name = os.path.basename(path)\n if proc_title_argv[0] == 'taskset':\n try:\n proc_name = os.path.basename(proc_title_argv[3])\n except IndexError,e:\n print \"path: \", path\n sys.exit(e)\n else:\n proc_name = os.path.basename(proc_title_argv[0])\n\n max_retry = 20\n retry = 0\n while True:\n if retry == max_retry:\n sys.exit('cannot exec. %s' % proc_name)\n\n #if kill_proc_exact.lookup_process_exact(proc):\n if get_pids_exact(proc_name):\n break;\n else:\n time.sleep(0.1)\n retry += 1\n\n if foreground == 'yes':\n try:\n p.wait()\n except KeyboardInterrupt, strerror:\n pass",
"def main():\n\n args = parse_arguments()\n show_parameters(args)\n\n bundleDefinitions = get_bundle_definitions(args.bfile)\n show_bundle_definitions(bundleDefinitions)\n\n check_definition_integrity(bundleDefinitions)\n\n bundlesForConfig = determine_bundles_for_config(args.config, bundleDefinitions)\n show_bundles_for_config(args.config, bundlesForConfig)\n\n output_result(bundlesForConfig, args.ofile)\n\n sys.exit(0)",
"def run_blast(self, metadata, analysistype, program, outfmt, evalue='1E-5', num_threads=12, num_alignments=1000000,\n perc_identity=70, task='blastn'):\n with progressbar(metadata) as bar:\n for sample in bar:\n # Run the BioPython BLASTn module with the genome as query, fasta (target gene) as db.\n make_path(sample[analysistype].reportdir)\n # Set the name and path of the BLAST report as reportdir/samplename_blastprogram.tsv\n sample[analysistype].report = os.path.join(\n sample[analysistype].reportdir, '{name}_{program}_{at}.tsv'.format(name=sample.name,\n program=program,\n at=analysistype))\n # Check the size of the report (if it exists). If it has size 0, something went wrong on a previous\n # iteration of the script. Delete the empty file in preparation for another try\n try:\n size = os.path.getsize(sample[analysistype].report)\n # If a report was created, but no results entered - program crashed, or no sequences passed\n # thresholds, remove the report, and run the blast analyses again\n if size == 0:\n os.remove(sample[analysistype].report)\n except FileNotFoundError:\n pass\n # Split the extension from the file path\n db = os.path.splitext(sample[analysistype].combinedtargets)[0]\n # Create the command line argument using the appropriate BioPython BLAST wrapper\n if program == 'blastn':\n blast = self.blastn_commandline(sample=sample,\n analysistype=analysistype,\n db=db,\n evalue=evalue,\n num_alignments=num_alignments,\n num_threads=num_threads,\n outfmt=outfmt,\n perc_identity=perc_identity,\n task=task)\n elif program == 'blastp':\n blast = self.blastp_commandline(sample=sample,\n analysistype=analysistype,\n db=db,\n evalue=evalue,\n num_alignments=num_alignments,\n num_threads=num_threads,\n outfmt=outfmt)\n elif program == 'blastx':\n blast = self.blastx_commandline(sample=sample,\n analysistype=analysistype,\n db=db,\n evalue=evalue,\n num_alignments=num_alignments,\n num_threads=num_threads,\n outfmt=outfmt)\n elif program == 'tblastn':\n blast = self.tblastn_commandline(sample=sample,\n analysistype=analysistype,\n db=db,\n evalue=evalue,\n num_alignments=num_alignments,\n num_threads=num_threads,\n outfmt=outfmt)\n elif program == 'tblastx':\n blast = self.tblastx_commandline(sample=sample,\n analysistype=analysistype,\n db=db,\n evalue=evalue,\n num_alignments=num_alignments,\n num_threads=num_threads,\n outfmt=outfmt)\n else:\n blast = str()\n assert blast, 'Something went wrong, the BLAST program you provided ({program}) isn\\'t supported'\\\n .format(program=program)\n # Save the blast command in the metadata\n sample[analysistype].blastcommand = str(blast)\n # Only run blast if the report doesn't exist\n if not os.path.isfile(sample[analysistype].report):\n try:\n blast()\n except ApplicationError as e:\n logging.debug(e)\n try:\n os.remove(sample[analysistype].report)\n except (IOError, ApplicationError):\n pass\n # Return the updated metadata object\n return metadata",
"def main():\n LOGGER.info('Loading Application')\n main_app = Application()\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-c\", \"--console\", help=\"Command Line Mode\", action=\"store_true\")\n args = parser.parse_args()\n if args.console:\n LOGGER.info('Command Line Mode')\n main_app.run()\n else:\n main_app.controller.gui_enabled = True\n try:\n import gui\n except ModuleNotFoundError:\n from herdcl import gui\n app = gui.MainUI()\n LOGGER.info('Opening GUI')\n app.mainloop()",
"def start(builder):\n global rss_builder\n rss_builder, sys.argv = builder, []\n app.run()",
"def main():\n logging.basicConfig(level=logging.INFO)\n # update some config\n update_config()\n\n # The hook will only be used in the Qt GUI right now\n util.setup_thread_excepthook()\n\n # parse command line\n parser = get_parser()\n preprocess_cmdline_args(sys.argv)\n args = parser.parse_args()\n\n config_options = process_config_options(args)\n\n # todo: defer this to gui\n config = SimpleConfig(config_options)\n cmdname = args.cmd if args.cmd is not None else \"gui\"\n\n # run non-RPC commands separately\n if cmdname in [\"create\", \"restore\"]:\n run_non_rpc(config)\n sys.exit(0)\n\n if cmdname == \"gui\":\n result = run_gui(config, config_options)\n elif cmdname == \"daemon\":\n result = run_daemon(config, config_options)\n else:\n result = run_cmdline(config, config_options, cmdname)\n\n print_result(result)\n sys.exit(0)",
"def main():\n parser = argparse.ArgumentParser(\n description=\"library access from the command line.\")\n parser.add_argument(\"-v\", \"--version\", help=\"Display curator-cli version\",\n action=\"store_true\", default=False)\n parser.add_argument(\"libname\", help=\"Path to the librarian library file\",\n type=str, default=\"library.lbr\")\n args = parser.parse_args()\n\n if args.version:\n print('curator-cli v'+__version__)\n sys.exit(0)\n\n colorama.init()\n CLI(args.libname).cmdloop()",
"def main():\n boba_blast_game.main()"
] | [
"0.60168827",
"0.5796402",
"0.56809795",
"0.56387544",
"0.55868495",
"0.55859977",
"0.5585382",
"0.55839694",
"0.556747",
"0.55591005",
"0.5537728",
"0.5533495",
"0.5521777",
"0.54482013",
"0.5421747",
"0.54031545",
"0.5402231",
"0.5383822",
"0.53771955",
"0.5370251",
"0.5368047",
"0.53562915",
"0.53305024",
"0.5309203",
"0.53061736",
"0.5286426",
"0.5236051",
"0.5219585",
"0.521793",
"0.519533"
] | 0.66386735 | 0 |
Initialize a parser that tries to catch BlastErrors. | def __init__(self, bad_report_handle = None):
self._bad_report_handle = bad_report_handle
#self._b_parser = BlastParser()
self._scanner = _Scanner()
self._consumer = _BlastErrorConsumer() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, parser=None):",
"def __init__(self, parser: Any = None):",
"def __init__(self, *args, **kw):\n self.parser = Parser(*args, **kw)",
"def test_create_new_gerber_parser(self):\n parser = Gerber()\n assert parser != None",
"def _parse(self):\n try:\n # parse token stream into abstract syntax tree (AST)\n self._ast = self._rule_container()\n\n except ParseError:\n raise\n\n except Exception as exc:\n raise ParseError(u'Unexpected error: {0}'.format(unicode(exc)))",
"def __init__(self, *args, **kw):\n try:\n return self._parse_args(*args, **kw)\n except (DateError, TimeError, DateTimeError):\n raise\n except Exception:\n raise SyntaxError('Unable to parse {}, {}'.format(args, kw))",
"def __init__(self, parser):\n if parser == \"csv\":\n self._parser = CSVParser()\n elif parser == \"static\":\n self._parser = StaticParser()\n else:\n raise NotImplementedError",
"def setup_parser(self, parser):",
"def __init__(self):\n print \"You asked for a Parser!\"",
"def __init__(self):\n Parser.__init__(self)\n self.__line_number = 0 # initialize the line number to 0",
"def initParser():\n libxml2mod.xmlInitParser()",
"def __init__(self):\n try:\n # this succeeds with python 2\n import StringIO\n class_StringIO = StringIO.StringIO\n except Exception:\n # this succeeds with python 3\n import io\n class_StringIO = io.StringIO\n\n # create some XML with an error\n sio = class_StringIO( \"<foo> <bar> </foo>\\n\" )\n try:\n ET.parse( sio )\n except Exception:\n self.ET_exc_class = sys.exc_info()[0]\n else:\n # something is wrong; the drawback to this fallback is that you\n # cannot distinguish an XML error from other errors\n self.ET_exc_class = Exception",
"def __init__(self, redirector, terminators, multilineCommands, legalChars, commentGrammars, commentInProgress,\n case_insensitive, blankLinesAllowed, prefixParser, preparse, postparse, shortcuts):\n\n self.commentGrammars = commentGrammars\n self.preparse = preparse\n self.postparse = postparse\n self.shortcuts = shortcuts\n\n self.main_parser = self._build_main_parser(redirector=redirector, terminators=terminators,\n multilineCommands=multilineCommands, legalChars=legalChars,\n commentInProgress=commentInProgress,\n case_insensitive=case_insensitive,\n blankLinesAllowed=blankLinesAllowed, prefixParser=prefixParser)\n self.input_source_parser = self._build_input_source_parser(legalChars=legalChars,\n commentInProgress=commentInProgress)",
"def __init__(self,\r\n config,\r\n stream_handle,\r\n exception_callback):\r\n\r\n # Call the superclass constructor\r\n super(FlortDjCsppParser, self).__init__(config,\r\n stream_handle,\r\n exception_callback,\r\n DATA_REGEX,\r\n ignore_matcher=IGNORE_MATCHER)",
"def __init__(self, exception, message=\"Invalid requests parse!\"):\n self.message = message\n self.exception = exception\n super().__init__(self.message)",
"def __init__(self, url = None, spec_string = None, lazy = False, **kwargs):\n assert url or spec_string and not (url and spec_string), \\\n 'You must provide either a URL to read, or a spec string to '\\\n 'parse, but not both!'\n\n # Keep the parameters around for later use\n self.url = None\n if url:\n from .util.url import absurl\n from .util.fs import abspath\n import os\n self.url = absurl(url, abspath(os.getcwd()))\n else:\n self.url = _PLACEHOLDER_URL\n\n self._spec_string = spec_string\n\n # Initialize variables we're filling later\n self.specification = None\n self.version = None\n self.version_name = None\n self.version_parsed = ()\n self.valid = False\n\n # Add kw args as options\n self.options = kwargs\n\n # Verify backend\n from .util import default_validation_backend\n self.backend = self.options.get('backend', default_validation_backend())\n if self.backend not in BaseParser.BACKENDS.keys():\n raise ValueError('Backend may only be one of %s!'\n % (BaseParser.BACKENDS.keys(), ))\n\n # Start parsing if lazy mode is not requested.\n if not lazy:\n self.parse()",
"def __init__(self, line):\n # Throw an exception if we don't see the parenthesis that mark a cause\n if not line[108] == '(':\n raise ParsingException\n if not line[159:160] == ')':\n raise ParsingException\n\n # Parsing definitions\n self.cause = line[109:159].strip()",
"def StacktraceParser(cls):\n return FracasCrashParser()",
"def __init__(self, separator=' ', scanner=None, parser=None):\n self._separator = None\n self.separator = separator\n self.scanner = load(scanner, Scanner, self.DEFAULT_SCANNER)\n self.parser = load(parser, ParserBase, self.DEFAULT_PARSER)",
"def __init__(self):\n\n self.prim_parser = parser.Parser()",
"def StacktraceParser(cls):\n return CracasCrashParser()",
"def __init__(self, parser: Parser, baudrate=9600):\n self.parser = parser",
"def __init__(self):\n\n self.parser = self.define_parser()\n self.pen = Pen()",
"def __init__(self, node):\n from aiida.common import exceptions\n super(BigDFTParser, self).__init__(node)\n if not issubclass(node.process_class, BigDFTCalculation):\n raise exceptions.ParsingError(\"Can only parse BigDFTCalculation\")",
"def _init_parser(self):\n # outputParser = (pyparsing.Literal('>>') | (pyparsing.WordStart() + '>') | pyparsing.Regex('[^=]>'))('output')\n outputParser = (pyparsing.Literal(self.redirector * 2) |\n (pyparsing.WordStart() + self.redirector) |\n pyparsing.Regex('[^=]' + self.redirector))('output')\n\n terminatorParser = pyparsing.Or(\n [(hasattr(t, 'parseString') and t) or pyparsing.Literal(t) for t in self.terminators])('terminator')\n stringEnd = pyparsing.stringEnd ^ '\\nEOF'\n self.multilineCommand = pyparsing.Or(\n [pyparsing.Keyword(c, caseless=self.case_insensitive) for c in self.multilineCommands])('multilineCommand')\n oneLineCommand = (~self.multilineCommand + pyparsing.Word(self.legalChars))('command')\n pipe = pyparsing.Keyword('|', identChars='|')\n self.commentGrammars.ignore(pyparsing.quotedString).setParseAction(lambda x: '')\n doNotParse = self.commentGrammars | self.commentInProgress | pyparsing.quotedString\n afterElements = \\\n pyparsing.Optional(pipe + pyparsing.SkipTo(outputParser ^ stringEnd, ignore=doNotParse)('pipeTo')) + \\\n pyparsing.Optional(\n outputParser + pyparsing.SkipTo(stringEnd, ignore=doNotParse).setParseAction(lambda x: x[0].strip())(\n 'outputTo'))\n if self.case_insensitive:\n self.multilineCommand.setParseAction(lambda x: x[0].lower())\n oneLineCommand.setParseAction(lambda x: x[0].lower())\n if self.blankLinesAllowed:\n self.blankLineTerminationParser = pyparsing.NoMatch\n else:\n self.blankLineTerminator = (pyparsing.lineEnd + pyparsing.lineEnd)('terminator')\n self.blankLineTerminator.setResultsName('terminator')\n self.blankLineTerminationParser = ((self.multilineCommand ^ oneLineCommand) +\n pyparsing.SkipTo(self.blankLineTerminator, ignore=doNotParse).setParseAction(\n lambda x: x[0].strip())('args') + self.blankLineTerminator)('statement')\n self.multilineParser = (((self.multilineCommand ^ oneLineCommand) + pyparsing.SkipTo(terminatorParser,\n ignore=doNotParse).setParseAction(\n lambda x: x[0].strip())('args') + terminatorParser)('statement') +\n pyparsing.SkipTo(outputParser ^ pipe ^ stringEnd, ignore=doNotParse).setParseAction(\n lambda x: x[0].strip())('suffix') + afterElements)\n self.multilineParser.ignore(self.commentInProgress)\n self.singleLineParser = ((oneLineCommand + pyparsing.SkipTo(terminatorParser ^ stringEnd ^ pipe ^ outputParser,\n ignore=doNotParse).setParseAction(\n lambda x: x[0].strip())('args'))('statement') +\n pyparsing.Optional(terminatorParser) + afterElements)\n # self.multilineParser = self.multilineParser.setResultsName('multilineParser')\n # self.singleLineParser = self.singleLineParser.setResultsName('singleLineParser')\n self.blankLineTerminationParser = self.blankLineTerminationParser.setResultsName('statement')\n self.parser = self.prefixParser + (\n stringEnd |\n self.multilineParser |\n self.singleLineParser |\n self.blankLineTerminationParser |\n self.multilineCommand + pyparsing.SkipTo(stringEnd, ignore=doNotParse)\n )\n self.parser.ignore(self.commentGrammars)\n\n inputMark = pyparsing.Literal('<')\n inputMark.setParseAction(lambda x: '')\n fileName = pyparsing.Word(self.legalChars + '/\\\\')\n inputFrom = fileName('inputFrom')\n inputFrom.setParseAction(replace_with_file_contents)\n # a not-entirely-satisfactory way of distinguishing < as in \"import from\" from <\n # as in \"lesser than\"\n self.inputParser = inputMark + pyparsing.Optional(inputFrom) + pyparsing.Optional('>') + \\\n pyparsing.Optional(fileName) + (pyparsing.stringEnd | '|')\n self.inputParser.ignore(self.commentInProgress)",
"def setup_parse(self, inputstring: str, document: nodes.document) -> None:\n self.inputstring = inputstring\n self.document = document",
"def __init__(self, *_, **kwargs):\n self.parser = kwargs.pop(\"parser\", ANSI_PARSER)\n super().__init__()\n if self._code_indexes is None:\n self._code_indexes, self._char_indexes = self._get_indexes()",
"def __init__(self):\n this = _libsbml.new_RDFAnnotationParser()\n try: self.this.append(this)\n except: self.this = this",
"def __init__(self):\n this = _libsbml.new_SyntaxChecker()\n try: self.this.append(this)\n except: self.this = this",
"def test_init(self):\n p = top.Parser()\n msg = 'Object is not an top.Parser'\n self.assertIsInstance(p, top.Parser, msg)"
] | [
"0.6177303",
"0.6087029",
"0.6007341",
"0.5901439",
"0.58615685",
"0.5810311",
"0.57898545",
"0.5760166",
"0.5715188",
"0.5695503",
"0.56734216",
"0.55885917",
"0.55627733",
"0.5551707",
"0.552452",
"0.5515549",
"0.55094403",
"0.5507326",
"0.5501497",
"0.54901755",
"0.5484418",
"0.5474281",
"0.5453084",
"0.54521513",
"0.543879",
"0.54315865",
"0.5425119",
"0.5418808",
"0.5418502",
"0.54182273"
] | 0.685799 | 0 |
Parse a handle, attempting to diagnose errors. | def parse(self, handle):
results = handle.read()
try:
self._scanner.feed(File.StringHandle(results), self._consumer)
except ValueError, msg:
# if we have a bad_report_file, save the info to it first
if self._bad_report_handle:
# send the info to the error handle
self._bad_report_handle.write(results)
# now we want to try and diagnose the error
self._diagnose_error(
File.StringHandle(results), self._consumer.data)
# if we got here we can't figure out the problem
# so we should pass along the syntax error we got
raise
return self._consumer.data | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _diagnose_error(self, handle, data_record):\n line = handle.readline()\n\n while line:\n # 'Searchingdone' instead of 'Searching......done' seems\n # to indicate a failure to perform the BLAST due to\n # low quality sequence\n if line.startswith('Searchingdone'):\n raise LowQualityBlastError(\"Blast failure occured on query: \",\n data_record.query)\n line = handle.readline()",
"def doomed_parser(line):\n raise exceptions.LineParseException('Error occurred')",
"def parse(self, handle, do_features=...): # -> SeqRecord | None:\n ...",
"def handle_err(self):\n pass",
"def test_lsusb_parse_error_generic(self):\n self.assertRaises(ParseError, jc.parsers.lsusb.parse, self.generic_lsusb_t, quiet=True)",
"def parse(self, handle):\n line_count = 0\n for line in handle:\n line_count += 1\n fields = self.split(line)\n for handler in self._call_chain:\n # execute the callback's expected 'process'\n # method, passing in fields dict.\n getattr(handler, 'process')(fields)\n\n return line_count",
"def parse(handle, known_handle) -> ty.Iterator[data.Entry]:\n entries = parser.parse(CONTEXT, handle, known_handle)\n return map(op.itemgetter(0), entries)",
"def __parse_error(self, text):\n m = self.__size_expr.match(text)\n if m is not None:\n self.errcode = b\"\"\n self.errmsg = self.__read_block(int(m.group(1)) + 2)\n return\n\n m = self.__error_expr.match(text)\n if m is None:\n raise Error(\"Bad error message\")\n if m.group(1) is not None:\n self.errcode = m.group(1).strip(b\"()\")\n else:\n self.errcode = b\"\"\n self.errmsg = m.group(2).strip(b'\"')",
"def test_handle_error_does_not_raise_type_errors():\n formatter = mock.create_autospec(base.BaseFormatter, instance=True)\n guide = style_guide.StyleGuide(\n create_options(select=[\"T111\"], ignore=[]),\n formatter=formatter,\n stats=statistics.Statistics(),\n )\n\n assert 1 == guide.handle_error(\n \"T111\", \"file.py\", 1, 1, \"error found\", \"a = 1\"\n )",
"def test():\n\ttry:\n\t\tprint \"Raising ParseErrror.\"\n\t\traise ParseError\n\texcept ParseError:\n\t\tprint \"Caught ParseError.\"",
"def __init__(self, handle, parser=None):\n try:\n handle.readline\n except AttributeError:\n raise ValueError(\n \"I expected a file handle or file-like object, got %s\"\n % type(handle))\n self._uhandle = File.UndoHandle(handle)\n self._parser = parser\n self._header = []",
"def read(handle, debug=0):\n iterator = parse(handle, debug)\n try:\n first = iterator.next()\n except StopIteration:\n first = None\n if first is None:\n raise ValueError(\"No pathways found in handle\")\n try:\n second = iterator.next()\n except StopIteration:\n second = None\n if second is not None:\n raise ValueError(\"More than one pathway found in handle\")\n return first",
"def parse(handle):\n while True:\n record = __read(handle)\n if not record:\n break\n yield record",
"def handle_connection(conn):\n\n\ttry:\n\t\treq = http_parse_req(http_read(conn))\n\t\thandlers[req.method](conn, req)\n\texcept:\n\t\ttry:\n\t\t# Ignore nested exceptions, as we dont care if the 400\n\t\t# reaches the client or not\n\t\t\thttp_400(conn, b\"Invalid request\\n\")\n\t\texcept:\n\t\t\tpass",
"def __init__(self, bad_report_handle = None):\n self._bad_report_handle = bad_report_handle\n \n #self._b_parser = BlastParser()\n self._scanner = _Scanner()\n self._consumer = _BlastErrorConsumer()",
"def normalize_handle(val):\n m = handle_regexp.match(val)\n return m.group(2)",
"def test_trace_parse_handling():\n\n print(\"Testing incorrect parsing:\")\n assert not actions.trace.TraceAction().parse(\"5:4\", logger)\n assert not actions.trace.TraceAction().parse(\"THISHOULDFAIL\", logger)\n assert not actions.trace.TraceAction().parse(\"\", logger)",
"def test_handle_raise_value_error(self) -> None:\n with pytest.raises(ValueError) as excinfo:\n FileLookup.handle(\"foo\")\n assert (\n str(excinfo.value) == \"Query 'foo' doesn't match regex: \"\n \"^(?P<codec>[base64|json|json-parameterized|parameterized|\"\n \"parameterized-b64|plain|yaml|yaml-parameterized]:.+$)\"\n )",
"def parse(self, filehandle):\n l = filehandle.readline()\n if l.split()[0] != '##maf':\n return\n else:\n self.setpar(l.split()[1:])\n\n l=filehandle.readline()\n while l:\n la = l.split()\n## print la\n if(len(la)==0 or la[0]=='#'):\n## print \"skipping\"\n 1\n elif(la[0]=='a'):\n## print \"reading alignment\"\n self.readalign(la[1:], filehandle)\n else:\n## print \"end of records\"\n return\n\n l=filehandle.readline()",
"def test_readbadformat(self):\n\n self.assertRaises(ParseError, self.hw, self.badfile)",
"def test_handle_raise_validation_error(self) -> None:\n with pytest.raises(ValidationError) as excinfo:\n FileLookup.handle(\"foo:bar\")\n assert excinfo.value.errors() == [\n {\n \"loc\": (\"codec\",),\n \"msg\": f\"Codec 'foo' must be one of: {', '.join(CODECS)}\",\n \"type\": \"value_error\",\n }\n ]",
"def test_parseMethodExceptionLogged(self):\n\n class UnhandledException(Exception):\n \"\"\"\n An unhandled exception.\n \"\"\"\n\n def raisesValueError(line):\n raise UnhandledException\n\n self.server.parseState = \"command\"\n self.server.parse_command = raisesValueError\n\n self.server.lineReceived(b\"invalid\")\n\n self.assertTrue(self.flushLoggedErrors(UnhandledException))",
"def _parse_error(self, error):\n error = str(error)\n # Nvidia\n # 0(7): error C1008: undefined variable \"MV\"\n m = re.match(r'(\\d+)\\((\\d+)\\)\\s*:\\s(.*)', error)\n if m:\n return int(m.group(2)), m.group(3)\n # ATI / Intel\n # ERROR: 0:131: '{' : syntax error parse error\n m = re.match(r'ERROR:\\s(\\d+):(\\d+):\\s(.*)', error)\n if m:\n return int(m.group(2)), m.group(3)\n # Nouveau\n # 0:28(16): error: syntax error, unexpected ')', expecting '('\n m = re.match(r'(\\d+):(\\d+)\\((\\d+)\\):\\s(.*)', error)\n if m:\n return int(m.group(2)), m.group(4)\n # Other ...\n return None, error",
"def handle_expt(self):\r\n self._perform_on_error_handling()",
"def test_handle__bad_file(self):\n self.assertRaises(\n CommandError,\n self.c.handle,\n csv=\"inexistent.file.csv\",\n confirmed=\"Y\",\n stdout=self.c.stderr,\n )",
"def is_handle_valid(handle):\n invalid_handles = [\"0x00000000\",\n \"0xFFFFFFFF\",\n \"0xFFFFFFFE\"]\n return str(handle) not in invalid_handles",
"def __init__(self, handle, parser=None):\n\n if type(handle) is not FileType and type(handle) is not InstanceType:\n raise ValueError, \"I expected a file handle or file-like object\"\n self._uhandle = File.UndoHandle(handle)\n self._parser = parser",
"def sephandle(handle):\n if re.match('^[a-zA-Z]+[a-zA-Z0-9_-]*@[a-z0-9.]+\\.[a-z]+$', handle) is None:\n raise errors.InvalidHandleError('{0}'.format(handle))\n handle = handle.split('@')\n pod, user = handle[1], handle[0]\n return (pod, user)",
"def try_parse(blob, filename=None):\n ret = None\n\n for parser in [location_csv.blob_to_dict, gpx_parser.blob_to_dict]:\n try:\n ret = parser(blob)\n if ret:\n logging.debug(\n \"try_pares -> Got return for: {}, returning!\".format(\n parser.__doc__))\n return ret\n except TypeError as e:\n logging.debug(\"Failed parsing with parser: {} -> {}\".format(\n parser.__doc__, e))\n\n return None",
"def _parse(self):\n try:\n # parse token stream into abstract syntax tree (AST)\n self._ast = self._rule_container()\n\n except ParseError:\n raise\n\n except Exception as exc:\n raise ParseError(u'Unexpected error: {0}'.format(unicode(exc)))"
] | [
"0.5839811",
"0.57687813",
"0.5528219",
"0.550164",
"0.5492486",
"0.54591936",
"0.53429043",
"0.5302302",
"0.5275931",
"0.5261604",
"0.5244928",
"0.5179438",
"0.5175663",
"0.51575375",
"0.5084481",
"0.50780374",
"0.5070664",
"0.5024885",
"0.5024073",
"0.5020243",
"0.5017326",
"0.5013578",
"0.497783",
"0.49670923",
"0.4963231",
"0.49310607",
"0.4906602",
"0.49009046",
"0.4883596",
"0.48786137"
] | 0.6661924 | 0 |
Attempt to diagnose an error in the passed handle. | def _diagnose_error(self, handle, data_record):
line = handle.readline()
while line:
# 'Searchingdone' instead of 'Searching......done' seems
# to indicate a failure to perform the BLAST due to
# low quality sequence
if line.startswith('Searchingdone'):
raise LowQualityBlastError("Blast failure occured on query: ",
data_record.query)
line = handle.readline() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def myHandleError(self, record):\n if raiseExceptions:\n ei = sys.exc_info()\n try:\n traceback.print_exception(ei[0], ei[1], ei[2], None, sys.stderr)\n except IOError:\n pass # see issue 5971\n finally:\n del ei\n raise",
"def myHandleError(self, record):\n if raiseExceptions:\n ei = sys.exc_info()\n try:\n traceback.print_exception(ei[0], ei[1], ei[2], None, sys.stderr)\n except IOError:\n pass # see issue 5971\n finally:\n del ei\n raise",
"def handle_exception(e):\n print(e)\n return error()",
"def handle_error():\n print \"An error occurred. Trace:\\n\"\n traceback.print_exc()",
"def handle_err(self):\n pass",
"def ErrCheckHandle(result, func, args):\n if not result:\n raise WinError()\n return AutoHANDLE(result)",
"def _call_error_handler(self, handle_error, confidence):\n handle_error(line_number=100,\n category=self._category,\n confidence=confidence,\n message=\"message\")",
"def test_handle_error_does_not_raise_type_errors():\n formatter = mock.create_autospec(base.BaseFormatter, instance=True)\n guide = style_guide.StyleGuide(\n create_options(select=[\"T111\"], ignore=[]),\n formatter=formatter,\n stats=statistics.Statistics(),\n )\n\n assert 1 == guide.handle_error(\n \"T111\", \"file.py\", 1, 1, \"error found\", \"a = 1\"\n )",
"def handle_expt(self):\r\n self._perform_on_error_handling()",
"def test_handle_error(self):\n patcher = mock.patch('drogulus.dht.lookup.log.info')\n mock_info = patcher.start()\n lookup = Lookup(FindNode, self.target, self.node, self.event_loop)\n lookup._lookup = mock.MagicMock()\n uuid = [uuid for uuid in lookup.pending_requests.keys()][0]\n pending_task = lookup.pending_requests[uuid]\n contact = lookup.shortlist[0]\n lookup.event_loop.call_soon = mock.MagicMock()\n lookup._handle_error(uuid, contact, Exception('Foo'))\n self.assertNotIn(contact, lookup.shortlist)\n self.assertNotIn(uuid, lookup.pending_requests)\n lookup.event_loop.call_soon.assert_called_once_with(\n pending_task.cancel)\n # Log the error and associated exception (2 calls)\n self.assertEqual(mock_info.call_count, 2)\n self.assertEqual(lookup._lookup.call_count, 1)\n patcher.stop()",
"def handle_error(self):\n self.cmd_channel.debug(\"ActiveDTP.handle_error()\")\n logerror(traceback.format_exc())\n self.close()",
"def test_handle__bad_file(self):\n self.assertRaises(\n CommandError,\n self.c.handle,\n csv=\"inexistent.file.csv\",\n confirmed=\"Y\",\n stdout=self.c.stderr,\n )",
"def abortAsError(action, value, error_handle):\n print_info(\"failed: failure action= abort_as_error\")\n error_handle['action'] = 'ABORT_AS_ERROR'\n return error_handle",
"def _handle_error(self, errno, msg):\n if self.error_callback != None:\n #Call the error callback but expect failure.\n try:\n self.error_callback(errno, msg, self.rpcclient)\n except Exception as ex:\n self.log.failure(\"Error in error handler for '{cmd!r}'.\",cmd=self.command)\n else:\n #If no handler is set, all we do is log.\n self.log.error(\"Notice: no on_error defined for '{cmd!r}, command result: {msg!r}\",cmd=self.command,msg=msg)",
"def _raise_unknown_error(ex):\n raise MsticpyKqlConnectionError(\n \"Another exception was returned by the service\",\n *ex.args,\n f\"Full exception:\\n{str(ex)}\",\n title=\"connection failed\",\n )",
"def handle_error(self):\n self.cmd_channel.debug(\"DTPHandler.handle_error()\")\n try:\n raise\n # if error is connection related we provide a detailed\n # information about it\n except socket.error, err:\n if err[0] in errno.errorcode:\n error = err[1]\n else:\n error = \"Unknown connection error\"\n # an error could occur in case we fail reading / writing\n # from / to file (e.g. file system gets full)\n except EnvironmentError, err:\n error = _strerror(err)\n except:\n # some other exception occurred; we don't want to provide\n # confidential error messages to user so we return a\n # generic \"unknown error\" response.\n logerror(traceback.format_exc()) \n error = \"Unknown error\"\n self.cmd_channel.respond(\"426 %s; transfer aborted.\" %error)\n self.close()",
"def test_uncaught_exception_show_details(self, mock_st_error, mock_st_exception):\n with testutil.patch_config_options({\"client.showErrorDetails\": True}):\n exc = RuntimeError(\"boom!\")\n handle_uncaught_app_exception(exc)\n\n mock_st_error.assert_not_called()\n mock_st_exception.assert_called_once_with(exc)",
"def error(self, handler):\n pass",
"def ctrl_err(ht, h, ansi: bool):\n if ansi:\n state = CREATE_BUFFER(22)\n message = CREATE_BUFFER(1024 * 4)\n odbc_func = ODBC_API.SQLGetDiagRec\n raw_s = lambda s: bytes(s, 'ascii')\n else:\n state = CREATE_BUFFER_U(24)\n message = CREATE_BUFFER_U(1024 * 4)\n odbc_func = ODBC_API.SQLGetDiagRecW\n raw_s = str\n native_error = ctypes.c_int()\n err_list = []\n number_errors = 1\n while True:\n ret = odbc_func(ht, h, number_errors, state, ADDR(native_error), message, 1024, ADDR(C_SHORT()))\n if ret == SQL_NO_DATA:\n # No more data, I can raise print(err_list[0][1])\n state = err_list[0][0] if err_list else ''\n err_text = raw_s('[') + state + raw_s('] ') + (err_list[0][1] if err_list else '')\n if state[:2] in {raw_s('24'), raw_s('25'), raw_s('42')}:\n raise ProgrammingError(state, err_text)\n elif state[:2] in {raw_s('22')}:\n raise DataError(state, err_text)\n elif state[:2] in {raw_s('23')} or state == raw_s('40002'):\n raise IntegrityError(state, err_text)\n elif state == raw_s('0A000'):\n raise NotSupportedError(state, err_text)\n elif state in {raw_s('HYT00'), raw_s('HYT01'), raw_s('01000')}:\n raise OperationalError(state, err_text)\n elif state[:2] in {raw_s('IM'), raw_s('HY')}:\n raise Error(state, err_text)\n # else:\n # raise DatabaseError(state, err_text)\n elif ret == -2:\n # The handle passed is an invalid handle\n raise ProgrammingError('', 'SQL_INVALID_HANDLE')\n elif ret == SQL_SUCCESS:\n err_list.append((state.value, message.value, native_error.value) if ansi else (from_buffer_u(state), from_buffer_u(message), native_error.value))\n # number_errors += 1\n elif ret == -1:\n raise ProgrammingError('', 'SQL_ERROR')",
"def handle_error(self, request_handler, client_address):\n logger.debug('handle_error(%s:%s)' % client_address)",
"def test_log_error(log_error, capsys, test_df):\n\n err_msg = \"This is a test Exception\"\n\n @log_step(log_error=log_error)\n def do_nothing(df, *args, **kwargs):\n raise RuntimeError(err_msg)\n\n err_reraised = False\n try:\n test_df.pipe(do_nothing)\n except RuntimeError:\n err_reraised = True\n\n captured = capsys.readouterr()\n\n assert err_reraised\n assert \"FAILED\" in captured.out\n assert (f\"FAILED with error: {err_msg}\" in captured.out) == log_error",
"def handle_error(self):\n self.cmd_channel.debug(\"PassiveDTP.handle_error()\")\n logerror(traceback.format_exc())\n self.close()",
"def _raise_adal_error(ex):\n if ex.args[0] == \"Unexpected polling state code_expired\":\n raise MsticpyKqlConnectionError(\n \"Authentication request was not completed.\",\n title=\"authentication timed out\",\n )\n\n err_response = getattr(ex, \"error_response\")\n if err_response and \"error_description\" in ex.error_response:\n ex_mssgs = ex.error_response[\"error_description\"].split(\"\\r\\n\")\n else:\n ex_mssgs = [f\"Full error: {ex}\"]\n raise MsticpyKqlConnectionError(\n *ex_mssgs, title=\"could not authenticate to tenant\"\n )",
"def handle_fb_error():\n def deco_handle(f):\n def f_handle(*args, **kwargs):\n self = args[0]\n try:\n return f(*args, **kwargs)\n except:\n this_exception = sys.exc_info()\n status_msg = None\n try:\n # don't wait long, the status msg should be there already\n self.driver.implicitly_wait(1)\n status_msg=self.driver.find_element_by_class_name('status-msg')\n raise AssertionError('found fb status-msg: %s' % status_msg.text)\n except:\n # if it has info, re-raise\n if status_msg:\n if len(status_msg.text) > 0:\n raise\n # we didn't find a status_msg, just re-raise the original\n raise this_exception[1], None, this_exception[2]\n return f_handle\n return deco_handle",
"def handle_error(self, params):\n\n # Run the error handler if needed.\n if (self.must_handle_error()):\n log.warning(\"Running On Error error handler...\")\n self.got_error = False\n self.error_handler.eval(context=self, params=params)\n\n # The error has now been cleared.\n self.got_error = False",
"def _check_error(return_value):\n if return_value < 0:\n raise IOError(pm.lib.Pm_GetErrorText(return_value))",
"def _handle_error(self, soc):\n err_string = \"socket error\"\n if soc in self._reading:\n err_string += (\" with '%s' read\" % self._reading[soc])\n if soc in self._writing:\n err_string += (\" with '%s' still to write\" % self._writing[soc])\n self._log_error(err_string)\n self._cleanup(soc)",
"def handle_awful_failure(fail_text):\r\n if g.debug:\r\n import sys\r\n s = sys.exc_info()\r\n # reraise the original error with the original stack trace\r\n raise s[1], None, s[2]\r\n try:\r\n # log the traceback, and flag the \"path\" as the error location\r\n import traceback\r\n g.log.error(\"FULLPATH: %s\" % fail_text)\r\n g.log.error(traceback.format_exc())\r\n return redditbroke % fail_text\r\n except:\r\n # we are doomed. Admit defeat\r\n return \"This is an error that should never occur. You win.\"",
"def _handle_error(self, err: ctypes.c_char_p, method: str) -> Exception:\n if err:\n string = ctypes.string_at(err).decode(\"utf-8\")\n self._free_error(err)\n return RuntimeError(string)\n else:\n return RuntimeError(f\"Unknown error in {method}. \")",
"def check_handle(handle):\n return os.path.isfile(get_path_filename(handle))"
] | [
"0.5868295",
"0.5868295",
"0.5857427",
"0.5809658",
"0.57716674",
"0.57299936",
"0.56281805",
"0.5510797",
"0.5452078",
"0.5449711",
"0.5429637",
"0.5401915",
"0.53741544",
"0.537217",
"0.5346068",
"0.53016144",
"0.52907413",
"0.5285683",
"0.5241748",
"0.52281684",
"0.521598",
"0.5191825",
"0.51669294",
"0.5156949",
"0.5146979",
"0.5141896",
"0.51390064",
"0.5137862",
"0.51311743",
"0.5120141"
] | 0.625596 | 0 |
Decrease dataset size by cutting requested classes smaller | def cut_classes(self, dataset, classes, max_size, label):
# Cherry picked classes
class_dfs = []
for c in classes:
picked_data = dataset.loc[(dataset.loc[:,label] == c),:].reset_index(drop=True)
class_dfs.append(picked_data.loc[0:min(len(picked_data), max_size),:])
#class_dfs.append(picked_data.sample(n=min(len(picked_data), max_size)))
# Concat
data = pd.concat(class_dfs)
return data | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def reduce_class_size(dataset, reductionType, targetSize):\n\n classes = []\n classesContent = []\n ds_reduced = []\n originalDataset = dataset.copy()\n absOriginalLength = len(originalDataset)\n absTargetLength = 0\n targetMultiplicator = 0\n\n # calc absolute length to reduce to\n if (reductionType == 'percentage'):\n if (targetSize < 1 and targetSize > 0):\n targetMultiplicator = 100*targetSize\n elif (targetSize < 100 and targetSize > 0):\n targetMultiplicator = 1\n else:\n print(\"targetSize not valid! Use either a value less than one or a value less than 100 but always greater than 0\")\n return\n absTargetLength = (absOriginalLength/100)*targetMultiplicator\n elif (reductionType == 'absolute') and (targetSize < absOriginalLength) and (targetSize > 0):\n absTargetLength = targetSize\n else:\n print(\"ReductionType or targetSize not valid! Use: percentage (value greater 0 and less than 1) or absolute (value greater 0 and less than dataset size)\")\n return\n\n # find all available lable types\n for(ind, punch) in enumerate(originalDataset):\n if(punch.label[0] not in classes):\n classes.append(punch.label[0])\n classesContent.append(0)\n\n print(\"Found the following classes: {}\".format(classes))\n\n # reduce the size of the dataset\n for (ind, punch) in enumerate(originalDataset):\n if(classesContent[classes.index(punch.label[0])] < absTargetLength):\n classesContent[classes.index(punch.label[0])] += 1\n ds_reduced.append(punch.copy())\n\n print(\"class balance: {}\".format(classesContent))\n return ds_reduced",
"def truncate_sample_size(data,classes,others=None,max_size_given=None,rng=np.random.RandomState(100)): \n u, indices = np.unique(classes,return_inverse=True)\n indices=np.asarray(indices)\n num_u=len(u)\n sample_sizes=[]\n \n # get sample size of each class\n for i in range(num_u):\n sample_size_this=np.sum(indices==i)\n sample_sizes.append(sample_size_this)\n sample_sizes=np.array(sample_sizes,dtype=int)\n \n #size_min=np.amin(sample_sizes) # smallest sample size\n size_max=np.amax(sample_sizes) # largest sample size\n \n if size_max<max_size_given:\n max_size_given=size_max\n sample_sizes[sample_sizes>max_size_given]=max_size_given \n\n indices_all=np.array([],dtype=indices.dtype)\n indices_range=np.array(range(len(indices)))\n \n for i in range(num_u):\n ind_this_num=indices_range[indices==i]\n ind_this_reduced=ind_this_num[rng.choice(len(ind_this_num),size=sample_sizes[i],replace=False)]\n indices_all=np.append(indices_all,ind_this_reduced)\n \n # reduce the data \n data=data[indices_all,:]\n classes=classes[indices_all]\n if np.any(others):\n others=others[indices_all]\n return data,classes,indices_all,others",
"def reduce_sample_size(data,classes,times=2):\n data=data[range(0,data.shape[0],times)]\n classes=classes[range(0,classes.shape[0],times)]\n return data,classes",
"def test_reduce_features_size(self):\n # Get some data\n data = array([[0.564, 20.661, 1], [-18.512, 41.168, -1],\n [-0.009, 20.440, 7]])\n cdata = CData(data)\n\n # ===================================\n # Perform PCA to reduce to 2 features\n # ===================================\n\n # Reduce by nearest int closest to 60%, rounding up\n frac = 0.6\n cdata.reduce_features(frac)\n self.assertTrue(cdata.data.shape == (3, 2))",
"def _get_shrink_factor(self, obj_class):\n return 0.5 if obj_class == 1 else 0.2",
"def clean_dataset(args, min_class_count=5):\n classes_count = get_classes_count(args)\n print(f\"cleaning dataset of {len(args)} arguments\")\n classes_to_remove = []\n for c in classes_count.keys():\n if classes_count[c] < min_class_count:\n classes_to_remove.append(c)\n\n clean_args = [a for a in args if a.frame_id not in classes_to_remove]\n removed_args = [a for a in args if a.frame_id in classes_to_remove]\n print(f\"removed {len(removed_args)} arguments from dataset. {len(clean_args)} remaining.\")\n return clean_args, removed_args",
"def truncate_features(self):\n num_variable = len(self.Train_data['X'][0])\n for i in xrange(len(self.Train_data['X'])):\n num_variable = min([num_variable, len(self.Train_data['X'][i])])\n # truncate train, validation and test\n for i in xrange(len(self.Train_data['X'])):\n self.Train_data['X'][i] = self.Train_data['X'][i][0:num_variable]\n for i in xrange(len(self.Validation_data['X'])):\n self.Validation_data['X'][i] = self.Validation_data['X'][i][0:num_variable]\n for i in xrange(len(self.Test_data['X'])):\n self.Test_data['X'][i] = self.Test_data['X'][i][0:num_variable]\n return num_variable",
"def num_classes(self):\n\t\treturn 10",
"def split_dataset(self, test_size=0.20):\n\t\t(self.training_data, self.test_data, self.training_labels, self.test_labels) = train_test_split(self.training_data, self.training_labels, test_size=test_size)",
"def get_train_dev_sets (X, Y, train_set_proportion = 0.9):\n \n size_classes = np.unique(Y, return_counts = True)[1] # get an array of all class sizes\n\n # our training set contains train_set_proportion * smallest class size of each class\n size_smallest_class = min (np.unique (Y, return_counts = True)[1]) \n size_train_set_class = int (train_set_proportion * size_smallest_class)\n print (\"size_train_set_class:\", size_train_set_class)\n \n \n num_classes = np.shape(np.unique(Y))[0]\n\n size_classes_cum = np.empty ((0))\n \n # get an array of cumulative indices, starting with 0, for where each class starts\n for i in range (0, num_classes): \n size_classes_cum = np.append (size_classes_cum, int (sum(size_classes[0:i])))\n # add on final size of the data set +1 so we can iterate i+1 over num_classes to get end indices\n size_classes_cum = np.append (size_classes_cum, int(Y.shape[1]))\n \n sorted_indices = np.argsort (Y[0,:]) # get the list of indices that will sort Y by class\n X_sorted = X[:, sorted_indices]\n Y_sorted = Y[:, sorted_indices] \n \n # initialise sets\n train_set_X = np.empty ((X.shape[0], 0))\n train_set_Y = np.empty ((1, 0))\n dev_set_X = np.empty ((X.shape[0], 0))\n dev_set_Y = np.empty ((1, 0))\n \n \n for i in range (0, num_classes):\n X_this_class = X_sorted[:, int (size_classes_cum[i]):int (size_classes_cum[i]+size_train_set_class)]\n train_set_X = np.append (train_set_X, \n X_sorted[:, int (size_classes_cum[i]):int (size_classes_cum[i]+size_train_set_class)], \n axis=1)\n train_set_Y = np.append (train_set_Y, \n Y_sorted[:, int (size_classes_cum[i]):int (size_classes_cum[i]+size_train_set_class)], \n axis=1)\n dev_set_X = np.append (dev_set_X, \n X_sorted[:, int (size_classes_cum[i]+size_train_set_class):int(size_classes_cum[i+1])], \n axis=1)\n dev_set_Y = np.append (dev_set_Y, \n Y_sorted[:, int (size_classes_cum[i]+size_train_set_class):int(size_classes_cum[i+1])], \n axis=1)\n\n \n # Finally, apply the same shuffle to X and Y sets\n train_shuffled_indices = np.arange (train_set_X.shape[1])\n dev_shuffled_indices = np.arange (dev_set_X.shape[1])\n\n np.random.shuffle (train_shuffled_indices)\n np.random.shuffle (dev_shuffled_indices)\n\n train_set_X = train_set_X[:,train_shuffled_indices]\n train_set_Y = train_set_Y[:,train_shuffled_indices].astype (np.int16) \n dev_set_X = dev_set_X[:,dev_shuffled_indices]\n dev_set_Y = dev_set_Y[:,dev_shuffled_indices].astype (np.int16) \n \n return train_set_X, train_set_Y, dev_set_X, dev_set_Y",
"def balance_sample_size(data, classes, others=None, min_size_given=None, rng=np.random.RandomState(100)): \n u, indices = np.unique(classes,return_inverse=True)\n indices=np.asarray(indices)\n num_u=len(u)\n sample_sizes=[]\n \n # get sample size of each class\n for i in range(num_u):\n sample_size_this=np.sum(indices==i)\n sample_sizes.append(sample_size_this) \n \n size_min=np.amin(sample_sizes) # smallest sample size\n \n if min_size_given and size_min>min_size_given:\n size_min=min_size_given \n \n indices_all=np.array([],dtype=indices.dtype)\n indices_range=np.array(range(len(indices)))\n \n for i in range(num_u):\n ind_this_num=indices_range[indices==i]\n ind_this_reduced=ind_this_num[rng.choice(len(ind_this_num),size=size_min,replace=False)]\n indices_all=np.append(indices_all,ind_this_reduced)\n \n # reduce the data \n data=data[indices_all]\n classes=classes[indices_all]\n if np.any(others):\n others=others[indices_all]\n return data,classes,others",
"def sampling_class_portion(data,classes,others=None,class_portion=None,rng=np.random.RandomState(100)):\n u, indices = np.unique(classes,return_inverse=True)\n indices=np.asarray(indices)\n num_u=len(u)\n sample_sizes=dict()\n \n # get sample size of each class\n size_min=float(\"inf\")\n for i in range(num_u):\n sample_size_this=np.sum(indices==i)\n sample_sizes[u[i]]=sample_size_this\n if class_portion[u[i]]==1 and sample_size_this<size_min:\n size_min=sample_size_this\n print(size_min)\n\n indices_all=np.array([],dtype=indices.dtype)\n indices_range=np.array(range(len(indices)))\n \n # sampling\n for i in range(num_u):\n ind_this_num=indices_range[indices==i]\n replacetf=True if sample_sizes[u[i]]<(size_min*class_portion[u[i]]) else False\n ind_this_reduced=ind_this_num[rng.choice(sample_sizes[u[i]],size=size_min*class_portion[u[i]],replace=replacetf)]\n indices_all=np.append(indices_all,ind_this_reduced)\n \n # get the sampled data \n data=data[indices_all,:]\n classes=classes[indices_all]\n if np.any(others):\n others=others[indices_all]\n return data,classes,indices_all,others",
"def reduce_class_size(ratio, class_size, N_classes, G, student_schedule, \n\t\t\t\t\t copy=False):\n\tif copy:\n\t\tG = G.copy()\n\t\tstudent_schedule = student_schedule.copy()\n\t\n\tN_remove = round(ratio * class_size)\n\n\t# link types that are affected by students not being present at school\n\taffected_links = ['student_student_intra_class', \n\t\t\t\t\t 'student_student_table_neighbour',\n\t\t\t\t\t 'student_student_daycare',\n\t\t\t\t\t 'teaching_teacher_student',\n\t\t\t\t\t 'daycare_supervision_teacher_student']\n\n\tfor wd in range(1, 6):\n\t\tfor c in range(1, N_classes + 1):\n\t\t\tstudent_nodes = student_schedule[student_schedule['hour_1'] == c]\\\n\t\t\t\t\t.loc[wd].index\n\t\t\t# pick a number of students from every class and remove them\n\t\t\tstudents_to_remove = np.random.choice(student_nodes, N_remove, \\\n\t\t\t\treplace=False)\n\n\t\t\t## remove edges from the graph\n\t\t\t# find all edges on the given weekday in which at least one student\n\t\t\t# from the list of students to remove is involved. Only edges with a\n\t\t\t# link type that is affected by the absence from school are selected \n\t\t\t# (i.e. no family or friendship contacts)\n\t\t\tedges_to_remove = [(u, v, k) for u, v, k, data in \\\n\t\t\tG.edges(keys=True, data=True) if data['link_type'] in \\\n\t\t\taffected_links and data['weekday'] == wd and \\\n\t\t\t(u in students_to_remove or v in students_to_remove)]\n\t\t\t# remove affected edges from the graph\n\t\t\tfor e in edges_to_remove:\n\t\t\t\tG.remove_edge(e[0], e[1], key=e[2])\n\t\n\t\t\t## remove entries in the student schedule at the corresponding days\n\t\n\t\t\t# set all entries for students on the given weekday to nan in the \n\t\t\t# student schedule\n\t\t\tfor s in students_to_remove:\n\t\t\t\tfor hour in range(1, 10):\n\t\t\t\t\tstudent_schedule.loc[wd, s]['hour_{}'.format(hour)] = pd.NA\n\t\t\t\t\t\t\t\t\t\n\tif copy:\n\t\treturn G, student_schedule",
"def mask_classes(outputs: torch.Tensor, dataset: ContinualDataset, k: int) -> None:\n outputs[:, 0:k * dataset.N_CLASSES_PER_TASK] = -float('inf')\n outputs[:, (k + 1) * dataset.N_CLASSES_PER_TASK:\n dataset.N_TASKS * dataset.N_CLASSES_PER_TASK] = -float('inf')",
"def balance_sample_size_increase(data,classes,others=None,max_size_given=None,rng=np.random.RandomState(100)): \n u, indices = np.unique(classes,return_inverse=True)\n indices=np.asarray(indices)\n num_u=len(u)\n sample_sizes=[]\n \n # get sample size of each class\n for i in range(num_u):\n sample_size_this=np.sum(indices==i)\n sample_sizes.append(sample_size_this) \n \n size_max=np.amax(sample_sizes) # largest sample size\n \n if max_size_given and size_max<max_size_given:\n size_max=max_size_given \n \n indices_all=np.array([],dtype=indices.dtype)\n indices_range=np.array(range(len(indices)))\n \n for i in range(num_u):\n ind_this_num=indices_range[indices==i]\n #replacetf=True if sample_sizes[i]<size_max else False\n if sample_sizes[i]>=size_max:\n ind_this_increased=ind_this_num[rng.choice(sample_sizes[i],size=size_max,replace=False)]\n indices_all=np.append(indices_all,ind_this_increased)\n else: # make sure each sample is used at least once\n ind_this_increased=ind_this_num\n ind_this_increased2=ind_this_num[rng.choice(sample_sizes[i],size=size_max-sample_sizes[i],replace=True)]\n indices_all=np.append(indices_all,ind_this_increased)\n indices_all=np.append(indices_all,ind_this_increased2)\n \n # increase the data \n data=data[indices_all]\n classes=classes[indices_all]\n if np.any(others):\n others=others[indices_all]\n return data,classes,others",
"def num_classes():\n return NUM_CLASSES",
"def _shrink(self):\n raise NotImplementedError(\"Should have implemented this.\")",
"def setClassFilter(self, includeClasses):\n self.__datasets = [d for d in self.__datasetsAll if d[-1] in includeClasses]\n self.__scaled_datasets = None\n self.activeClasses = includeClasses\n self.dataChanged.emit()",
"def normalize_data(self):\r\n # quantify data for each column except classification column for noise reduction\r\n for column_header in self.classification_training_data.columns:\r\n if column_header == \"Class\":\r\n continue\r\n if column_header == \"Age\":\r\n bin_size = 2\r\n elif column_header == \"Ht\":\r\n bin_size = 5\r\n else:\r\n bin_size = 1\r\n for idx in self.classification_training_data.index:\r\n self.classification_training_data.at[idx, column_header] = math.floor(\r\n self.classification_training_data[column_header][idx] / bin_size) * bin_size",
"def get_num_classes(dataset: str):\n if dataset == \"imagenet\" or dataset == \"kitti\":\n return 1000\n elif dataset == \"cifar10\" or dataset == \"mnist\" or dataset == \"fashion_mnist\":\n return 10",
"def data_split(dataset, val_ratio=0.1, test_ratio=0.1, seed=1234):\n\n\t# How you grab the labels will depend on what type of Pytorch Dataset object 'dataset' is\n\t# (i.e. ImageFolder/DatasetFolder or not)\n\n\t# For fun, check the method resolution order (MRO) of 'dataset'\n\tprint('Dataset object\\'s inheritance: ', type(dataset).__mro__)\n\n\t# Determine what kind of Dataset object it is, then grab labels\n\t# Warning: currently this will break for anything other than an ImageFolder or CIFAR10 train set\n\tif isinstance(dataset, datasets.CIFAR10):\n\t\tlabels = dataset.train_labels\n\telif isinstance(dataset, datasets.ImageFolder):\n\t\tlabels = [img[1] for img in dataset.imgs]\n\telse:\n\t\terror('Dataset not supported yet')\n\n\t# Calculate class priors, (number in class)/(size of dataset)\n\tidcs = [i for i in range(len(dataset))]\n\tsamples_per_class = np.bincount(np.array(labels))\n\tpriors = samples_per_class/len(labels)\n\n\t# Number of samples in each class for val and test set \n\tval_per_class = np.ceil(samples_per_class*val_ratio).astype(np.int)\n\ttest_per_class = np.ceil(samples_per_class*test_ratio).astype(np.int)\n\n\t# Copy and shuffle the labels and corresponding indices to randomize before splitting\n\tshuffled_labels = list(labels)\n\tshuffled_idcs = list(idcs)\n\trandom.Random(seed).shuffle(shuffled_labels)\n\trandom.Random(seed).shuffle(shuffled_idcs)\n\n\t# Iterate through, grabbing indices for each class to place in validation set\n\t# until the desired number is reached\n\tval_idcs = []\n\tval_counts = np.zeros(val_per_class.shape)\n\n\tfor i, l in zip(shuffled_idcs, shuffled_labels):\n\t\t# Check if validation set quota has been reached yet for this class\n\t\tif val_counts[l] < val_per_class[l]:\n\t\t\tval_idcs.append(i)\n\t\t\tval_counts[l] += 1\n\n\t\t# Check if stopping point is reached\n\t\tif (val_counts == val_per_class).all():\n\t\t\tbreak\n\n\t# Repeat for test set\n\ttest_idcs = []\n\ttest_counts = np.zeros(test_per_class.shape)\n\tfor i, l in zip(shuffled_idcs, shuffled_labels):\n\t\t# Check if this index is already in val set\n\t\tif i in val_idcs:\n\t\t\tcontinue\n\n\t\t# Check if test set quota has been reached yet for this class\n\t\tif test_counts[l] < test_per_class[l]:\n\t\t\ttest_idcs.append(i)\n\t\t\ttest_counts[l] += 1\n\n\t\t# Check if stopping point is reached\n\t\tif (test_counts == test_per_class).all():\n\t\t\tbreak\n\n\t# Get train indices too (all the remaining samples not in val or test)\n\ttrain_idcs = [j for j in idcs if j not in val_idcs+test_idcs]\n\n\t# Split the data\n\ttrain = Subset(dataset, train_idcs)\n\tval = Subset(dataset, val_idcs)\n\ttest = Subset(dataset, test_idcs)\n\n\treturn train, val, test",
"def subsampling(dataset, class_column_index, class_max_count, class_dict):\n out = []\n for row in dataset:\n cls = row[class_column_index]\n rInt = np.random.randint(0, class_dict[cls])\n if rInt <= class_max_count:\n out.append(row)\n ss_data = np.array(out)\n\n return ss_data",
"def __split_dataset(self):\n self.train, self.valid, _, _ = train_test_split(self.data, self.data, test_size=0.2)\n self.valid, self.test, _, _ = train_test_split(self.valid, self.valid, test_size=0.5)",
"def balance_classes(self, classids):\n \n # Get ROI class counts for each sample patch:\n samples = self.SampleID\n counts = self.count_classes(samples)\n counts = counts[:, classids]\n totalcount = np.sum(counts, axis=0)\n \n # Find the class with minimum and maximum total count:\n c_min = np.argmin(totalcount)\n c_max = np.argmax(totalcount)\n \n # Class balancing is performed as long as the min-max class ratio is \n # not within 50%.\n #\n # Balancing Algorithm:\n # * Randomly sample from samples with non-zero min-class ROI counts \n # and zero maximum class ROIs.\n # * Simulaneously, randomly sample a subset of max-class only samples \n # to be removed from the dataset. This levels the field from both \n # directions.\n class_ratio = totalcount[c_min] / totalcount[c_max]\n while (class_ratio < 0.5) & (len(samples) < 3*5000):\n # Find samples with maximum min-max class ratio:\n N = np.sum((counts[:,c_min] > 0) & (counts[:,c_max] == 0))\n M = int(0.5*N)\n \n # Min-class samples to add:\n min_sample = np.nonzero((counts[:,c_min]>0) & (counts[:,c_max]==0))\n min_sample = min_sample[0] # Unfold tuple\n min_sample = min_sample[np.random.randint(0, len(min_sample)-1, N)]\n \n # Max-class samples to remove:\n max_sample = np.nonzero((counts[:,c_min]==0) & (counts[:,c_max]>0))\n max_sample = max_sample[0] # Unfold tuple\n max_sample = max_sample[np.random.randint(0, len(max_sample)-1, M)]\n max_sample = np.unique(max_sample)\n \n # Construct new sample set:\n min_sample = samples[min_sample]\n samples = np.append(np.delete(samples, max_sample), min_sample)\n \n # Recompute total count and min-max class ratio:\n counts = self.count_classes(samples)[:, classids]\n totalcount = np.sum(counts, axis=0)\n c_min = np.argmin(totalcount)\n c_max = np.argmax(totalcount)\n class_ratio = totalcount[c_min] / totalcount[c_max]\n \n # Done, balanced, update samples:\n balancedset = self.Samples[samples,:]\n self._set_sampling_scheme_(balancedset)",
"def deleteClass(X,y,num,c):\n\t\n\ttwoIndex=np.array([i for i in range(len(y)) if y[i]==c])\n\tnp.random.shuffle(twoIndex)\n\n\tif num >= 0.7*len(twoIndex):\n\t\tprint('Number of examples requested for delete too many...')\n\t\texit()\n\n\n\tdelIndex=twoIndex[0:num]\n\n\tX=np.delete(X,delIndex,0)\n\ty=np.delete(y,delIndex,0)\n\n\tprint(X.shape,y.shape)\n\n\treturn(X,y)",
"def cut(self, max_lenght):\n self.V_estimates = self.V_estimates[:max_lenght]\n super().cut(max_lenght)",
"def train_dev_split(docs, dev_size):\n pass",
"def clean_partition(self, partition):\n for cls in list(partition.keys()):\n if len(partition[cls]) < self.num_samples_per_class:\n del (partition[cls])\n return partition",
"def truncate(self):\n\n self.population = self.population[:self.max_number_trees]",
"def make_stratified_split_of_segmentation_dataset(\n dataset: Union[Dataset, np.ndarray, List],\n num_classes: int,\n split_ratio: Optional[float] = 0.2,\n names_of_classes: Optional[int] = None,\n verbose: bool = False,\n ignore_index: Optional[bool] = None,\n max_optimization_iterations: int = 1000000,\n split_n_sample_slack: int = 0,\n):\n disable_tqdm = not verbose\n if isinstance(dataset, Dataset):\n label_fn = _calc_label_fn(dataset[0])\n dataset = [label_fn(dataset[_i]) for _i in trange(len(dataset), disable=disable_tqdm)]\n icm = instance_class_matrix(dataset, num_classes, disable=disable_tqdm)\n # TODO: remove columns with ignore_index\n # icm = icm[:, 1:].numpy()\n ds_cc = icm.sum(axis=0)\n ds_swcc = (icm > 0).astype(np.long).sum(axis=0)\n if names_of_classes is None:\n names_of_classes = [f\"class_{_i}\" for _i in range(num_classes)]\n dataset_stats = pd.DataFrame({\n 'class_count': ds_cc,\n 'samples_with_class_count': ds_swcc\n }, index=names_of_classes)\n if verbose:\n print(dataset_stats.sort_values('samples_with_class_count', ascending=False))\n optimization_weights_for_classes = np.zeros(icm.shape[1], dtype=np.float)\n # TODO: override weights (importance of classes)\n optimization_weights_for_classes = 1.0 / ds_cc\n optimization_weights_for_classes[ds_cc == 0] = 0\n optimization_weights_for_classes /= optimization_weights_for_classes.sum()\n if verbose:\n print('\\n'.join(f\"{_f:1.9f}\" for _f in optimization_weights_for_classes))\n num_samples = icm.shape[0]\n testset_size = int(np.floor(num_samples * split_ratio))\n\n def calc_cost(subsample):\n subset_class_voxels = icm[subsample].sum(axis=0)\n per_class_ratios = subset_class_voxels / ds_cc.astype(np.float)\n return (optimization_weights_for_classes * np.abs(split_ratio - per_class_ratios)).sum()\n\n cost_stats = []\n best_cost = np.inf\n best_testset = None\n for _ in trange(max_optimization_iterations):\n if split_n_sample_slack:\n subsample_size = np.random.randint(testset_size - split_n_sample_slack, testset_size + split_n_sample_slack)\n else:\n subsample_size = testset_size\n random_testset = np.random.permutation(num_samples)[:subsample_size]\n _cost = calc_cost(random_testset)\n if _cost < best_cost:\n best_cost = _cost\n best_testset = random_testset\n cost_stats.append(_cost)\n\n subset_class_stats = icm[best_testset].sum(axis=0)\n per_class_ratios = subset_class_stats / ds_cc.astype(np.float)\n residual = np.abs(split_ratio - per_class_ratios)\n # TODO: need to account for ignore_index\n # optimization_results = pd.DataFrame({\n # 'weights': optimization_weights_for_classes,\n # 'ratios': per_class_ratios\n # }, index=names_of_classes[1:])\n # TODO: plot histograms of splits\n # if verbose:\n # pd.Series(cost_stats).plot(kind='hist')\n # pd.Series(cost_stats).plot(kind='hist', bins=50)\n # icm[:, optimization_weights_for_classes == 0].sum(axis=1)\n # optimization_weights_for_classes == 0\n # removed_classes = np.where(optimization_weights_for_classes==0)[0] + 1\n # scenes_with_no_classes_but_removed = np.where(icm[:,optimization_weights_for_classes!=0].sum(axis=1)==0)[0]\n # for _scene_id in scenes_with_no_classes_but_removed:\n # print(f\"scene_id={_scene_id}: {labels[_scene_id]['semantic'].unique()}\")\n return best_testset"
] | [
"0.73414516",
"0.68224937",
"0.6783226",
"0.61126554",
"0.5833041",
"0.5807217",
"0.58048767",
"0.5798329",
"0.5755408",
"0.5639198",
"0.56274307",
"0.5619286",
"0.56011754",
"0.5576871",
"0.5574115",
"0.5558189",
"0.55560374",
"0.5554359",
"0.55406946",
"0.55294955",
"0.55260533",
"0.55128485",
"0.5512442",
"0.549895",
"0.5494869",
"0.5487094",
"0.54566133",
"0.54561",
"0.545474",
"0.5448642"
] | 0.7343867 | 0 |
Save prediction results to csv file for visualisation purposes. | def save_prediction(self, meta, y_pred, y, filename):
df = pd.DataFrame(meta)
df['y_pred'] = y_pred
df['y'] = y
print(df)
df.loc[:, 'id'] = df.index
self.df_to_csv(df, filename, store_header=False) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def save_results(predictions, filename):\n with open(filename, 'w') as f:\n f.write(\"id,ACTION\\n\")\n for i, pred in enumerate(predictions):\n f.write(\"%d,%f\\n\" % (i + 1, pred))",
"def save_prediction(self):\n if DataLoader.data is None:\n messagebox.showerror(\"Information\", \"Data file is empty, please load the data first.\")\n return\n if Trainer.y_pred is None:\n messagebox.showerror(\"Information\", \"Preciction has not been made, please train a new model and predict or \"\n \"load a model and predict.\")\n return\n\n path = filedialog.asksaveasfile(mode='w', defaultextension=\".csv\", filetypes=[(\"csv files\", '*.csv'),\n (\"xlsx files\", '*.xlsx'),\n (\"dat files\", '*.dat')])\n\n copy_data = DataLoader.data.copy()\n copy_data['prediction'] = Trainer.y_pred\n copy_data.to_csv(path, index=False)\n\n # Clears memory\n copy_data.drop(copy_data.index, inplace=True)\n del copy_data",
"def save_results(self):\n results = pd.concat([\n pd.DataFrame(self.IDs.cpu().numpy(), columns= ['ID']), \n pd.DataFrame(self.predicted_labels.cpu().numpy(), columns= ['predicted_label']),\n pd.DataFrame(self.correct_predictions.cpu().numpy(), columns= ['correct_prediction']),\n pd.DataFrame(self.epistemic_uncertainty.cpu().numpy(), columns= ['epistemic_uncertainty']), \n pd.DataFrame(self.aleatoric_uncertainty.cpu().numpy(), columns= ['aleatoric_uncertainty']), \n pd.DataFrame(self.total_uncertainty.cpu().numpy(), columns= ['total_uncertainty']), \n ], axis=1)\n\n create_results_directory()\n results.to_csv('results/{}_{}_results.csv'.format(self.__class__.__name__, datetime.datetime.now().replace(microsecond=0).isoformat()), index=False)",
"def write_results(file_path, predictions):\n with open(file_path, \"w\") as csv_file:\n writer = csv.writer(csv_file, delimiter=\",\")\n writer.writerow([\"Id\", \"Bound\"])\n for id, bound in enumerate(predictions):\n writer.writerow([id, bound])",
"def write_predictions(y_pred, filename, yname=None) :\n out = open(filename, 'wb')\n f = csv.writer(out)\n if yname :\n f.writerow([yname])\n f.writerows(zip(y_pred))\n out.close()",
"def write_predictions(y_pred, filename, yname=None) :\n out = open(filename, 'wb')\n f = csv.writer(out)\n if yname :\n f.writerow([yname])\n f.writerows(list(zip(y_pred)))\n out.close()",
"def write_predictions(y_pred, filename, yname=None) :\n out = open(filename, 'wb')\n f = csv.writer(out)\n if yname :\n f.writerow([yname])\n f.writerows(list(zip(y_pred)))\n out.close()",
"def save_performances(self):\r\n nb_datasets = len(self.results)\r\n resu = [[] for k in range(nb_datasets)]\r\n\r\n # fetch results\r\n for k in range(nb_datasets):\r\n best = np.argmax(self.results[k]['mean_test_score'])\r\n resu[k].append(('score', self.results[k]['mean_test_score'][best]))\r\n resu[k] = resu[k] + list(self.results[k]['params'][best].items())\r\n\r\n # write results in csv\r\n for k, resu in enumerate(resu):\r\n with open('results/final_results_{}.csv'.format(k), 'a') as file:\r\n writer = csv.writer(file)\r\n writer.writerow(resu)",
"def save_results(self, path):\n create_folder(path)\n self.get_scores().to_csv(path + r'/scores.csv', index=False)\n self.get_results().to_csv(path + r'/results.csv', index=False)\n self.get_pivot_last_epoch().to_csv(path + r'/pivot_last_epoch.csv', index=True)",
"def write_predictions_to_file(predictor, testDataFname, enc, outputFname, features=None):\n\n testData, _, testDataIds, _ = make_data(testDataFname, features=features, enc=enc)\n\n dt = datetime.now()\n predictions = predictor.predict(testData)\n print 'predicting took', datetime.now() - dt\n\n featureSelectionOutput = np.transpose(np.vstack((testDataIds, predictions.round().astype(int))))\n\n with open(outputFname, 'wb') as outputFile:\n writer = csv.writer(outputFile)\n writer.writerow(['id', 'loss'])\n writer.writerows(featureSelectionOutput)",
"def write_predictions(pred, filename=\"pred.csv\"):\n output_file = open(filename, \"wb\")\n writer = csv.writer(output_file)\n datetimes = get_datetimes(\"test.csv\")\n\n writer.writerow([\"datetime\", \"count\"])\n\n for index, count in enumerate(pred):\n writer.writerow([datetimes[index], int(count)])\n\n output_file.close()",
"def write_results(self, results):\n predictions = open('hmm_results.csv', 'w')\n predictions.write(\"Type,Prediction\")\n for type in results:\n if type == 'O':\n continue\n predictions.write(\"\\n\" + str(type) + \",\")\n for interval in results[type]:\n predictions.write(str(interval) + \" \")\n predictions.close()",
"def store_classes_and_predictions(output_file_path, classes, predictions):\n with open(output_file_path, mode='a', newline='') as csvfile:\n csvwriter = csv.writer(csvfile, delimiter=',')\n csvwriter.writerow(['true', 'predicted'])\n for i in range(len(classes)):\n csvwriter.writerow([classes.iloc[i], predictions.iloc[i]])",
"def save_predicted_results(predicted_results):\n # Save the model\n with open(\"predicted_results\", \"wb\") as predicted_results_file:\n pickle.dump(predicted_results, predicted_results_file)",
"def exportEvaluation(self, results, url):\r\n # research\r\n profprint()\r\n if not os.path.exists(url):\r\n print \"creating new results file: \",url\r\n open(url, 'w').close()\r\n myfile = open(url, 'a')\r\n\r\n wr = csv.writer(myfile)\r\n r = numpy.array(results)\r\n if len(r.shape) == 1:\r\n wr.writerow(results)\r\n else:\r\n wr.writerows(results)",
"def exportEvaluation(self,results,url):\n profbox()\n if not os.path.exists(url):\n open(url, 'w').close()\n myfile = open(url, 'a')\n\n wr = csv.writer(myfile)\n r = numpy.array(results)\n if len(r.shape) == 1:\n wr.writerow(results)\n else:\n wr.writerows(results)",
"def save_output(pris):\n pris.to_csv('reactors_pris_2016.csv',\n index=False,\n sep=',',\n )",
"def log_results(self, path):\n pd.DataFrame(self.results).to_csv(path)",
"def write_results_to_csv(ids,\n sentiments_actuals,\n sentiments_predictions,\n filename):\n output = pd.DataFrame(data={\n \"id\": ids,\n \"sentiment_actual\": sentiments_actuals,\n \"sentiment_predicted\": sentiments_predictions})\n output.to_csv(filename, index=False, quoting=3)",
"def log_inference(tester, name, description):\r\n\tfor dataset, output in tester.preds.items():\r\n\t\tresults = pandas.DataFrame.from_dict(output)\r\n\t\tpath = os.path.join(\r\n\t\t\tEXPERIMENT_PATH, tester.config[\"name\"] + '-' + dataset)\r\n\t\twith open(path + \".csv\", \"w\") as f:\r\n\t\t\tresults.to_csv(f, sep=\"\\t\", encoding='utf-8', \r\n\t\t\t\tfloat_format='%.3f', index=False)\r\n\r\n\t\twith open(path + \"-predictions.csv\", \"w\") as f:\r\n\t\t\tresults[[\"tag\", \"y_hat\"]].to_csv(\r\n\t\t\t\tf, index=False, float_format='%.3f', header=False)",
"def save_csv(self, filename): # DONE\n self.data.to_csv(filename)",
"def save_prediction(predictions, image_file, path):\n\t\n\tsave_file = convert_file_extension_to_txt(image_file)\n\t\n\twith open(os.path.join(path, save_file), 'w') as f:\n\t\tfor prediction in predictions:\n\t\t\tf.write(str(prediction) + \"\\n\")",
"def save_to_csv(self):\n path = partial(os.path.join, 'datasets')\n save_name = self.name.lower().replace(' ', '_')\n self.df['values'].sum(axis=1).to_csv(path('{0}_values.csv'.format(save_name)))\n self.df['allocations'].to_csv(path('{0}_allocations.csv'.format(save_name)))\n self.df['returns'].to_csv(path('{0}_returns.csv'.format(save_name)))\n self.trades.to_csv(path('{0}_trades.csv'.format(save_name)))",
"def write_predictions(prediction_dic, result_path):\n with open(result_path, 'wb') as outfile:\n outfile.write(bytes('Patient_ID,HPV/p16_status\\n', 'UTF-8'))\n for patient_id, pred in prediction_dic.items():\n outfile.write(bytes(str(patient_id) + ',' + str(pred) + '\\n', 'UTF-8'))",
"def save_submission(results, file_name='submission.csv'):\n submission_path = path.join('..', 'output', file_name)\n results.to_csv(submission_path)",
"def _store_predict_result(self):\n try:\n self._predict_res.to_csv(os.path.join(self._result_path, PredictConstance.PREDICT_FILE),\n index=False)\n return True\n except Exception as err:\n self.managerlogger.logger.error(\"joint_predict_result error: %s\" % err)\n self.errorlogger.logger.error(\"joint_predict_result error:\\n %s\" % traceback.format_exc())\n\n return False",
"def create_csv_submission(ids, y_pred, name):\n with open(name, 'w') as csvfile:\n fieldnames = ['Id', 'Prediction']\n writer = csv.DictWriter(csvfile, delimiter=\",\", fieldnames=fieldnames)\n writer.writeheader()\n for r1, r2 in zip(ids, y_pred):\n writer.writerow({'Id':r1,'Prediction':round(r2)})",
"def save_results(self, results, file_name, file_type):\n if file_type == 'csv':\n csv_filename = '{}.csv'.format(file_name)\n\n with open(csv_filename, 'w', newline='') as file:\n writer = csv.writer(file)\n writer.writerows(results)",
"def write_to_file(self, results):\n with open(self.outputFilename, \"w\") as csvFile:\n csvWriter = csv.writer(csvFile, delimiter=',') \n title_row = ('asset_id', 'component_id', 'latitude', 'longitude', 'installation_date', 'commissioning_date', 'street_name', 'cabinet_id', 'nominal_wattage', 'current_time', 'current_LogValue', 'current_IsLogValueOff') \n csvWriter.writerow(title_row)\n for record in results:\n csvWriter.writerow(record)",
"def save(self, data, outpath):\n data.to_csv(outpath)"
] | [
"0.7703693",
"0.7628422",
"0.7522447",
"0.74385107",
"0.74357957",
"0.7419766",
"0.7419766",
"0.736686",
"0.73543096",
"0.732228",
"0.7308786",
"0.7291229",
"0.71699023",
"0.71563935",
"0.711707",
"0.7090318",
"0.7074257",
"0.7062179",
"0.70042425",
"0.6984243",
"0.6929999",
"0.68592006",
"0.6846279",
"0.68278944",
"0.6776817",
"0.67703694",
"0.6684959",
"0.6683029",
"0.6663423",
"0.6661753"
] | 0.76600194 | 1 |
Upload all files from folder to bucket | def _upload_dir_to_bucket(self, path, ext_path):
for file in os.listdir(path):
self._upload_to_bucket(path+'/'+file, ext_path+'/'+file) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def upload_files_s3(files, bucket):\n \n print('************************************')\n print('Uploading files to s3 bucket...')\n print('************************************')\n \n for i in range(len(files)):\n upload_file_s3(files[i], bucket)\n \n print('************************************')\n print('Upload complete')\n print('************************************')",
"def upload_files(self, folder):\n\n # Load all blobs in the session to make sure only upload needed files\n blobs = GoogleStorage().list_blobs_with_prefix(self.bucket_name, folder)\n blobs = [blob.name for blob in blobs]\n\n project_home = os.environ['PROJ_HOME']\n root_folder = os.path.join(project_home, folder)\n\n for file in os.listdir(root_folder):\n file_name = \"{folder}/{file}\".format(folder=folder, file=file)\n if file_name not in blobs:\n source_file_name = os.path.join(project_home, file_name)\n GoogleStorage().upload_blob(\n self.bucket_name, source_file_name, file_name)\n print('Uploaded file {}'.format(source_file_name))",
"def upload_images_to_s3(directory):\n for f in directory.iterdir():\n if str(f).endswith(('.png', '.jpg', '.jpeg')):\n full_file_path = str(f.parent) + \"/\" + str(f.name)\n file_name = str(f.name)\n s3_client.upload_file(full_file_path, BASE_BUCKET, file_name)\n print(f,\"put\")",
"def upload_all_to_s3(static_root):\n conn = _get_connection()\n\n files = _get_file_list(static_root)\n _build_local_metadata_file(files, home=static_root)\n\n local_metadata = _fetch_current_local_metadata()\n remote_metadata = _fetch_current_remote_metadata(conn)\n files_to_upload = _filter_file_list(files, local_metadata, remote_metadata)\n\n start_time = time.time()\n print 'Upload start: Landing in BUCKET_NAME: %s' % BUCKET_NAME\n\n for f in files_to_upload:\n #Upload to Bucket\n upload_file(conn, os.path.join(static_root, f), f)\n\n #Upload Gzip css/js version if gzip is enabled\n can_be_gzipped = _file_can_be_compressed(os.path.join(static_root, f))\n if GZIP_ENABLED and can_be_gzipped:\n upload_file(conn, os.path.join(static_root, f), f, gzip=True)\n\n #Extra files\n if EXTRA_FILES:\n print 'Now, uploading extra files outside public/static'\n for filename_local, filename_s3 in EXTRA_FILES.items():\n upload_file(conn, filename_local, filename_s3)\n\n end_time = time.time()\n print 'Upload finished: \\\n Time elapsed: %s s' % round(end_time - start_time, 3)\n\n # refresh metadata file on the server\n print 'Uploading local metadata file'\n upload_file(conn, LOCAL_METADATA_FILE, REMOTE_METADATA_FILE)\n print 'Uploading process DONE'",
"def upload(jsonfiles):\n # clear S3 Bucket\n bucket = S3Bucket()\n bucket.clear()\n for jsonfile in jsonfiles:\n filename = os.path.basename(jsonfile)\n key = build_key(filename)\n logging.info(\"%s %s\", filename, key)\n # store json in S3 object\n bucket.store(key, jsonfile)",
"def upload_json_to_s3(directory):\n for f in directory.iterdir():\n if str(f).endswith('.json'):\n full_file_path = str(f.parent) + \"/\" + str(f.name)\n file_name = str(f.name)\n s3_client.upload_file(full_file_path, BASE_BUCKET, file_name)",
"def upload_files(self, logger):\n logger.info(\"Uploading all files to GCS . . .\")\n\n source_file_name = self.path + '/data/'\n files = os.listdir(source_file_name)\n\n # Setting credentials using JSON file\n try:\n storage_client = storage.Client()\n # Getting bucket object\n bucket = storage_client.bucket(\"my-bigdata-projects\")\n if 'bt_challenge_boa.csv' in files:\n # Name of the object to be stored in the bucket\n object_name_in_gcs_bucket = bucket.blob(\n \"data/csv/bt_challenge_boa.csv\"\n )\n object_name_in_gcs_bucket.upload_from_filename(\n source_file_name + 'bt_challenge_boa.csv'\n )\n except Exception as error:\n logger.info(\"Something went wrong!\")\n logger.error(\"Error: {}\".format(error))\n\n logger.info(\"Files have been uploaded . . .\")",
"def upload_files(self, files):\n\n for f in files:\n self.scp.put(f, recursive=True)",
"def push_backup(args: Arguments) -> None:\n\n files = get_files_from_previous_backup(args.site)\n bucket = get_bucket(args)\n\n for path in files:\n upload_file(\n path=path,\n site_name=args.site,\n bucket=bucket,\n bucket_directory=args.bucket_directory,\n )\n\n print(\"Done!\")",
"def s3_sync(s3_bucket, s3_prefix, sync_path=\".\"):\n # Get bucket\n s3_resource = boto3.resource(\"s3\")\n bucket = s3_resource.Bucket(s3_bucket)\n\n # Walk paths and subdirectories, uploading files\n for path, subdirs, files in os.walk(sync_path):\n # Get relative path prefix\n relpath = os.path.relpath(path, sync_path)\n if not relpath.startswith('.'):\n prefix = os.path.join(s3_prefix, relpath)\n else:\n prefix = s3_prefix\n\n for file in files:\n file_key = os.path.join(prefix, file)\n bucket.upload_file(os.path.join(path, file), file_key)",
"def upload(filename, bucket):\n k = Key(bucket)\n k.key = uuid.uuid1().hex\n print \"Uploading batch to {}, key: {}...\".format(bucket.name, k.key)\n k.set_contents_from_filename(filename, reduced_redundancy=True)\n print \" Done.\"\n \n\n\n bucket = openBucket(dest)",
"def uploadFiles(self, filenames):\n bucket = self._S3_USER_UPLOAD_BUCKET\n prefix = self._S3_USER_UPLOAD_DIR\n uuid_dir = uuid.uuid4()\n # TODO(aimee): This should upload to a user-namespaced directory\n for filename in filenames:\n basename = os.path.basename(filename)\n response = self._upload_s3(filename, bucket, f\"{prefix}/{uuid_dir}/{basename}\")\n return f\"Upload file subdirectory: {uuid_dir} (keep a record of this if you want to share these files with other users)\"",
"def upload_dataset(bucket_name, directory, num_threads=20):\n s3 = boto3.resource('s3')\n\n def upload_file(queue):\n while True:\n obj = queue.get()\n if obj is None:\n break\n abspath, s3_path = obj\n s3.meta.client.upload_file(abspath, bucket_name, s3_path)\n queue.task_done()\n\n # create a queue for objects that need to be uploaded\n # and spawn threads to upload them concurrently\n upload_queue = Queue(maxsize=0)\n workers = []\n for worker in range(num_threads):\n worker = Thread(target=upload_file, args=(upload_queue, ))\n worker.setDaemon(True)\n worker.start()\n workers.append(worker)\n\n for root, _, files in os.walk(directory):\n for file in files:\n abspath = os.path.join(root, file)\n relpath = os.path.relpath(abspath, directory)\n s3_path = os.path.basename(directory) + \"/\" + relpath\n upload_queue.put((abspath, s3_path))\n\n # wait for the queue to be empty, then join all threads\n upload_queue.join()\n for _ in range(num_threads):\n upload_queue.put(None)\n for worker in workers:\n worker.join()",
"def upload_files_to_S3(sourceDir, bucket_name, destDir, aws_access_key_id=None, aws_secret_access_key=None):\n\n # set up the connection to the AWS Bucket.\n if aws_access_key_id == None or aws_secret_access_key == None:\n client = boto3.client(service_name='s3', aws_access_key_id=None, aws_secret_access_key=None)\n else:\n client = boto3.client(service_name='s3', aws_access_key_id=aws_access_key_id,\n aws_secret_access_key=aws_secret_access_key)\n transfer = boto3.s3.transfer.S3Transfer(client)\n\n # Get a list of all the files that have already been uploaded to S3\n MyS3Objects = [s.key for s in boto3.resource('s3').Bucket(bucket_name).objects.filter(Prefix=destDir)]\n\n\n\n\n uploadFileNames = files_to_upload(sourceDir)\n\n #print(sourceDir)\n #print(uploadFileNames)\n\n\n UploadCounter = 0\n\n for filename in uploadFileNames:\n sourcepath = filename[0]\n destpath = destDir + '/' + filename[1]\n\n # If the file is already on S3, don't upload it again\n if destpath in MyS3Objects:\n print(destpath, \" is already on S3\")\n continue\n\n UploadCounter += 1\n if UploadCounter % 100 == 0: print(\"Files Uploaded:\", UploadCounter)\n\n # print ('Uploading %s to Amazon S3 bucket %s' % (sourcepath, bucket_name))\n\n transfer.upload_file(sourcepath, bucket_name, destpath)\n\n print(\"All the files have been uploaded!\")",
"def upload_bucket_files(\n self,\n organization_id: str,\n bucket_id: str,\n target_dir: str,\n lifetime: str=None) -> dict:\n messages = list()\n response = {\"error_messages\": messages}\n\n target_dir_path = '{}/'.format(str(Path(target_dir).absolute()))\n for root, dirs, files in os.walk(target_dir):\n for dirname in dirs[:]:\n if dirname.startswith('.'):\n dirs.remove(dirname)\n\n for filename in files:\n if filename.startswith('.'):\n continue\n filepath = Path(root, filename)\n file_location = str(\n filepath.absolute()).replace(\n target_dir_path, '', 1)\n content_type, _ = mimetypes.guess_type(str(filepath))\n metadata = {\n \"x-abeja-meta-filename\": file_location\n }\n with open(str(filepath), 'rb') as f:\n try:\n self.upload_bucket_file(\n organization_id,\n bucket_id,\n f,\n file_location,\n content_type,\n metadata=metadata,\n lifetime=lifetime)\n except (BadRequest, Unauthorized, NotFound, Forbidden, InternalServerError) as e:\n messages.append({\n \"message\": 'Upload failed file({}), {}: {}'.format(\n filepath, e.__class__.__name__, str(e))\n })\n response[\"status\"] = False if messages else True\n return response",
"def upload(all_files, session):\n remote_directory = unique_path('cli-import')\n log.info('uploading files to %s' % remote_directory)\n\n for filename in all_files:\n callback = _progress_callback\n log.info(\"Uploading %s\" % filename)\n session.uploadWrapper(filename, remote_directory, callback=callback)\n if callback:\n print('')\n return remote_directory",
"def cloud_files(self, cloud_bucket_name: str, credentials: Mapping, files_to_upload: List, private: bool = True) -> Iterator[str]:",
"def upload_bam(bam_s3_path, local_folder_path):\n\n upload_folder(bam_s3_path, local_folder_path)",
"def upload_folder(self, path, folder):\n for root, _, files in os.walk(folder):\n for file in files:\n filename = os.path.join(root, file)\n with open(filename, \"rb\") as f:\n rel_posix_path = \"/\".join(os.path.relpath(filename, folder).split(os.sep))\n self.put_file(\"{}/{}\".format(path, rel_posix_path), f)",
"def upload_to_s3(site, bucket, directory=None, files=None, prefix=None):\n if bucket is None:\n print red('Error: Bucket must be specified.')\n return\n if directory is None and files is None:\n print red('Error: Directory and/or files must be specified.')\n return\n # Setup boto\n import boto\n from boto.s3.bucket import Bucket\n from boto.s3.key import Key\n import mimetypes\n import fnmatch\n\n setup_aws_access_key(site)\n\n # Connect to S3\n c = boto.connect_s3()\n b = Bucket(c, bucket)\n\n # Fix the prefix\n # prefix itself shouldn't have a / prefix itself but should end with /\n if prefix:\n prefix = prefix.lstrip('/')\n if prefix and not prefix.endswith('/'):\n prefix = prefix + '/'\n\n def __upload(key, filename):\n k = Key(b)\n k.key = key\n headers = {}\n content_type = mimetypes.guess_type(filename)[0]\n if site.has_key('webapp') and site['webapp'].get('cache_control'):\n for pattern in site['webapp']['cache_control']:\n if fnmatch.fnmatch(filename, pattern):\n headers['Cache-Control'] = site['webapp']['cache_control'][pattern]\n break\n if site.has_key('webapp') and site['webapp'].get('gzip_types') and content_type in site['webapp']['gzip_types']:\n from gzip import GzipFile\n from StringIO import StringIO\n # Need to specify content_type when uploading from a string!\n headers['Content-Type'] = content_type\n headers['Content-Encoding'] = 'gzip'\n s = StringIO()\n g = GzipFile(fileobj=s, mode='wb')\n with open(filename, 'rb') as f:\n g.write(f.read())\n g.close()\n k.set_contents_from_string(s.getvalue(), headers)\n else:\n k.set_contents_from_filename(filename, headers)\n\n if files:\n # Upload individual files\n if directory:\n keys = [filename.lstrip('/') for filename in files]\n files = [os.path.join(directory, filename) for filename in files]\n else:\n keys = [os.path.split(filename)[1] for filename in files]\n for i, filename in enumerate(files):\n print 'Uploading %s' % keys[i]\n if prefix:\n key = prefix + keys[i]\n else:\n key = keys[i]\n __upload(key, filename)\n elif directory:\n # Upload an entire directory\n def __upload_dir(arg, dirname, names):\n # arg is the starting directory\n for name in names:\n filename = os.path.join(dirname, name)\n if not os.path.isdir(filename) and not os.path.islink(filename) and not name.startswith('.'):\n key = filename[len(arg):]\n if key.startswith('/'):\n key = key[1:]\n if prefix:\n key = prefix + key\n print 'Uploading %s' % key\n __upload(key, filename)\n os.path.walk(directory, __upload_dir, directory)",
"def upload(filename, bucket):\n print(\"Uploading {} to S3\".format(filename.lower().replace('_', '-')))\n url = \"https://s3.ca-central-1.amazonaws.com/{}/{}\".format(bucket,\n filename.lower().replace('_', '-'))\n with open('{}/{}'.format(WORK_DIR, filename), 'rb') as data:\n requests.put(url, data=data)",
"def upload_handler(self):\n \n for root, dirs, files in os.walk(self.path):\n\n current_dir = os.path.basename(root)\n \n if root == self.path:\n root_id = self.gapy.create_file(current_dir, path=root, isFolder=True)\n else:\n parents_id = self.filesystem[os.path.dirname(root)][\"id\"]\n root_id = self.gapy.create_file(current_dir, path=root, isFolder=True, parents_id=[parents_id])\n print(f\"\\033[94m The directory {current_dir} was uploaded \\033[0m\")\n\n self.filesystem[root.rstrip(\"/\")] = { \"id\": root_id, \"files\": [] }\n \n if files:\n for f in files:\n if f not in IGNORE_FILES and os.path.getsize(root+\"/\"+f) > 0:\n file_id = self.gapy.create_file(f, path=root, parents_id=[root_id])\n self.filesystem[root][\"files\"].append({ \"name\": f, \"id\": file_id})\n print(f\"\\033[94m The file {f} was uploaded \\033[0m\")\n \n self.update_fs()",
"def _add_files(self, category, files, session, bucket=None):\n\n with session[category].make_commit('master') as commit:\n for filename, content in files.items():\n if bucket:\n commit.put_file_url(\n filename,\n 's3://%s/%s' % (bucket, content)\n )\n else:\n commit.put_file_bytes(\n filename,\n content\n )",
"def upload_to_s3(bucket_name, sourceDir):\n try:\n client = boto3.client('s3')\n resource = boto3.resource('s3')\n except ClientError as err:\n print(\"Failed to create boto3 client.\\n\" + str(err))\n return False\n try:\n # clean the bucket\n bucket = resource.Bucket(bucket_name)\n for key in bucket.objects.all():\n key.delete()\n\n # upload the new files\n uploadFileNames = getFiles(sourceDir)\n print(\"Found \" + len(uploadFileNames).__str__() + ' files')\n\n for filename in uploadFileNames:\n destName = os.path.join(*(filename.split('/')[1:]))\n print(\"Uploading file \" + filename + ' to ' + destName)\n resource.Object(bucket_name, destName).put(Body=open(filename, 'rb'),\n ContentType=get_contenttype_from_filename(filename))\n\n except ClientError as err:\n print(\"Failed to upload artefact to S3.\\n\" + str(err))\n return False\n except IOError as err:\n print(\"Failed to access artefact in this directory.\\n\" + str(err))\n return False\n\n return True",
"def uploadFilestoS3(self):\n allfilesuploadedcount = 0\n for eachfiledic in self.fileTobeUploaded:\n if eachfiledic[\"uploadedSuccess\"] == 0: #Means this file never got uploaded.\n if os.path.getsize(eachfiledic[\"filepath\"]) < 1000000000: #<1GB\n s3Log.info (\"FileSize < 1GB for :{}, so using single part upload.\".format(eachfiledic[\"filepath\"]) )\n if self.singlePartUpload(eachfiledic) == True:\n eachfiledic[\"uploadedSuccess\"] = 1\n allfilesuploadedcount = allfilesuploadedcount + 1\n else:\n s3Log.info (\"FileSize > 1GB for :{}, so using Multi Part upload. \\n\".format(eachfiledic[\"filepath\"]) )\n if self.multiPartUpload(eachfiledic) == True:\n eachfiledic[\"uploadedSuccess\"] = 1\n allfilesuploadedcount = allfilesuploadedcount + 1\n\n\n elif eachfiledic[\"uploadedSuccess\"] == 1: #Means it got uploaded in the last run.\n allfilesuploadedcount = allfilesuploadedcount + 1\n\n self.saveStateOfThisRun()\n if len(self.fileTobeUploaded) == allfilesuploadedcount: #Means we uploaded all files in the queue\n return True\n else:\n return False",
"def write_files_to_pod_and_upload(\n self,\n mcg_obj,\n awscli_pod,\n bucket_to_write,\n original_folder,\n amount=1,\n s3_creds=None,\n ):\n full_object_path = f\"s3://{bucket_to_write}\"\n object_list = []\n\n for i in range(amount):\n file_name = f\"testfile{i}.txt\"\n object_list.append(file_name)\n awscli_pod.exec_cmd_on_pod(\n f\"dd if=/dev/urandom of={original_folder}/{file_name} bs=1M count=1 status=none\"\n )\n if s3_creds:\n # Write data directly to target bucket from original dir\n sync_object_directory(\n awscli_pod,\n original_folder,\n full_object_path,\n signed_request_creds=s3_creds,\n )\n else:\n # Write data directly to NS bucket from original dir\n sync_object_directory(\n awscli_pod, original_folder, full_object_path, mcg_obj\n )\n return object_list",
"def upload_bucket_samples():\n if not Config.region:\n logger.error(\"You must specify a region in order to scan a bucket target\")\n raise SystemExit(\n \"Target region not specified. Use -r or --region to specify the target region.\"\n )\n # Connect to S3 in our target region\n s_3 = boto3.resource(\"s3\", region_name=Config.region)\n # Connect to our target bucket\n bucket = s_3.Bucket(Config.target_dir)\n # Retrieve a list of all objects in the bucket\n summaries = bucket.objects.all()\n # Inform the user as this may take a minute\n logger.info(\"Assembling volume from target bucket (%s) for submission\", Config.target_dir)\n # Loop through our list of files, downloading each to memory then upload them to the Sandbox\n for item in summaries:\n # Grab the file name from the path\n filename = os.path.basename(item.key)\n # Teensy bit of witch-doctor magic to download the file\n # straight into the payload used for our upload to the Sandbox\n response = Samples.upload_sample(file_name=filename,\n file_data=io.BytesIO(\n bucket.Object(key=item.key).get()[\"Body\"].read()\n )\n )\n # Retrieve our uploaded file SHA256 identifier\n sha = response[\"body\"][\"resources\"][0][\"sha256\"]\n # Add this SHA256 to the upload payload element\n Analyzer.uploaded.append(sha)\n # Track the upload so we recognize the file when we're done\n Analyzer.files.append([filename, item.key, sha])\n # Inform the user of our progress\n logger.debug(\"Uploaded %s to %s\", filename, sha)",
"def files_to_upload(source_directory: str) -> list:\n upload_file_names = []\n\n print(source_directory)\n for dirName, subdirList, fileList in os.walk(source_directory):\n for filename in fileList:\n file_path = os.path.join(dirName, filename)\n s3key = os.path.join(os.path.basename(dirName) + '/' + filename)\n upload_file_names.append((file_path, s3key))\n return upload_file_names",
"def upload_child_objects(self, local_dir_path, s3_dir_path, recursive=False, fn_pattern=None):\n child_objects = [os.path.join(local_dir_path, f) for f in os.listdir(local_dir_path)]\n child_files = [f for f in child_objects if os.path.isfile(f)]\n child_dirs = [f for f in child_objects if os.path.isdir(f)]\n\n for child_file in child_files:\n if not fn_pattern or fnmatch.fnmatch(child_file, fn_pattern):\n s3_object_path = os.path.join(s3_dir_path, os.path.basename(child_file))\n logging.debug(\"Uploading \\\"{}\\\" to \\\"{}\\\"\".format(child_file, s3_object_path))\n self.upload_object(child_file, s3_object_path)\n\n if recursive:\n for child_dir_local in child_dirs:\n child_dir_s3 = os.path.join(s3_dir_path, os.path.basename(child_dir_local))\n self.upload_child_objects(child_dir_local, child_dir_s3, recursive, fn_pattern)",
"def _upload_to_gcs(self, files_to_upload):\n # Compose mime_type using file format passed as param\n mime_type = 'application/' + self.export_format['file_format']\n hook = GoogleCloudStorageHook(\n google_cloud_storage_conn_id=self.google_cloud_storage_conn_id,\n delegate_to=self.delegate_to)\n for object, tmp_file_handle in files_to_upload.items():\n hook.upload(self.bucket, object, tmp_file_handle.name, mime_type)"
] | [
"0.7796129",
"0.76484376",
"0.76440495",
"0.7522285",
"0.73205024",
"0.7259498",
"0.71569556",
"0.69658166",
"0.68993825",
"0.6886849",
"0.6884797",
"0.67420983",
"0.67358345",
"0.6709969",
"0.6703498",
"0.66821843",
"0.66715854",
"0.66610366",
"0.6659203",
"0.6640648",
"0.6636185",
"0.6634004",
"0.6632965",
"0.66194236",
"0.65939945",
"0.65412617",
"0.6528638",
"0.6506675",
"0.64479274",
"0.64442647"
] | 0.7838725 | 0 |
Upload file to bucket if bucket is set and ext_filename is not None | def _upload_to_bucket(self, filename, ext_filename):
if ext_filename is None:
return
if self.s3:
self.bucket.upload_file(filename, ext_filename)
logging.info('Uploaded {} to S3 with name {}'.format(filename, ext_filename))
if self.gs:
try:
client = storage.Client()
bucket = client.get_bucket(self.bucket_name)
blob = storage.Blob(ext_filename, bucket)
blob.upload_from_filename(filename)
logging.info('Uploaded to {}'.format(ext_filename))
except:
logging.warning('Uploading file to bucket failed') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def upload_file(\n self, bucket_id: uplink.Path, filename: uplink.Path, file: uplink.Body\n ):\n pass",
"def upload_file(Filename=None, Bucket=None, Key=None, ExtraArgs=None, Callback=None, Config=None):\n pass",
"def upload_file(file, bucket_path, bucket=S3_BUCKET):\n # Bucket path should be somedir/name_of_file.ext\n try:\n if isinstance(file, str):\n resource.upload_file(file, bucket, bucket_path)\n else:\n resource.upload_fileobj(file, bucket, bucket_path)\n except:\n raise ChildProcessError('Something broke, Cap\\'n')",
"def upload(filename, bucket):\n print(\"Uploading {} to S3\".format(filename.lower().replace('_', '-')))\n url = \"https://s3.ca-central-1.amazonaws.com/{}/{}\".format(bucket,\n filename.lower().replace('_', '-'))\n with open('{}/{}'.format(WORK_DIR, filename), 'rb') as data:\n requests.put(url, data=data)",
"def _cloud_storage_upload(local_file, bucket, filename_on_bucket):\n client = storage.Client()\n\n bucket = client.get_bucket(bucket)\n blob = bucket.blob(filename_on_bucket)\n blob.upload_from_filename(local_file)\n print('uploaded ', bucket, filename_on_bucket)",
"def upload(self, file_path, bucket_name, file_name):\n\n self.client.upload_file(file_path, bucket_name, file_name)",
"def _upload_file(file_name, bucket, object_name):\n\n # If S3 object_name was not specified, use file_name\n if object_name is None:\n object_name = file_name\n\n # Upload the file\n try:\n s3.upload_file(file_name, bucket, object_name)\n except ClientError as e:\n logging.error(e)\n return False\n return True",
"def upload_to_bucket(bucket_name, path_to_source_file, upload_file_name):\r\n\r\n try:\r\n # initialize client & get blob\r\n _, _, blob = create_client(bucket_name, upload_file_name)\r\n\r\n # set the path to source file\r\n blob.upload_from_filename(path_to_source_file)\r\n \r\n except Exception as err:\r\n raise err\r\n sys.exit(1)\r\n \r\n else:\r\n print(f\"upload file '{path_to_source_file}' succeed\")\r\n\r\n return None",
"def upload_file(bucket_name, filename, file):\n client = get_client()\n bucket = client.get_bucket(bucket_name)\n blob = bucket.blob(filename)\n blob.upload_from_file(file)",
"def upload_file(self, file_name, bucket, destination_name):\n try:\n not self.client.upload_file(file_name, bucket, destination_name)\n except Exception as ex:\n raise ex",
"def _upload_s3(self, filename, bucket, objectKey):\n return s3_client.upload_file(filename, bucket, objectKey)",
"def put_upload(self):\n # print \"starting upload...\", self.current_upload['filepath']\n self.touch()\n self.log(\"STARTING_UPLOAD\", level=INFO)\n try:\n Backend.put_file(self.fileobj, self.current_upload[\"gcs_url\"])\n except exceptions.FilePutError as err:\n self.handle_put_error(err, self.fileobj)\n raise",
"def upload_file_s3(file_name, bucket):\n\n # If S3 object_name was not specified, use file_name \n try:\n response = s3_client.upload_file(file_name,\n bucket, \n file_name.replace('../',''))\n print(\"Uploaded \" + file_name)\n except ClientError as e:\n print(\"Failed to upload \" + file_name)\n logging.error(e)\n return False\n return True",
"def upload_file(file_name, bucket):\r\n object_name = file_name\r\n s3_client = boto3.client('s3')\r\n response = s3_client.upload_file(file_name, bucket, object_name)\r\n\r\n return response",
"def upload_s3_file(key, bucket, filename):\n s3_client = boto3.client('s3')\n s3_client.upload_file(filename, bucket, key)\n return True",
"def upload_fileobj(self, bucket_name, file_obj, key):\n self._client.upload_fileobj(Fileobj=file_obj, Bucket=bucket_name, Key=key)",
"def upload_to_s3(file_name, bucket, key): \n s3 = boto3.resource('s3') \n try:\n s3.meta.client.upload_file(file_name, bucket, key)\n print(\"s3 upload success -- uploaded \" + file_name + \" to the bucket: \" + bucket)\n except ClientError as e:\n logging.error(e)\n return False\n print(\"s3 upload error occurs\", e)\n return True",
"def upload_file(self, keyUrl='', body='', ContentType='', bucket=None):\n \n if bucket is None:\n bucket = self.AWS_S3_BUCKET\n \n #Verificamos si existe body\n if body is None:\n body=''\n \n try:\n self.get_s3_client().put_object(Bucket=bucket, Key=keyUrl, Body=body, ACL='public-read', ContentType=ContentType)\n return True\n \n except ClientError as e:\n return False",
"def upload_file(file_name, bucket, object_name='patients.log'):\n # If S3 object_name was not specified, use file_name\n if object_name is None:\n object_name = file_name\n # Upload the file\n s3_client = boto3.client('s3')\n try:\n response = s3_client.upload_file(file_name, bucket, object_name)\n except ClientError as e:\n logging.error(e)\n return False\n return True",
"def upload_file(self, bucket_name, file_path, key):\n self._client.upload_file(Filename=file_path, Bucket=bucket_name, Key=key)",
"def upload_file(file_name, bucket, object_name=None):\n\n # If S3 object_name was not specified, use file_name\n if object_name is None:\n object_name = file_name\n\n # Upload the file\n try:\n response = s3_client.upload_file(file_name, bucket, object_name)\n except ClientError as e:\n logging.error(e)\n return False\n return True",
"def upload_blob(self, bucket_name, file_name, contents):\n\n bucket = self.storage_client.bucket(bucket_name)\n blob = bucket.blob(file_name)\n blob.upload_from_string(contents)\n print(\n \"File {} uploaded to bucket {} as file {}.\".format(\n file_name, bucket_name, file_name\n )\n )",
"def upload(\n bucket: str, key: str, filename: str, session: Optional[boto3.Session] = None\n) -> None:\n s3_client = _get_client(session)\n LOGGER.info(\"uploading %s to s3://%s/%s...\", filename, bucket, key)\n s3_client.upload_file(Filename=filename, Bucket=bucket, Key=key)",
"def _upload(auth_http, project_id, bucket_name, file_path, object_name, acl):\n with open(file_path, 'rb') as f:\n data = f.read()\n content_type, content_encoding = mimetypes.guess_type(file_path)\n\n headers = {\n 'x-goog-project-id': project_id,\n 'x-goog-api-version': API_VERSION,\n 'x-goog-acl': acl,\n 'Content-Length': '%d' % len(data)\n }\n if content_type: headers['Content-Type'] = content_type\n if content_type: headers['Content-Encoding'] = content_encoding\n\n try:\n response, content = auth_http.request(\n 'http://%s.storage.googleapis.com/%s' % (bucket_name, object_name),\n method='PUT',\n headers=headers,\n body=data)\n except httplib2.ServerNotFoundError, se:\n raise Error(404, 'Server not found.')\n\n if response.status >= 300:\n raise Error(response.status, response.reason)\n\n return content",
"def upload(filename, bucket):\n k = Key(bucket)\n k.key = uuid.uuid1().hex\n print \"Uploading batch to {}, key: {}...\".format(bucket.name, k.key)\n k.set_contents_from_filename(filename, reduced_redundancy=True)\n print \" Done.\"\n \n\n\n bucket = openBucket(dest)",
"def upload_file(file_name: str, bucket: str, object_name: str =None) -> None:\n\n # If S3 object_name was not specified, use file_name\n if object_name is None:\n object_name = file_name\n\n # Upload the file\n s3_client = boto3.client(\"s3\")\n try:\n s3_client.upload_file(file_name, bucket, object_name)\n except ClientError as e:\n logging.error(e)",
"def test_put_file(self):\n self.prepare_uploads()\n backend = BackendS3(**self.config)\n uploads = self.upload_path\n src = os.path.join(uploads, 'demo-test.tar.gz')\n id = utils.generate_id('demo-test.tar.gz')\n backend.put(src, id)\n path = '/'.join(backend.id_to_path(id)) + '/demo-test.tar.gz'\n self.assertTrue(backend.exists(path))",
"def upload_file(file_name, bucket, object_name=None):\n\n # If S3 object_name was not specified, use file_name\n if object_name is None:\n object_name = file_name\n\n # Upload the file\n s3_client = boto3.client('s3', aws_access_key_id='', aws_secret_access_key='')\n try:\n response = s3_client.upload_file(file_name, bucket, object_name, ExtraArgs={'ACL':'public-read'})\n except ClientError as e:\n logging.error(e)\n return False\n return True",
"def upload_file(file_name, bucket, object_name=None):\n\n # If S3 object_name was not specified, use file_name\n if object_name is None:\n object_name = file_name\n\n # Upload the file\n s3_client = boto3.client('s3')\n try:\n response = s3_client.upload_file(file_name, bucket, object_name)\n except ClientError as e:\n logging.error(e)\n return False\n return True",
"def upload_file(file_name, bucket, object_name=None):\n\n # If S3 object_name was not specified, use file_name\n if object_name is None:\n object_name = file_name\n\n # Upload the file\n s3_client = boto3.client('s3')\n try:\n response = s3_client.upload_file(file_name, bucket, object_name)\n except ClientError as e:\n logging.error(e)\n return False\n return True"
] | [
"0.7600532",
"0.74214303",
"0.7285555",
"0.7134322",
"0.71040857",
"0.71002007",
"0.7091589",
"0.7090663",
"0.7078668",
"0.707424",
"0.70586765",
"0.69802374",
"0.69690824",
"0.6930814",
"0.69067895",
"0.687239",
"0.6847536",
"0.68146276",
"0.679449",
"0.6766457",
"0.67521816",
"0.67337114",
"0.6696551",
"0.6673576",
"0.66535825",
"0.6652695",
"0.66511756",
"0.66414",
"0.66356856",
"0.66356856"
] | 0.85554653 | 0 |
Download all files from bucket and save them to 'local_path' | def _download_dir_from_bucket(self, ext_path, local_path, force=False):
if os.path.exists(local_path) and not force:
logging.info('Path {} already exists. Not overwriting...'.format(local_path))
return
if os.path.exists(local_path) and force:
logging.info('Path {} already exists. Overwriting...'.format(local_path))
if self.s3:
            for object in self.bucket.objects.filter(Prefix=ext_path):
                local_name = object.key.replace(ext_path, local_path)
                self._download_from_bucket(object.key, local_name, force)
if self.gs:
storage_client = storage.Client()
bucket = storage_client.get_bucket(self.bucket_name)
blobs = bucket.list_blobs(prefix=ext_path)
for blob in blobs:
local_name = blob.name.replace(ext_path, local_path)
self._download_from_bucket(blob.name, local_name, force) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def read(self, local_path): # noqa: D402\n data_location = self.download_url\n data_location = rewrite_s3_links_locally(data_location)\n response = requests.get(data_location)\n write_file_locally(response.content, local_path)",
"def download_file(s3_path, local_path):\n s3.meta.client.download_file(bucket_name, s3_path, local_path)",
"def download_output_files(self):\n bucket_list = self.bucket.list(\"output/part\")\n for bucket_entry in bucket_list:\n key_string = str(bucket_entry.key)\n # check if file exists locally, if not: download it\n if not os.path.exists(key_string):\n bucket_entry.get_contents_to_filename(\"../\" + key_string)\n else:\n print \"output file already exists, please delete\"",
"def download_chain(s3_path, local_path, bucket_name='lwr-inverse-us-east'):\n s3 = boto3.resource(\"s3\")\n lwr_AIES = s3.Bucket(bucket_name)\n try:\n lwr_AIES.download_file(Key=s3_path, Filename=local_path)\n print(\"Download successful\")\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] == \"404\":\n print(\"The object does not exist.\")\n else:\n raise",
"def _download_from_bucket(self, ext_filename, local_filename, force=False):\n if os.path.exists(local_filename) and not force:\n logging.info('File {} already exists. Not overwriting...'.format(local_filename))\n return\n if os.path.exists(local_filename) and force:\n logging.info('File {} already exists. Overwriting...'.format(local_filename))\n else:\n logging.info('File {} does not exist. Downloading...'.format(local_filename))\n\n Path(os.path.dirname(local_filename)).mkdir(parents=True, exist_ok=True)\n\n if self.s3:\n self.bucket.download_file(ext_filename, local_filename)\n logging.info('Downloaded {} to {}'.format(ext_filename, local_filename))\n if self.gs:\n try:\n client = storage.Client()\n bucket = client.get_bucket(self.bucket_name)\n blob = storage.Blob(ext_filename, bucket)\n blob.download_to_filename(local_filename)\n logging.info('Downloaded {} to {}'.format(ext_filename, local_filename))\n except:\n logging.warning('Downloading failed')\n\n i += 1",
"def download(self):\n if os.path.isfile(self.lpath) and os.path.getsize(self.lpath) > 0:\n return\n print('Downloading %s' % self.path)\n if dry_run:\n return\n ldir = os.path.dirname(self.lpath)\n if not os.path.isdir(ldir):\n os.makedirs(ldir, 0o755)\n self.arts.s3_bucket.download_file(self.path, self.lpath)",
"def _download_s3_folder(s3, bucket_name, s3_store_path, local_dir):\n bucket = s3.Bucket(bucket_name)\n for obj in bucket.objects.filter(Prefix=s3_store_path):\n target = os.path.join(local_dir, os.path.relpath(obj.key, s3_store_path))\n if not os.path.exists(os.path.dirname(target)):\n os.makedirs(os.path.dirname(target))\n if obj.key[-1] == '/':\n continue\n bucket.download_file(obj.key, target)\n logger.info(\"{} Downloaded.\".format(obj.key)) # log progress",
"def downloadLocal(url_list,path):\n print(\"You are downloading {} images\".format(parser_arguments().limit),end=\" \");print(\"of {} class.\".format(parser_arguments().classes))\n print(\"Please, be patient :)\")\n for i in range(len(url_list)):\n filename= url_list[i].split(\"/\")[-1] # name of the picture file\n r = requests.get(url_list[i], stream =True)\n print(filename)\n\n with open(filename,'wb') as f : # create the file locally in binary-write mode\n r = requests.get(url_list[i], stream =True)\n shutil.copyfileobj(r.raw, f) #write our image to the file\n shutil.move(filename,path)\n print('Done!')",
"def fetch_s3_filepaths_to_local(keys, local_save_directory):\n local_paths = []\n for key in keys:\n local_path = '{}{}'.format(local_save_directory, get_s3_filename(key.name))\n\n with open(local_path, 'wb') as f:\n key.get_contents_to_file(f)\n logger.info('%s saved to %s', key.name, local_path)\n local_paths.append(local_path)\n\n return local_paths",
"def download_file(self):\n files = self.s3_client.list_objects_v2(\n Bucket=settings.PRIVATE_DATA_BUCKET_NAME, Prefix=f\"{self.import_type}/\"\n )[\"Contents\"]\n\n latest_file_key = sorted(files, key=lambda f: f[\"LastModified\"])[0][\"Key\"]\n print(latest_file_key)\n file = Path(self.tmp_dir.name) / self.import_type / \"full.csv\"\n file.parent.mkdir(exist_ok=True, parents=True)\n self.file_path = file\n with file.open(\"wb\") as f:\n self.s3_client.download_fileobj(\n settings.PRIVATE_DATA_BUCKET_NAME, latest_file_key, f\n )",
"def download_file(url_path):\n local_filename = url_path.split('/')[-3] + \"-\" + url_path.split('/')[-1]\n local_filename = OUT_DIR + local_filename\n print local_filename\n url = \"https://commoncrawl.s3.amazonaws.com/\" + url_path\n # NOTE the stream=True parameter\n req = requests.get(url, stream=True)\n with open(local_filename, 'wb') as write_f:\n for chunk in req.iter_content(chunk_size=1024):\n if chunk: # filter out keep-alive new chunks\n write_f.write(chunk)\n write_f.close()\n return local_filename",
"def get_s3_files(self, path, bucket, profile, files=None, mydir=None):\n\n # Set the path to the directory where files reside\n s3_path = bucket + path\n\n # Create folder on VM for downloaded files\n if not isinstance(mydir, str):\n mydir = path\n if not os.path.exists(mydir):\n os.makedirs(mydir)\n\n # If files is an array of filenames, download them\n if isinstance(files, list):\n print(\"Getting files...\")\n for filename in files:\n s3_filepath = s3_path + str(filename)\n if os.path.exists(mydir + str(filename)):\n print(\"File \" + filename + \" already downloaded in that location.\")\n else:\n print(s3_filepath)\n cmd = [\"aws\", \"s3\", \"--profile\", profile, \"cp\", s3_filepath, mydir]\n try:\n output = subprocess.check_output(\n cmd, stderr=subprocess.STDOUT, shell=True\n ).decode(\"UTF-8\")\n except Exception as e:\n output = e.output.decode(\"UTF-8\")\n print(\"ERROR:\" + output)\n # If files == None, which syncs the s3_path 'directory'\n else:\n print(\"Syncing directory \" + s3_path)\n cmd = [\"aws\", \"s3\", \"--profile\", profile, \"sync\", s3_path, mydir]\n try:\n output = subprocess.check_output(\n cmd, stderr=subprocess.STDOUT, shell=True\n ).decode(\"UTF-8\")\n except Exception as e:\n output = e.output.decode(\"UTF-8\")\n print(\"ERROR:\" + output)\n print(\"Finished\")",
"def sync_files(self, folder):\n blobs = GoogleStorage().list_blobs_with_prefix(self.bucket_name, folder)\n\n # Create the session folder if not existing\n project_home = os.environ['PROJ_HOME']\n root_folder = os.path.join(project_home, folder)\n if not os.path.isdir(root_folder):\n os.makedirs(root_folder)\n\n # Start download files\n for blob in blobs:\n destination_file_name = os.path.join(project_home, blob.name)\n\n # Check if the local file exist before download file\n if not os.path.isfile(destination_file_name):\n\n # Create folder to avoid exception when download\n destination_file_folder = os.path.dirname(destination_file_name)\n if not os.path.isdir(destination_file_folder):\n os.makedirs(destination_file_folder)\n\n blob.download_to_filename(destination_file_name)\n print('Downloaded file {}'.format(destination_file_name))",
"def download_files(self):",
"def download(self, bucket_name, file_name, file_path):\n\n self.client.download_file(bucket_name, file_name, file_path)",
"def download_file(self, bucket_name, key_name, local_file_location):\n try:\n self.logger.info(\"Downloading {}/{} from S3 to {}\".format(bucket_name, key_name, local_file_location))\n self.s3_resource.Bucket(bucket_name).download_file(key_name, local_file_location)\n except Exception as e:\n message = {'FILE': __file__.split('/')[-1], 'CLASS': self.__class__.__name__,\n 'METHOD': inspect.stack()[0][3], 'EXCEPTION': str(e)}\n self.logger.exception(message)\n raise",
"def sync_submission_files(self):\n blobs = GoogleStorage().list_blobs_with_prefix(self.bucket_name, 'submissions')\n\n for blob in blobs:\n destination_file_name = os.path.join(os.environ['PROJ_HOME'], blob.name)\n\n # Check if the local file exist before download file\n if not os.path.isfile(destination_file_name):\n blob.download_to_filename(destination_file_name)\n print('Downloaded file {destination_file_name}'.format(destination_file_name=destination_file_name))",
"def s3_download(path):\n with s3_read(path):\n # Reading the file will cache the file locally.\n pass",
"def download_object(self, s3_path, local_path):\n # creating local directory if necessary\n local_directory = os.path.dirname(local_path)\n if not os.path.exists(local_directory):\n logging.debug(\"Creating directory \\\"{}\\\" in local filesystem\".format(local_directory))\n os.makedirs(local_directory)\n\n # downloading file from S3\n logging.info(\"Downloading file from S3 \\\"{}\\\" to \\\"{}\\\"\".format(s3_path, local_path))\n bucket_name, key = S3Util.get_bucket_and_key(s3_path)\n self.s3_resource.Bucket(bucket_name).download_file(key, local_path)",
"def _download_file(self, artifact_path, local_path):\n full_path = self.base_artifact_path / artifact_path\n with self.managed_folder.get_file(str(full_path)) as remote_file:\n with open(local_path, \"wb\") as local_file:\n for line in remote_file:\n local_file.write(line)",
"def download_file(url: str, local_dir: str = '.', local_filename: str = '') -> str:\n os.makedirs(f'{local_dir}', exist_ok=True)\n local_filename = local_filename if local_filename else url.split('/')[-1]\n if os.path.exists(f'{local_dir}/{local_filename}'):\n print(\"{0}/{1} already exists. Skipping download.\".format(local_dir, local_filename))\n else:\n print(\"Downloading file from {0} to {1}/{2}.\".format(url, local_dir, local_filename))\n with requests.get(url, stream=True) as r:\n r.raise_for_status()\n with open(f'./{local_dir}/{local_filename}', 'wb') as f:\n for chunk in r.iter_content(chunk_size=128):\n f.write(chunk)\n print(\"Finished saving file from {0} to {1}/{2}.\".format(url, local_dir, local_filename))\n return f'{local_dir}/{local_filename}'",
"def download_bucket(blob_name, path_to_file):\r\n blob = bucket.blob(blob_name)\r\n blob.download_to_filename(path_to_file)",
"def download(url, bucket_id, key_prefix):\n\n baseFile = '_'.join(url.split('/')[-4:]) #os.path.basename(url)\n\n #move the file to a more uniq path\n os.umask(0002)\n temp_path = \"/tmp/\"\n file = os.path.join(temp_path,baseFile)\n bucket = conn.get_bucket(bucket_id)\n key = bucket.get_key(key_prefix + baseFile, validate=False)\n s3_exists = key.exists()\n file_exists = os.path.isfile(file)\n \n if not file_exists and s3_exists:\n sys.stderr.write(\"Downloading %s from S3\\n\"%url)\n key.get_contents_to_filename(file)\n sys.stderr.write(\"Downloaded %s from S3\\n\"%url)\n elif not file_exists and not s3_exists:\n sys.stderr.write(\"Downloading %s from the web\\n\"%url)\n try:\n req = urllib2.urlopen(url)\n total_size = int(req.info().getheader('Content-Length').strip())\n downloaded = 0\n CHUNK = 256 * 10240\n with open(file, 'wb') as fp:\n while True:\n chunk = req.read(CHUNK)\n downloaded += len(chunk)\n #print math.floor( (downloaded / total_size) * 100 )\n if not chunk: break\n fp.write(chunk)\n except urllib2.HTTPError, e:\n sys.stderr.write(\"HTTP Error: %s %s\\n\"%(e.code , url))\n return False\n except urllib2.URLError, e:\n sys.stderr.write(\"URL Error: %s %s\\n\"%(e.reason , url))\n return False\n sys.stderr.write(\"Downloaded %s from the web\\n\"%url)\n\n if not s3_exists:\n sys.stderr.write(\"Uploading %s to S3\\n\"%url)\n key.set_contents_from_filename(file)\n\n sys.stderr.write(\"File ready: %s\\n\"%url)\n return file",
"async def download_files(self, download_path):\n\n async with vt.Client(self.apikey) as client:\n while True:\n file_hash = await self.queue.get()\n file_path = os.path.join(download_path, file_hash)\n with open(file_path, \"wb\") as f:\n await client.download_file_async(file_hash, f)\n self.queue.task_done()",
"def download(self):\n cloud_path = f\"gs://{const.GCS_BUCKET}/{self.GCS_PATH}\"\n # download label file\n label_zip = download_file_from_gcs(\n cloud_path, self.root, self.LABEL_ZIP\n )\n with zipfile.ZipFile(label_zip, \"r\") as zip_dir:\n zip_dir.extractall(self.root)\n\n # download tfexamples for a dataset split\n tfexamples_zip = download_file_from_gcs(\n cloud_path, self.root, self.SPLITS_ZIP.get(self.split)\n )\n with zipfile.ZipFile(tfexamples_zip, \"r\") as zip_dir:\n zip_dir.extractall(self.root)",
"def download_from_s3(s3_path, local_path):\n # Connect to s3 using aws access key\n try:\n s3 = boto3.resource('s3',\n aws_access_key_id=os.environ.get(\"AWS_ACCESS_KEY_ID\"),\n aws_secret_access_key=os.environ.get(\"AWS_SECRET_ACCESS_KEY\"))\n logger.info(\"AWS S3 Connected.\")\n except botocore.exceptions.PartialCredentialsError:\n logger.error(\"AWS Credentials Invalid.\")\n\n bucket_name, s3_store_path = _parse_s3(s3_path)\n _download_s3_folder(s3, bucket_name, s3_store_path, local_path)\n logger.info(\"All Image Downloaded from S3.\")",
"def sync_up(self, bucket, remote_path, local_path):\n # TODO: make sync_down; both can probably use generic sync code\n b = self.conn.get_bucket(bucket)\n remote_ls = b.list(remote_path)\n remote_ls = [f.name for f in remote_ls]\n local_ls = os.listdir(local_path)\n for local_file in local_ls:\n remote_file = remote_path + local_file\n if remote_file not in remote_ls:\n logger.info('Transferring file to S3: %s', remote_file)\n key = b.new_key(remote_file)\n key.set_contents_from_filename(os.path.join(local_path, local_file))",
"def download_finish(self, cloud_file):",
"def download_file(url, local_filename):\n response = requests.get(url, stream=True)\n with open(local_filename, \"wb\") as outfile:\n for chunk in response.iter_content(chunk_size=1024):\n if chunk: # filter out keep-alive new chunks\n outfile.write(chunk)",
"def download_file(url, local_path):\n try:\n local_filename = normalizeFilenameToCommonDateFormat(url.split('/')[-1])\n \n destination_dir = local_path #os.path.join(local_path, os.path.splitext(os.path.basename(local_filename))[0])\n \n #if not os.path.exists(destination_dir):\n # os.makedirs(destination_dir)\n \n destination_file = os.path.join(destination_dir, local_filename)\n \n if not os.path.exists(destination_file):\n # NOTE the stream=True parameter \n r = requests.get(url, stream=True)\n with open(destination_file, 'wb') as f:\n for chunk in r.iter_content(chunk_size=1024): \n if chunk: # filter out keep-alive new chunks\n f.write(chunk)\n #f.flush() commented by recommendation from J.F.Sebastian\n # Sleep so that we aren't rude\n sleep(1)\n else:\n return destination_file + ' already '\n \n return destination_file\n except ValueError as err:\n return \"Skipping %s, not \" % (url.split('/')[-1])"
] | [
"0.71020555",
"0.7054091",
"0.7037894",
"0.69815624",
"0.6956301",
"0.6908696",
"0.68448865",
"0.6791386",
"0.6786215",
"0.67604995",
"0.67517626",
"0.6706798",
"0.66289604",
"0.66087854",
"0.65796745",
"0.6552148",
"0.6546638",
"0.6536262",
"0.650691",
"0.6451316",
"0.63997996",
"0.6386122",
"0.63223314",
"0.62965417",
"0.62854487",
"0.62716734",
"0.6267853",
"0.6258953",
"0.6258752",
"0.62565666"
] | 0.7218672 | 0 |
Download file from bucket and save it to 'local_filename' | def _download_from_bucket(self, ext_filename, local_filename, force=False):
if os.path.exists(local_filename) and not force:
logging.info('File {} already exists. Not overwriting...'.format(local_filename))
return
if os.path.exists(local_filename) and force:
logging.info('File {} already exists. Overwriting...'.format(local_filename))
else:
logging.info('File {} does not exist. Downloading...'.format(local_filename))
Path(os.path.dirname(local_filename)).mkdir(parents=True, exist_ok=True)
if self.s3:
self.bucket.download_file(ext_filename, local_filename)
logging.info('Downloaded {} to {}'.format(ext_filename, local_filename))
if self.gs:
try:
client = storage.Client()
bucket = client.get_bucket(self.bucket_name)
blob = storage.Blob(ext_filename, bucket)
blob.download_to_filename(local_filename)
logging.info('Downloaded {} to {}'.format(ext_filename, local_filename))
except:
                logging.warning('Downloading failed') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def download_file(s3_path, local_path):\n s3.meta.client.download_file(bucket_name, s3_path, local_path)",
"def download_file(self, bucket_name, key_name, local_file_location):\n try:\n self.logger.info(\"Downloading {}/{} from S3 to {}\".format(bucket_name, key_name, local_file_location))\n self.s3_resource.Bucket(bucket_name).download_file(key_name, local_file_location)\n except Exception as e:\n message = {'FILE': __file__.split('/')[-1], 'CLASS': self.__class__.__name__,\n 'METHOD': inspect.stack()[0][3], 'EXCEPTION': str(e)}\n self.logger.exception(message)\n raise",
"def download_file(bucket,file_name):\n with open(file_name, 'wb') as f:\n s3.download_fileobj(bucket, file_name,f)\n print(file_name, \": is downloaded\")",
"def download_file(url_path):\n local_filename = url_path.split('/')[-3] + \"-\" + url_path.split('/')[-1]\n local_filename = OUT_DIR + local_filename\n print local_filename\n url = \"https://commoncrawl.s3.amazonaws.com/\" + url_path\n # NOTE the stream=True parameter\n req = requests.get(url, stream=True)\n with open(local_filename, 'wb') as write_f:\n for chunk in req.iter_content(chunk_size=1024):\n if chunk: # filter out keep-alive new chunks\n write_f.write(chunk)\n write_f.close()\n return local_filename",
"def download_bucket(blob_name, path_to_file):\r\n blob = bucket.blob(blob_name)\r\n blob.download_to_filename(path_to_file)",
"def download_file(url: str, local_dir: str = '.', local_filename: str = '') -> str:\n os.makedirs(f'{local_dir}', exist_ok=True)\n local_filename = local_filename if local_filename else url.split('/')[-1]\n if os.path.exists(f'{local_dir}/{local_filename}'):\n print(\"{0}/{1} already exists. Skipping download.\".format(local_dir, local_filename))\n else:\n print(\"Downloading file from {0} to {1}/{2}.\".format(url, local_dir, local_filename))\n with requests.get(url, stream=True) as r:\n r.raise_for_status()\n with open(f'./{local_dir}/{local_filename}', 'wb') as f:\n for chunk in r.iter_content(chunk_size=128):\n f.write(chunk)\n print(\"Finished saving file from {0} to {1}/{2}.\".format(url, local_dir, local_filename))\n return f'{local_dir}/{local_filename}'",
"def download(self, bucket_name, file_name, file_path):\n\n self.client.download_file(bucket_name, file_name, file_path)",
"def download(self, bucket, object, filename=None):\n service = self.get_conn()\n downloaded_file_bytes = service \\\n .objects() \\\n .get_media(bucket=bucket, object=object) \\\n .execute()\n\n # Write the file to local file path, if requested.\n if filename:\n write_argument = 'wb' if isinstance(downloaded_file_bytes, bytes) else 'w'\n with open(filename, write_argument) as file_fd:\n file_fd.write(downloaded_file_bytes)\n\n return downloaded_file_bytes",
"def download_blob(source_blob_name, destination_file_name, bucket_name=\"bts-ml-data\"):\n # bucket_name = \"your-bucket-name\"\n # source_blob_name = \"storage-object-name\"\n # destination_file_name = \"local/path/to/file\"\n\n bucket = storage_client.bucket(bucket_name)\n blob = bucket.blob(source_blob_name)\n\n blob.download_to_filename(destination_file_name)\n\n print(\n \"Blob {} downloaded to {}.\".format(\n source_blob_name, destination_file_name\n )\n )",
"def download_blob(bucket_name, source_blob_name, destination_file_name):\n storage_client = storage.Client()\n try:\n bucket = storage_client.get_bucket(bucket_name)\n blob = bucket.blob(source_blob_name)\n \n blob.download_to_filename(destination_file_name)\n \n print('Blob {} downloaded to {}.'.format(\n source_blob_name,\n destination_file_name)) \n except:\n print(\"User does not have access to that bucket. Trying public link:\")\n gcs_url = 'https://%(bucket)s.storage.googleapis.com/%(file)s' % {'bucket':bucket_name, 'file':source_blob_name}\n urllib.urlretrieve(gcs_url, destination_file_name)\n print (\"Download complete\")",
"def download_file_from_icos(icos_obj, bucket: str, local_file_name: str, key: str) -> None:\r\n try:\r\n icos_obj.download_file(Bucket=bucket, Key=key, Filename=local_file_name)\r\n except Exception as e:\r\n print(Exception, e)\r\n else:\r\n print('File `{}` downloaded from ICOS and saved locally as `{}`.'.format(key, local_file_name))",
"def download_chain(s3_path, local_path, bucket_name='lwr-inverse-us-east'):\n s3 = boto3.resource(\"s3\")\n lwr_AIES = s3.Bucket(bucket_name)\n try:\n lwr_AIES.download_file(Key=s3_path, Filename=local_path)\n print(\"Download successful\")\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] == \"404\":\n print(\"The object does not exist.\")\n else:\n raise",
"def download_file(bucket, key, filename):\n validate_bucket_name(bucket)\n validate_key_name(key)\n client = get_client()\n\n client.download_file(bucket, key, filename)",
"def download_file(Bucket=None, Key=None, Filename=None, ExtraArgs=None, Callback=None, Config=None):\n pass",
"def read(self, local_path): # noqa: D402\n data_location = self.download_url\n data_location = rewrite_s3_links_locally(data_location)\n response = requests.get(data_location)\n write_file_locally(response.content, local_path)",
"def download_object(self, s3_path, local_path):\n # creating local directory if necessary\n local_directory = os.path.dirname(local_path)\n if not os.path.exists(local_directory):\n logging.debug(\"Creating directory \\\"{}\\\" in local filesystem\".format(local_directory))\n os.makedirs(local_directory)\n\n # downloading file from S3\n logging.info(\"Downloading file from S3 \\\"{}\\\" to \\\"{}\\\"\".format(s3_path, local_path))\n bucket_name, key = S3Util.get_bucket_and_key(s3_path)\n self.s3_resource.Bucket(bucket_name).download_file(key, local_path)",
"def download_file(self, source_file_name, destination_file_name, **keyword_args):\n blob = self.bucket.blob(source_file_name)\n blob.download_to_filename(destination_file_name, **keyword_args)\n print(f\"Download file {source_file_name} and save as {destination_file_name}\")",
"def download_file(self):\n files = self.s3_client.list_objects_v2(\n Bucket=settings.PRIVATE_DATA_BUCKET_NAME, Prefix=f\"{self.import_type}/\"\n )[\"Contents\"]\n\n latest_file_key = sorted(files, key=lambda f: f[\"LastModified\"])[0][\"Key\"]\n print(latest_file_key)\n file = Path(self.tmp_dir.name) / self.import_type / \"full.csv\"\n file.parent.mkdir(exist_ok=True, parents=True)\n self.file_path = file\n with file.open(\"wb\") as f:\n self.s3_client.download_fileobj(\n settings.PRIVATE_DATA_BUCKET_NAME, latest_file_key, f\n )",
"def download_file(url, local_filename, update=False):\n if os.path.isfile(local_filename):\n if not update:\n return\n else:\n os.remove(local_filename)\n\n r = requests.get(url, stream=True)\n # http://stackoverflow.com/questions/15352668/download-and-decompress-gzipped-file-in-memory\n with open(local_filename, 'wb') as f:\n for chunk in r.iter_content(chunk_size=1024):\n if chunk: # filter out keep-alive new chunks\n f.write(chunk)",
"def urlgrab(self, url, filename=None, **kwargs):\n blob_location = \"%s/%s\" % (self.base_path, url)\n self.verbose_logger.info(\"downloading gs://%s/%s to %s\" % (self.bucket.name, blob_location, filename))\n url = url.lstrip('/')\n if not filename:\n filename = url\n\n blob = storage.blob.Blob(name=blob_location,bucket = self.bucket)\n blob.download_to_filename(filename)\n return filename",
"def download_file(url, local_path):\n try:\n local_filename = normalizeFilenameToCommonDateFormat(url.split('/')[-1])\n \n destination_dir = local_path #os.path.join(local_path, os.path.splitext(os.path.basename(local_filename))[0])\n \n #if not os.path.exists(destination_dir):\n # os.makedirs(destination_dir)\n \n destination_file = os.path.join(destination_dir, local_filename)\n \n if not os.path.exists(destination_file):\n # NOTE the stream=True parameter \n r = requests.get(url, stream=True)\n with open(destination_file, 'wb') as f:\n for chunk in r.iter_content(chunk_size=1024): \n if chunk: # filter out keep-alive new chunks\n f.write(chunk)\n #f.flush() commented by recommendation from J.F.Sebastian\n # Sleep so that we aren't rude\n sleep(1)\n else:\n return destination_file + ' already '\n \n return destination_file\n except ValueError as err:\n return \"Skipping %s, not \" % (url.split('/')[-1])",
"def download_blob(bucket_name, source_blob_name, destination_file_name):\n storage_client = storage.Client()\n bucket = storage_client.get_bucket(bucket_name)\n blob = bucket.blob(source_blob_name)\n\n blob.download_to_filename(destination_file_name)\n\n print('Blob {} downloaded to {}.'.format(\n source_blob_name,\n destination_file_name))",
"def download_file(url, local_filename):\n response = requests.get(url, stream=True)\n with open(local_filename, \"wb\") as outfile:\n for chunk in response.iter_content(chunk_size=1024):\n if chunk: # filter out keep-alive new chunks\n outfile.write(chunk)",
"def download_blob(bucket_name, source_blob_name, destination_file_name):\n # The ID of your GCS bucket\n # bucket_name = \"your-bucket-name\"\n\n # The ID of your GCS object\n # source_blob_name = \"storage-object-name\"\n\n # The path to which the file should be downloaded\n # destination_file_name = \"local/path/to/file\"\n\n storage_client = storage.Client()\n\n bucket = storage_client.bucket(bucket_name)\n\n # Construct a client side representation of a blob.\n # Note `Bucket.blob` differs from `Bucket.get_blob` as it doesn't retrieve\n # any content from Google Cloud Storage. As we don't need additional data,\n # using `Bucket.blob` is preferred here.\n blob = bucket.blob(source_blob_name)\n blob.download_to_filename(destination_file_name)\n\n print(\n \"Downloaded storage object {} from bucket {} to local file {}.\".format(\n source_blob_name, bucket_name, destination_file_name))",
"def download_file(self, bucket, key, download_dir, download_file=None, temp_dir=None):\n for argument in [(\"Bucket\", bucket), (\"Key\", key)]:\n if not isinstance(argument[1], six.string_types):\n raise ValueError('{} must be a string'.format(argument[0]))\n\n if not temp_dir:\n temp_dir = download_dir\n\n if not download_file:\n download_file = os.path.basename(key)\n\n create_directory_tree(temp_dir)\n create_directory_tree(download_dir)\n\n local_file_path = os.path.join(download_dir, download_file)\n\n # The file was already downloaded\n if os.path.isfile(local_file_path):\n return local_file_path\n\n # Avoid other instances to download the same file\n filelock_filepath = get_filelock_path(download_file)\n lock = filelock.FileLock(filelock_filepath)\n try:\n with lock.acquire(timeout=10):\n downloaded_file = self._download_parts(bucket, key, download_file, temp_dir)\n if downloaded_file is not None and downloaded_file != local_file_path:\n os.rename(downloaded_file, local_file_path)\n except filelock.Timeout:\n raise S3ResumableBloqued(\"Another instance is currently downloading {}\".format(\n local_file_path))\n\n return local_file_path",
"def download(url, bucket_id, key_prefix):\n\n baseFile = '_'.join(url.split('/')[-4:]) #os.path.basename(url)\n\n #move the file to a more uniq path\n os.umask(0002)\n temp_path = \"/tmp/\"\n file = os.path.join(temp_path,baseFile)\n bucket = conn.get_bucket(bucket_id)\n key = bucket.get_key(key_prefix + baseFile, validate=False)\n s3_exists = key.exists()\n file_exists = os.path.isfile(file)\n \n if not file_exists and s3_exists:\n sys.stderr.write(\"Downloading %s from S3\\n\"%url)\n key.get_contents_to_filename(file)\n sys.stderr.write(\"Downloaded %s from S3\\n\"%url)\n elif not file_exists and not s3_exists:\n sys.stderr.write(\"Downloading %s from the web\\n\"%url)\n try:\n req = urllib2.urlopen(url)\n total_size = int(req.info().getheader('Content-Length').strip())\n downloaded = 0\n CHUNK = 256 * 10240\n with open(file, 'wb') as fp:\n while True:\n chunk = req.read(CHUNK)\n downloaded += len(chunk)\n #print math.floor( (downloaded / total_size) * 100 )\n if not chunk: break\n fp.write(chunk)\n except urllib2.HTTPError, e:\n sys.stderr.write(\"HTTP Error: %s %s\\n\"%(e.code , url))\n return False\n except urllib2.URLError, e:\n sys.stderr.write(\"URL Error: %s %s\\n\"%(e.reason , url))\n return False\n sys.stderr.write(\"Downloaded %s from the web\\n\"%url)\n\n if not s3_exists:\n sys.stderr.write(\"Uploading %s to S3\\n\"%url)\n key.set_contents_from_filename(file)\n\n sys.stderr.write(\"File ready: %s\\n\"%url)\n return file",
"def download_file(self, bucket, key, local_path):\n\n if self.key_exists(bucket, key):\n self._s3.Bucket(bucket).download_file(key, local_path)\n\n else:\n raise S3FileNotFoundException(\"File Not Found - \" + key)\n\n return os.path.isfile(local_path)",
"def s3_download(path):\n with s3_read(path):\n # Reading the file will cache the file locally.\n pass",
"def download_file(self, bucket_name, object_name, file_name):\n self._client.download_file(bucket_name, object_name, file_name)",
"def download(self, bucket_name, key_name, fname):\n dname = os.path.dirname(fname)\n if dname and not os.path.exists(dname):\n os.makedirs(dname)\n bucket = self.s3_.get_bucket(bucket_name)\n key = bucket.get_key(key_name)\n return key.get_contents_to_filename(fname)"
] | [
"0.78716594",
"0.7565938",
"0.7506007",
"0.7422166",
"0.73331714",
"0.7330301",
"0.73252624",
"0.72543865",
"0.72113806",
"0.7189365",
"0.71500474",
"0.71356577",
"0.71005404",
"0.70790374",
"0.7073477",
"0.70685893",
"0.7034173",
"0.7021857",
"0.7018799",
"0.700558",
"0.7002941",
"0.6993194",
"0.6986597",
"0.69743556",
"0.69737905",
"0.6959816",
"0.6940246",
"0.69353235",
"0.69350624",
"0.69128615"
] | 0.8098665 | 0 |
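The positive passage in the row above pulls a single object out of either S3 (through a boto3 bucket handle) or Google Cloud Storage, creating the local directory first and skipping the transfer when the file already exists unless `force` is set. Below is a minimal standalone sketch of the same flow, assuming boto3 and google-cloud-storage are installed and authenticated; the function name `download_from_bucket` and its arguments are illustrative rather than taken from the row, and the only client calls used are the ones the snippets above already rely on (`Bucket.download_file`, `Client.bucket`, `Blob.download_to_filename`).

```python
# Minimal sketch (not the row's own code): fetch one object from S3 or GCS
# into a local file, creating parent directories as needed.
# Assumes boto3 / google-cloud-storage are installed and credentials are configured.
import os

import boto3
from google.cloud import storage


def download_from_bucket(bucket_name: str, key: str, local_path: str, use_s3: bool = True) -> str:
    # make sure the destination directory exists
    os.makedirs(os.path.dirname(local_path) or ".", exist_ok=True)
    if use_s3:
        # boto3 high-level resource API: Bucket(...).download_file(key, filename)
        boto3.resource("s3").Bucket(bucket_name).download_file(key, local_path)
    else:
        # google-cloud-storage: Client().bucket(...).blob(...).download_to_filename(...)
        blob = storage.Client().bucket(bucket_name).blob(key)
        blob.download_to_filename(local_path)
    return local_path
```

The `force` flag and the existence check from the original are omitted here for brevity and are easy to add back around the download calls.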
Report CV results and save them to file | def report_cv_results(self, results, scores=['score'], filename=None, n_top=5):
res = ""
for score in scores:
res += "{}\n".format(score)
res += "-------------------------------\n"
for i in range(1, n_top + 1):
candidates = np.flatnonzero(results['rank_test_{}'.format(score)] == i)
for candidate in candidates:
res += "Model with rank: {0}\n".format(i)
res += "Mean validation {0}: {1:.3f} (std: {2:.3f})\n".format(
score,
results['mean_test_{}'.format(score)][candidate],
results['std_test_{}'.format(score)][candidate])
res += "Parameters: {0}\n".format(results['params'][candidate])
res += "\n"
if filename is not None:
with open(filename, 'w') as f:
f.write(res)
self._upload_to_bucket(filename, filename)
logging.info(res) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_report(cv_rfc, lrc, x_test_variable, y_test_dep):\n # Ensure the function works\n try:\n cls.classification_report_image(\n cv_rfc, lrc, x_test_variable, y_test_dep)\n logging.info(\"Successfully Plotting Classification Results\")\n except Exception as err:\n logging.error(\"Errors in Plotting Classification Results\")\n raise err\n # Ensure the output exists\n for cols in [\"roc_curve\", \"explanation\"]:\n try:\n assert os.path.isfile(\"images/results/\"+cols+\".png\")\n except AssertionError as err:\n logging.error(\"Errors in generatingi %s classification file\", cols)\n raise err",
"def write_results(detections, filename):\n start = time.time()\n\n boxes, labels, scores = detections\n with PathManager.open(filename, \"w\") as f:\n for key in boxes.keys():\n for box, label, score in zip(boxes[key], labels[key], scores[key]):\n f.write(\n \"%s,%.03f,%.03f,%.03f,%.03f,%d,%.04f\\n\"\n % (key, box[1], box[0], box[3], box[2], label, score)\n )\n\n logger.info(\"AVA results wrote to %s\" % filename)\n logger.info(\"\\ttook %d seconds.\" % (time.time() - start))",
"def save(self,filename):\n f = open(filename,'w')\n f.write('Test results for %s v%s\\n' % (self.description,self.version))\n f.write('Series ran by %s\\n\\n' % self.person_name)\n for result in self.values():\n f.write('%-70s : %s\\n' % (result.id,result.outcome))\n if result.outcome != Result.PASS:\n for (kind, annotation) in result.annotations.items():\n f.write('%s:\\n%s\\n' % (kind, as_utf8(annotation)))\n f.write('\\n')\n f.write('\\n\\nPasses: %i\\n' % self.get_pass_count())\n f.write('Fails: %i\\n' % self.get_fail_count())\n f.write('Errors: %i\\n' % self.get_error_count())\n f.write('Untested: %i\\n' % self.get_untested_count())\n f.write('Skipped: %i\\n' % self.get_skipped_count())\n f.close()",
"def status_print(optim_result):\n \n # Get all the models tested so far in DataFrame format\n all_models = pd.DataFrame(bayes_cv_tuner.cv_results_) \n \n # Get current parameters and the best parameters \n best_params = pd.Series(bayes_cv_tuner.best_params_)\n print('Model #{}\\nBest mse: {}\\nBest params: {}\\n'.format(\n len(all_models),\n np.round(bayes_cv_tuner.best_score_, 4),\n bayes_cv_tuner.best_params_\n ))\n \n # Save all model results\n clf_name = bayes_cv_tuner.estimator.__class__.__name__\n all_models.to_csv(clf_name+\"_cv_results.csv\")",
"def finalize_result(self):\n logging.debug(\"finalize_result()\")\n with open(self.html_file, \"a\") as result_file:\n result_file.write(\"<br/>Analyzis successful\")\n with open(self.txt_file, \"a\") as result_file:\n result_file.write(\"Analyzis successful\")",
"def report(self, output_dir):",
"def collect(self,outfilename):\n # TODO actually gather results and check if run is successful\n if os.path.isfile(outfilename):\n self.completed=True\n else:\n self.completed=False",
"def exportEvaluation(self,results,url):\n profbox()\n if not os.path.exists(url):\n open(url, 'w').close()\n myfile = open(url, 'a')\n\n wr = csv.writer(myfile)\n r = numpy.array(results)\n if len(r.shape) == 1:\n wr.writerow(results)\n else:\n wr.writerows(results)",
"def exportEvaluation(self, results, url):\r\n # research\r\n profprint()\r\n if not os.path.exists(url):\r\n print \"creating new results file: \",url\r\n open(url, 'w').close()\r\n myfile = open(url, 'a')\r\n\r\n wr = csv.writer(myfile)\r\n r = numpy.array(results)\r\n if len(r.shape) == 1:\r\n wr.writerow(results)\r\n else:\r\n wr.writerows(results)",
"def classification_report(self):\n print('Classification Report ...')\n cr = classification_report(self.y_test, self.y_pred, output_dict=True)\n df = pd.DataFrame(cr)\n df.to_csv('csv/cr/' + self.model_name + '_' + self.label + '_cr.csv')\n print(cr)",
"def save_results(PATH, data, filename):\n with open(PATH + '/' + filename + \".txt\",\"w\") as file:\n file.write(\"Results of heuristic models with mean and standard deviation.\\n\")\n for result in data:\n write_result(file, result)\n file.close()\n print('results saved in:'+ PATH + '/' + filename + \".txt\")",
"def save_results(self, path):\n create_folder(path)\n self.get_scores().to_csv(path + r'/scores.csv', index=False)\n self.get_results().to_csv(path + r'/results.csv', index=False)\n self.get_pivot_last_epoch().to_csv(path + r'/pivot_last_epoch.csv', index=True)",
"def log_results(self, filename=None):\n\n self.ad_log['train_auc'] = self.diag['train']['auc'][-1]\n self.ad_log['train_accuracy'] = self.diag['train']['acc'][-1]\n self.ad_log['train_time'] = self.train_time\n\n self.ad_log['test_auc'] = self.diag['test']['auc'][-1]\n self.ad_log['test_accuracy'] = self.diag['test']['acc'][-1]\n self.ad_log['test_time'] = self.test_time\n\n self.ad_log.save_to_file(filename=filename)",
"def output_results(self, filename):\n\n self.data.plot(title='Result of applying {} onto data set'.format(self.transformations[-1]))\n plt.savefig(\"results/{}.png\".format(filename))\n plt.close()",
"def test_file(self, file_name, version, classifier_type):\n labels = []\n with open(file_name) as f:\n for line in f.readlines():\n print(line,self.predict(line))\n labels.append(self.predict(line))\n \n filename = 'test_results-' + classifier_type + '-' + version + '.txt'\n \n with open(filename, 'w') as f:\n for label in labels:\n f.write(str(label)+\"\\n\")\n \n print (\"Results from \",file_name,\" printed to:\",filename)",
"def analyze(results: str):\n # Initialize plot\n co.nb.matplotlib_inline()\n plt.figure(figsize=(5, 4), linewidth=1)\n\n for model in os.listdir(results):\n # For each model, read in the results DataFrame\n df = pd.read_csv(os.path.join(results, model))\n\n # Compute statistics\n auc = roc_auc_score(df.true, df.pred)\n fpr, tpr, _ = roc_curve(df.true, df.proba)\n\n # Plot the stats\n plt.plot(fpr, tpr, label=f'{model} Score: ' + str(round(auc, 5)))\n\n # Finish up the plot and print it\n plt.plot([0, 1], [0, 1], 'k--', label='Random: 0.5')\n plt.xlabel('False positive rate')\n plt.ylabel('True positive rate')\n plt.title('ROC Curve')\n plt.legend(loc='best')\n plt.show()",
"def save_results(self, name):\n # metrics in npz format\n # filename = join(self.out_dir, '%s_metrics.npz' % name)\n # np.savez(filename, tel_r=self.tel_r, cable_length=self.cable_length,\n # cable_length_2=self.cable_length_2, uv_hist=self.uv_hist)\n\n if self.cable_length: # Empty dict() evaluate to False\n # ASCII CSV table of radius vs cable length\n filename = join(self.out_dir, '%s_cables.txt' % name)\n data = np.array([[k, v] for k, v in\n self.cable_length.iteritems()])\n data = np.sort(data, axis=0)\n np.savetxt(filename, data, fmt=b'%.10f %.10f')\n\n if self.cable_length_2: # Empty dict() evaluate to False\n # ASCII CSV table of radius vs cable length\n filename = join(self.out_dir, '%s_cables_2.txt' % name)\n data = np.array([[k, v] for k, v in\n self.cable_length_2.iteritems()])\n data = np.sort(data, axis=0)\n np.savetxt(filename, data, fmt=b'%.10f %.10f')\n\n if self.cable_length_3: # Empty dict() evaluates to False\n filename = join(self.out_dir, '%s_cables.txt' % name)\n data = np.array([[k, v] for k, v in\n self.cable_length_3.iteritems()])\n data = np.sort(data, axis=0)\n np.savetxt(filename, data, fmt=b'%.10f %.10f')\n\n # Save a pickle with the PSF comparison info.\n if self.psf:\n filename = join(self.out_dir, '%s_psf.p' % name)\n pickle.dump(self.psf, open(filename, 'wb'))\n\n # Save a pickle of uv hist data.\n if self.uv_hist:\n filename = join(self.out_dir, '%s_uv_hist.p' % name)\n pickle.dump(self.uv_hist, open(filename, 'wb'))",
"def log_results(best_model, model_name, max_features, train_score, test_score,\n score_fp):\n\n # ensure the directorys where metrics are stored are created\n if not os.path.exists(os.path.dirname(score_fp)):\n os.makedirs(os.path.dirname(score_fp), exist_ok=True)\n\n st = datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')\n with open(score_fp, 'a+') as f:\n f.write(st + '\\n')\n f.write('-' * 100 + '\\n')\n f.write('Model Run: {}\\n\\n'.format(model_name))\n f.write('Params: {}\\n\\n'.format(best_model.get_params())) \n f.write('Max features: {}\\n\\n'.format(max_features))\n f.write('Train Score: {}\\n\\n'.format(train_score))\n f.write('Test Score: {}\\n\\n'.format(test_score))",
"def save(self):\n pickle_save(self.results, 'results', self.main_dir)",
"def log_results(self, path):\n pd.DataFrame(self.results).to_csv(path)",
"def save_result(self):\n self.print_to_console()",
"def evaluate(start, stop, step, save_file=\"results.txt\", img_size=None):\n\n if img_size:\n bb_format = BBFormat.YOLO\n else:\n bb_format = BBFormat.XYWH\n\n results = []\n\n for i in range(start, stop+step, step):\n dir_dets = os.path.join(TOP_DIR_DETS, str(i))\n\n det_bbs = converter.text2bb(dir_dets, bb_type=BBType.DETECTED,\n bb_format=bb_format, type_coordinates=CoordinatesType.ABSOLUTE, img_size=img_size)\n gt_bbs = converter.text2bb(DIR_GTS, bb_type=BBType.GROUND_TRUTH,\n bb_format=BBFormat.XYWH, type_coordinates=CoordinatesType.ABSOLUTE, img_size=img_size)\n\n\n result = {\"iteration\": i}\n coco_res = coco_evaluator.get_coco_summary(gt_bbs, det_bbs)\n dict_res = pascal_voc_evaluator.get_pascalvoc_metrics(gt_bbs, det_bbs, 0.5, generate_table=True, method=MethodAveragePrecision.EVERY_POINT_INTERPOLATION)\n\n # Merge dicts\n result |= coco_res\n result |= {'Pascal COCO': dict_res['mAP']}\n results.append(result)\n\n print(i)\n\n\n keys = results[0].keys()\n with open(save_file, 'w', newline='') as output_file:\n dict_writer = csv.DictWriter(output_file, keys)\n dict_writer.writeheader()\n dict_writer.writerows(results)",
"def _publish_results(self):\n\n doc = Document()\n date = get_stamp()\n\n labels = ExperimentTemplateBase.parameters_to_string(self._topology_parameters_list)\n\n title = 'Mutual Information labels vs ' + self._experiment_name\n self.plot_save(title,\n self._mutual_info,\n self._baseline_mutual_info,\n 'Norm. mutual information',\n labels, date, self._docs_folder, doc)\n\n title = 'Weak classifier accuracy labels vs ' + self._experiment_name\n self.plot_save(title,\n self._classifier_accuracy,\n self._baseline_classifier_accuracy,\n 'Classifier accuracy',\n labels, date, self._docs_folder, doc) #, smoothing_size=3)\n\n title = 'average delta'\n f = plot_multiple_runs(\n self._different_steps[0], # here the X axes are identical\n self._average_delta,\n title=title,\n ylabel='log(delta)',\n xlabel='steps',\n labels=labels\n )\n add_fig_to_doc(f, path.join(self._docs_folder, title), doc)\n\n title = 'average boosting duration'\n f = plot_multiple_runs(\n self._different_steps[0],\n self._average_boosting_dur,\n title=title,\n ylabel='duration',\n xlabel='steps',\n labels=labels\n )\n add_fig_to_doc(f, path.join(self._docs_folder, title), doc)\n\n doc.write_file(path.join(self._docs_folder, to_safe_name(self._complete_name() + date + \".html\")))\n\n print('done')",
"def output():\n\n print(\"\\n*****************************************************************\")\n print(\"\\nAll transfer data is saved in 'All_transfer_frequencies.csv'\")\n print(\"\\nThe most likely transfers are saved in 'likely_transfers.csv'\")\n\n os.mkdir(\"Transfer_results\")\n os.system(\"mv *.csv Transfer_results\")\n\n print(\"\\nBoth results are saved in the 'Transfer_results' directory\")\n print(\"\\nScript finished running\")\n print(\"\\n*****************************************************************\")",
"def write_result(self, file_path):\n f = open(file_path, \"a\")\n f.write(\"{}\\t{}\\n\".format(*[self.name, str(self.ROC_AUC_value)]))\n f.close()",
"def save_submission(results, file_name='submission.csv'):\n submission_path = path.join('..', 'output', file_name)\n results.to_csv(submission_path)",
"def save_fit_result(self, fitresult, outfile):\n save_modelresult(fitresult, outfile)",
"def dump_result(self, primes, covers):\n\n fname = '{0}-result.csv'.format(os.path.splitext(self.fname)[0])\n\n for f in self.feats:\n if len(f) > 2:\n print('c2 non-binary features detected; not dumping the result')\n return\n\n with open(fname, 'w') as fp:\n print(','.join(self.names), file=fp)\n\n for cid in covers:\n for pid in covers[cid]:\n feats = ['' for n in range(len(self.names))]\n\n for l in primes[cid][pid - 1]:\n name, val = self.fvmap.opp[l]\n feats[self.nm2id[name]] = val\n\n # label\n name, val = self.fvmap.opp[cid]\n feats[self.nm2id[name]] = val\n\n print(','.join(feats), file=fp)",
"def save_results(self):\n results = pd.concat([\n pd.DataFrame(self.IDs.cpu().numpy(), columns= ['ID']), \n pd.DataFrame(self.predicted_labels.cpu().numpy(), columns= ['predicted_label']),\n pd.DataFrame(self.correct_predictions.cpu().numpy(), columns= ['correct_prediction']),\n pd.DataFrame(self.epistemic_uncertainty.cpu().numpy(), columns= ['epistemic_uncertainty']), \n pd.DataFrame(self.aleatoric_uncertainty.cpu().numpy(), columns= ['aleatoric_uncertainty']), \n pd.DataFrame(self.total_uncertainty.cpu().numpy(), columns= ['total_uncertainty']), \n ], axis=1)\n\n create_results_directory()\n results.to_csv('results/{}_{}_results.csv'.format(self.__class__.__name__, datetime.datetime.now().replace(microsecond=0).isoformat()), index=False)",
"def run(self):\n # get components list\n #component_id_list = self.getComponentsList()\n asset_id = 3776\n component_id_list = self.get_component_info_for_one_asset(asset_id)\n # call computeResults method\n results = self.compute_results(component_id_list)\n # write to the output file\n self.write_to_file(results)"
] | [
"0.65733397",
"0.65376616",
"0.6398993",
"0.6282908",
"0.6274721",
"0.62072086",
"0.6165067",
"0.6141804",
"0.6075014",
"0.60728824",
"0.60719407",
"0.603311",
"0.60245275",
"0.59826034",
"0.5944425",
"0.5938252",
"0.5937966",
"0.5919031",
"0.5869347",
"0.5869213",
"0.58630985",
"0.5840374",
"0.58063483",
"0.5806348",
"0.57970697",
"0.5780443",
"0.5770415",
"0.57623893",
"0.5758673",
"0.5739641"
] | 0.68240047 | 0 |
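The positive passage in the row above formats a scikit-learn style `cv_results_` mapping (keys such as `rank_test_<score>`, `mean_test_<score>`, `std_test_<score>` and `params`). The sketch below shows one way such a mapping is typically produced and what a call to a reporter like `report_cv_results` would look like; the estimator, parameter grid, score names and file name are placeholders rather than values from the row.

```python
# Sketch only: build a multi-metric cv_results_ dict of the shape the row's
# report_cv_results() iterates over. Estimator, grid and filename are placeholders.
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV

X, y = make_classification(n_samples=200, random_state=0)
search = GridSearchCV(
    RandomForestClassifier(random_state=0),
    param_grid={"n_estimators": [50, 100], "max_depth": [3, None]},
    scoring=["accuracy", "f1"],  # yields rank_test_accuracy, mean_test_f1, ...
    refit="accuracy",
    cv=3,
)
search.fit(X, y)

# search.cv_results_ now contains 'rank_test_accuracy', 'mean_test_f1',
# 'std_test_f1', 'params', etc., i.e. exactly the keys the reporter reads:
# reporter.report_cv_results(search.cv_results_,
#                            scores=["accuracy", "f1"],
#                            filename="cv_report.txt")
```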
locate droplets in a (potentially periodic) data set on a Cartesian grid. This function locates droplets respecting periodic boundary conditions. | def _locate_droplets_in_mask_cartesian(
grid: CartesianGridBase, mask: np.ndarray
) -> Emulsion:
if mask.shape != grid.shape:
raise ValueError(
f"The shape {mask.shape} of the data is not compatible with the grid "
f"shape {grid.shape}"
)
# pad the array to simulate periodic boundary conditions
offset = np.array([dim if p else 0 for p, dim in zip(grid.periodic, grid.shape)])
pad = np.c_[offset, offset].astype(np.intc)
mask_padded = np.pad(mask, pad, mode="wrap")
assert np.all(mask_padded.shape == np.array(grid.shape) + 2 * offset)
# locate individual clusters in the padded image
labels, num_labels = ndimage.label(mask_padded)
if num_labels == 0:
return Emulsion([], grid=grid)
indices = range(1, num_labels + 1)
# create an emulsion from this list of droplets
grid._logger.info(f"Found {num_labels} droplet candidate(s)")
# determine position from binary image and scale it to real space
positions = ndimage.measurements.center_of_mass(mask_padded, labels, index=indices)
# correct for the additional padding of the array
positions = grid.cell_to_point(positions - offset)
# determine volume from binary image and scale it to real space
volumes = ndimage.measurements.sum(mask_padded, labels, index=indices)
volumes = np.asanyarray(volumes) * np.prod(grid.discretization)
# only retain droplets that are inside the central area
droplets = (
SphericalDroplet.from_volume(position, volume)
for position, volume in zip(positions, volumes)
if grid.cuboid.contains_point(position)
)
# filter overlapping droplets (e.g. due to duplicates)
emulsion = Emulsion(droplets, grid=grid)
num_candidates = len(emulsion)
if num_candidates < num_labels:
grid._logger.info(f"Only {num_candidates} candidate(s) inside bounds")
emulsion.remove_overlapping()
if len(emulsion) < num_candidates:
grid._logger.info(f"Only {num_candidates} candidate(s) not overlapping")
return emulsion | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _locate_droplets_in_mask_cylindrical(\n grid: CylindricalSymGrid, mask: np.ndarray\n) -> Emulsion:\n assert np.all(mask.shape == grid.shape)\n\n if grid.periodic[1]:\n # locate droplets respecting periodic boundary conditions in z-direction\n\n # pad the array to simulate periodic boundary conditions\n dim_r, dim_z = grid.shape\n mask_padded = np.pad(mask, [[0, 0], [dim_z, dim_z]], mode=\"wrap\")\n assert mask_padded.shape == (dim_r, 3 * dim_z)\n\n # locate droplets in the extended image\n candidates = _locate_droplets_in_mask_cylindrical_single(grid, mask_padded)\n grid._logger.info(f\"Found {len(candidates)} droplet candidates.\")\n\n # keep droplets that are inside the central area\n droplets = Emulsion(grid=grid)\n for droplet in candidates:\n # correct for the additional padding of the array\n droplet.position[2] -= grid.length\n # check whether the droplet lies in the original box\n if grid.contains_point(droplet.position):\n droplets.append(droplet)\n\n grid._logger.info(f\"Kept {len(droplets)} central droplets.\")\n\n # filter overlapping droplets (e.g. due to duplicates)\n droplets.remove_overlapping()\n\n else:\n # simply locate droplets in the mask\n droplets = _locate_droplets_in_mask_cylindrical_single(grid, mask)\n\n return droplets",
"def locate_droplets_in_mask(grid: GridBase, mask: np.ndarray) -> Emulsion:\n if isinstance(grid, CartesianGridBase):\n return _locate_droplets_in_mask_cartesian(grid, mask)\n elif isinstance(grid, SphericalSymGridBase):\n return _locate_droplets_in_mask_spherical(grid, mask)\n elif isinstance(grid, CylindricalSymGrid):\n return _locate_droplets_in_mask_cylindrical(grid, mask)\n elif isinstance(grid, GridBase):\n raise NotImplementedError(f\"Locating droplets is not possible for grid {grid}\")\n else:\n raise ValueError(f\"Invalid grid {grid}\")",
"def _locate_droplets_in_mask_cylindrical_single(\n grid: CylindricalSymGrid, mask: np.ndarray\n) -> Emulsion:\n # locate the individual clusters\n labels, num_features = ndimage.label(mask)\n if num_features == 0:\n return Emulsion([], grid=grid)\n\n # locate clusters on the symmetry axis\n object_slices = ndimage.measurements.find_objects(labels)\n indices = []\n for index, slices in enumerate(object_slices, 1):\n if slices[0].start == 0: # contains point on symmetry axis\n indices.append(index)\n else:\n logger = logging.getLogger(grid.__class__.__module__)\n logger.warning(\"Found object not located on symmetry axis\")\n\n # determine position from binary image and scale it to real space\n pos = ndimage.measurements.center_of_mass(mask, labels, index=indices)\n pos = grid.cell_to_point(pos)\n\n # determine volume from binary image and scale it to real space\n vol_r, dz = grid.cell_volume_data\n cell_volumes = vol_r * dz\n vol = ndimage.measurements.sum(cell_volumes, labels, index=indices)\n\n # return an emulsion of droplets\n droplets = (\n SphericalDroplet.from_volume(np.array([0, 0, p[2]]), v)\n for p, v in zip(pos, vol)\n )\n return Emulsion(droplets, grid=grid)",
"def _locate_droplets_in_mask_spherical(\n grid: SphericalSymGridBase, mask: np.ndarray\n) -> Emulsion:\n assert np.all(mask.shape == grid.shape)\n\n # locate clusters in the binary image\n labels, num_labels = ndimage.label(mask)\n if num_labels == 0:\n return Emulsion([], grid=grid)\n\n # locate clusters around origin\n object_slices = ndimage.measurements.find_objects(labels)\n droplet = None\n for slices in object_slices:\n if slices[0].start == 0: # contains point around origin\n radius = grid.cell_to_point(slices[0].stop).flat[-1]\n droplet = SphericalDroplet(np.zeros(grid.dim), radius=radius)\n else:\n logger = logging.getLogger(grid.__class__.__module__)\n logger.warning(\"Found object not located at origin\")\n\n # return an emulsion of droplets\n if droplet:\n return Emulsion([droplet], grid=grid)\n else:\n return Emulsion([], grid=grid)",
"def extract_polygons_lattice(xy, BL, NL=None, KL=None, PVx=None, PVy=None, PVxydict=None, viewmethod=False,\n check=False, eps=1e-10):\n viewmethod = True\n NP = len(xy)\n\n if KL is None or NL is None:\n NL, KL = BL2NLandKL(BL, NP=NP, NN='min')\n if (BL < 0).any():\n if len(PVxydict) > 0:\n PVx, PVy = PVxydict2PVxPVy(PVxydict, NL, KL)\n else:\n raise RuntimeError('Must specify either PVxydict or KL and NL in extract_polygons_lattice()' +\n ' when periodic bonds exist!')\n elif (BL < 0).any():\n if PVx is None or PVy is None:\n if PVxydict is None:\n raise RuntimeError('Must specify either PVxydict or PVx and PVy in extract_polygons_lattice()' +\n ' when periodic bonds exist!')\n else:\n PVx, PVy = PVxydict2PVxPVy(PVxydict, NL, KL)\n\n NN = np.shape(KL)[1]\n # Remove dangling bonds\n # dangling bonds have one particle with only one neighbor\n finished_dangles = False\n while not finished_dangles:\n dangles = np.where([np.count_nonzero(row) == 1 for row in KL])[0]\n if len(dangles) > 0:\n # Check if need to build PVxy dictionary from PVx and PVy before changing NL and KL\n if (BL < 0).any() and len(PVxydict) == 0:\n PVxydict = PVxy2PVxydict(PVx, PVy, NL, KL=KL)\n\n # Make sorted bond list of dangling bonds\n dpair = np.sort(np.array([[d0, NL[d0, np.where(KL[d0] != 0)[0]]] for d0 in dangles]), axis=1)\n # Remove those bonds from BL\n BL = dh.setdiff2d(BL, dpair.astype(BL.dtype))\n # print 'dpair = ', dpair\n # print 'ending BL = ', BL\n NL, KL = BL2NLandKL(BL, NP=NP, NN=NN)\n\n # Now that NL and KL rebuilt (changed), (re)build PVx and PVy if periodic bcs\n if (BL < 0).any():\n if len(PVxydict) > 0:\n PVx, PVy = PVxydict2PVxPVy(PVxydict, NL, KL)\n else:\n finished_dangles = True\n\n if viewmethod or check:\n print 'Plotting result after chopped dangles, if applicable...'\n display_lattice_2D(xy, BL, NL=NL, KL=KL, PVx=PVx, PVy=PVy, PVxydict=PVxydict,\n title='Result after chopping dangling bonds', close=False)\n for i in range(len(xy)):\n plt.text(xy[i, 0] + 0.2, xy[i, 1], str(i))\n plt.show()\n\n # bond markers for counterclockwise, clockwise\n used = np.zeros((len(BL), 2), dtype=bool)\n polygons = []\n finished = False\n if viewmethod:\n f, (ax1, ax2) = plt.subplots(1, 2)\n\n # For periodicity, remember which bonds span periodic boundary\n periB = np.array([(row < 0).any() for row in BL])\n\n if periB.any() and PVxydict is None and (PVx is None or PVy is None):\n raise RuntimeError('Periodic boundaries have been detected, but no periodic vectors supplied to ' +\n 'extract_polygons_lattice()')\n\n if not periB.any():\n print 'no PBCs, calculating polygons...'\n while not finished:\n # Check if all bond markers are used in order A-->B\n # print 'Checking AB (A-->B): '\n todoAB = np.where(~used[:, 0])[0]\n # print 'len(todoAB) = ', len(todoAB)\n # print 'used = ', used\n # print 'todoAB = ', todoAB\n # print polygons\n if len(todoAB) > 0:\n bond = BL[todoAB[0]]\n # if (bond == [21, 22]).all():\n # for todoab in todoAB:\n # ax1.plot([xy[BL[todoab, 0], 0], xy[BL[todoab, 1], 0]],\n # [xy[BL[todoab, 0], 1], xy[BL[todoab, 1], 1]], 'b-', lw=3)\n # todoBA = np.where(~used[:, 1])[0]\n # for todoba in todoBA:\n # ax1.plot([xy[BL[todoba, 0], 0], xy[BL[todoba, 1], 0]],\n # [xy[BL[todoba, 0], 1], xy[BL[todoba, 1], 1]], 'g--')\n # print 'bond = ', bond\n # plt.pause(40)\n # sys.exit()\n\n # bb will be list of polygon indices\n # Start with orientation going from bond[0] to bond[1]\n nxt = bond[1]\n bb = [bond[0], nxt]\n dmyi = 1\n\n # Now mark the new bond that has now been added to bb as used\n # Get 
index of used matching thisbond\n mark_used = np.where((np.logical_or(BL == bb[0], BL == bb[1])).all(axis=1))\n # print 'marking bond [', thisbond, '] as used'\n used[mark_used, 0] = True\n\n ###############\n # check\n if viewmethod:\n ax1.plot(xy[:, 0], xy[:, 1], 'k.')\n ax1.annotate(\"\", xy=(xy[bb[dmyi], 0], xy[bb[dmyi], 1]), xycoords='data',\n xytext=(xy[nxt, 0], xy[nxt, 1]), textcoords='data',\n arrowprops=dict(arrowstyle=\"->\",\n color=\"r\",\n shrinkA=5, shrinkB=5,\n patchA=None,\n patchB=None,\n connectionstyle=\"arc3,rad=0.2\", ), )\n for i in range(len(xy)):\n ax1.text(xy[i, 0] + 0.2, xy[i, 1], str(i))\n ax2.imshow(used)\n ax1.set_aspect('equal')\n ###############\n\n # as long as we haven't completed the full outer polygon, add next index\n while nxt != bond[0]:\n n_tmp = NL[nxt, np.argwhere(KL[nxt]).ravel()]\n # Exclude previous boundary particle from the neighbors array, unless its the only one\n # (It cannot be the only one, if we removed dangling bonds)\n if len(n_tmp) == 1:\n '''The bond is a lone bond, not part of a triangle.'''\n neighbors = n_tmp\n else:\n neighbors = np.delete(n_tmp, np.where(n_tmp == bb[dmyi - 1])[0])\n\n angles = np.mod(np.arctan2(xy[neighbors, 1] - xy[nxt, 1], xy[neighbors, 0] - xy[nxt, 0]).ravel() \\\n - np.arctan2(xy[bb[dmyi - 1], 1] - xy[nxt, 1],\n xy[bb[dmyi - 1], 0] - xy[nxt, 0]).ravel(), 2 * np.pi)\n nxt = neighbors[angles == max(angles)][0]\n bb.append(nxt)\n\n ###############\n # # Check\n # if viewmethod:\n # plt.annotate(\"\", xy=(xy[bb[dmyi],0],xy[bb[dmyi],1] ), xycoords='data',\n # xytext=(xy[nxt,0], xy[nxt,1]), textcoords='data',\n # arrowprops=dict(arrowstyle=\"->\",\n # color=\"r\",\n # shrinkA=5, shrinkB=5,\n # patchA=None,\n # patchB=None,\n # connectionstyle=\"arc3,rad=0.2\",), )\n #\n ###############\n\n # Now mark the new bond that has now been extended (added) as used\n thisbond = [bb[dmyi], bb[dmyi + 1]]\n # Get index of used matching thisbond\n mark_used = np.where((np.logical_or(BL == bb[dmyi], BL == bb[dmyi + 1])).all(axis=1))\n\n # mark_used = np.where((BL == thisbond).all(axis=1))\n if not used[mark_used, 0]:\n # print 'marking bond [', thisbond, '] as used'\n used[mark_used, 0] = True\n else:\n # Get index of used matching reversed thisbond (this list boolean is directional)\n # mark_used = np.where((BL == thisbond[::-1]).all(axis=1))\n # Used this bond in reverse order\n used[mark_used, 1] = True\n # print 'used = ', used\n dmyi += 1\n\n polygons.append(bb)\n ###############\n # Check new polygon\n if viewmethod:\n ax1.plot(xy[:, 0], xy[:, 1], 'k.')\n for i in range(len(xy)):\n ax1.text(xy[i, 0] + 0.2, xy[i, 1], str(i))\n for dmyi in range(len(bb)):\n nxt = bb[np.mod(dmyi + 1, len(bb))]\n ax1.annotate(\"\", xy=(xy[bb[dmyi], 0], xy[bb[dmyi], 1]), xycoords='data',\n xytext=(xy[nxt, 0], xy[nxt, 1]), textcoords='data',\n arrowprops=dict(arrowstyle=\"->\",\n color=\"r\",\n shrinkA=5, shrinkB=5,\n patchA=None,\n patchB=None,\n connectionstyle=\"arc3,rad=0.2\", ), )\n ax2.cla()\n ax2.imshow(used)\n plt.pause(0.00001)\n ###############\n\n else:\n # Check for remaining bonds unused in reverse order (B-->A)\n # print 'CHECKING REVERSE (B-->A): '\n todoBA = np.where(~used[:, 1])[0]\n if len(todoBA) > 0:\n bond = BL[todoBA[0]]\n\n ###############\n # # check\n # if viewmethod:\n # plt.annotate(\"\", xy=(xy[bb[dmyi],0],xy[bb[dmyi],1] ), xycoords='data',\n # xytext=(xy[nxt,0], xy[nxt,1]), textcoords='data',\n # arrowprops=dict(arrowstyle=\"->\",\n # color=\"b\",\n # shrinkA=5, shrinkB=5,\n # patchA=None,\n # patchB=None,\n # 
connectionstyle=\"arc3,rad=0.6\",), )\n # ###############\n\n # bb will be list of polygon indices\n # Start with orientation going from bond[0] to bond[1]\n nxt = bond[0]\n bb = [bond[1], nxt]\n dmyi = 1\n\n # Now mark the new bond that has now been added to bb as used\n # Get index of used matching thisbond\n thisbond = [bb[dmyi], bb[dmyi - 1]]\n mark_used = np.where((BL == thisbond).all(axis=1))\n # print 'marking bond [', thisbond, '] as used'\n used[mark_used, 1] = True\n\n # as long as we haven't completed the full outer polygon, add nextIND\n while nxt != bond[1]:\n n_tmp = NL[nxt, np.argwhere(KL[nxt]).ravel()]\n # Exclude previous boundary particle from the neighbors array, unless its the only one\n # (It cannot be the only one, if we removed dangling bonds)\n if len(n_tmp) == 1:\n '''The bond is a lone bond, not part of a triangle.'''\n neighbors = n_tmp\n else:\n neighbors = np.delete(n_tmp, np.where(n_tmp == bb[dmyi - 1])[0])\n\n angles = np.mod(np.arctan2(xy[neighbors, 1] - xy[nxt, 1], xy[neighbors, 0] - xy[nxt, 0]).ravel() \\\n - np.arctan2(xy[bb[dmyi - 1], 1] - xy[nxt, 1],\n xy[bb[dmyi - 1], 0] - xy[nxt, 0]).ravel(), 2 * np.pi)\n nxt = neighbors[angles == max(angles)][0]\n bb.append(nxt)\n\n ###############\n # Check\n # if viewmethod:\n # plt.annotate(\"\", xy=(xy[bb[dmyi],0],xy[bb[dmyi],1] ), xycoords='data',\n # xytext=(xy[nxt,0], xy[nxt,1]), textcoords='data',\n # arrowprops=dict(arrowstyle=\"->\",\n # color=\"b\",\n # shrinkA=5, shrinkB=5,\n # patchA=None,\n # patchB=None,\n # connectionstyle=\"arc3,rad=0.6\", #connectionstyle,\n # ), )\n ###############\n\n # Now mark the current bond as used --> note the inversion of the bond order to match BL\n thisbond = [bb[dmyi + 1], bb[dmyi]]\n # Get index of used matching [bb[dmyi-1],nxt]\n mark_used = np.where((BL == thisbond).all(axis=1))\n if len(mark_used) > 0:\n used[mark_used, 1] = True\n else:\n raise RuntimeError('Cannot mark polygon bond as used: this bond was already used '\n 'in its attempted orientation. (All bonds in first column '\n 'should already be marked as used.)')\n\n dmyi += 1\n\n polygons.append(bb)\n\n # Check new polygon\n if viewmethod:\n ax1.plot(xy[:, 0], xy[:, 1], 'k.')\n for i in range(len(xy)):\n ax1.text(xy[i, 0] + 0.2, xy[i, 1], str(i))\n for dmyi in range(len(bb)):\n nxt = bb[np.mod(dmyi + 1, len(bb))]\n ax1.annotate(\"\", xy=(xy[bb[dmyi], 0], xy[bb[dmyi], 1]), xycoords='data',\n xytext=(xy[nxt, 0], xy[nxt, 1]), textcoords='data',\n arrowprops=dict(arrowstyle=\"->\",\n color=\"b\",\n shrinkA=5, shrinkB=5,\n patchA=None,\n patchB=None,\n connectionstyle=\"arc3,rad=0.6\", ), )\n ax2.cla()\n ax2.imshow(used)\n plt.pause(0.00001)\n ###############\n\n else:\n # All bonds have been accounted for\n finished = True\n else:\n print 'detected periodicity...'\n # get particles on the finite (non-periodic) system's boundary. 
This allows massive speedup.\n KLfin = np.zeros_like(KL)\n KLfin[KL > 0] = 1\n # Create BLfin to pass to extract_boundary()\n prows = np.where(BL < 0)[0]\n nprows = np.setdiff1d(np.arange(len(BL)), prows)\n if check:\n print 'rows of BL that are periodic: ', prows\n print 'BL[prows] = ', BL[prows]\n BLfin = BL[nprows]\n finbd = extract_boundary(xy, NL, KLfin, BLfin, check=check)\n\n # If there were dangling points in the non-periodic representation, then we need to add those to finbd because\n # they will have periodic bonds attached to them.\n dangles = np.where(~KLfin.any(axis=1))[0]\n print 'dangles = ', dangles\n if len(dangles) > 0:\n print 'Found dangling points in the finite/non-periodic representation. Adding to finbd...'\n finbd = np.hstack((finbd, np.array(dangles)))\n\n if check:\n print 'finite boundary: finbd = ', finbd\n plt.clf()\n display_lattice_2D(xy, BL, NL=NL, KL=KLfin, PVx=PVx, PVy=PVy, PVxydict=PVxydict,\n title='Identified finite boundary', close=False)\n for i in range(len(xy)):\n plt.text(xy[i, 0] + 0.2, xy[i, 1], str(i))\n plt.plot(xy[finbd, 0], xy[finbd, 1], 'ro')\n plt.show()\n first_check = True\n\n # Then erase periodicity in BL\n BL = np.abs(BL)\n\n while not finished:\n if len(polygons) % 20 == 0:\n print 'constructed ', len(polygons), ' polygons...'\n # Check if all bond markers are used in order A-->B\n # print 'Checking AB (A-->B): '\n todoAB = np.where(~used[:, 0])[0]\n # print 'len(todoAB) = ', len(todoAB)\n # print 'used = ', used\n # print 'todoAB = ', todoAB\n if len(todoAB) > 0:\n bond = BL[todoAB[0]]\n\n # bb will be list of polygon indices\n # Start with orientation going from bond[0] to bond[1]\n nxt = bond[1]\n bb = [bond[0], nxt]\n dmyi = 1\n\n # define 'previous angle' as backwards of current angle -- ie angle(prev-current_pos)\n # Must include effect of PV on this angle -- do in ref frame of nxt particle\n PVind = np.argwhere(NL[nxt] == bond[0])[0][0]\n addx = PVx[nxt, PVind]\n addy = PVy[nxt, PVind]\n xyb0 = xy[bond[0], :] + np.array([addx, addy])\n prev_angle = np.arctan2(xyb0[1] - xy[nxt, 1], xyb0[0] - xy[nxt, 0]).ravel()\n\n ###############\n # check\n if viewmethod:\n if first_check:\n ax1.plot(xy[:, 0], xy[:, 1], 'k.')\n for i in range(len(xy)):\n ax1.text(xy[i, 0] + 0.2, xy[i, 1], str(i))\n first_check = False\n\n ax1.annotate(\"\", xy=(xy[bb[dmyi - 1], 0], xy[bb[dmyi - 1], 1]), xycoords='data',\n xytext=(xy[nxt, 0], xy[nxt, 1]), textcoords='data',\n arrowprops=dict(arrowstyle=\"->\",\n color=\"r\",\n shrinkA=5, shrinkB=5,\n patchA=None,\n patchB=None,\n connectionstyle=\"arc3,rad=0.2\", ), )\n ax2.imshow(used, aspect=1. 
/ len(used), interpolation='none')\n ax1.set_aspect('equal')\n ###############\n # define the displacment from the starting point that we have moved so far\n displ = xy[nxt] - xyb0\n\n # as long as we haven't completed the full outer polygon, add next index\n while nxt != bond[0] or abs(displ[0]**2 + displ[1]**2) > eps:\n # print nxt\n # o o neighbors\n # \\ /\n # \\ /\n # o nxt\n # /\n # /\n # o bb[dmyi-1]\n #\n n_tmp = NL[nxt, np.argwhere(KL[nxt]).ravel()]\n # Exclude previous boundary particle from the neighbors array, unless its the only one\n # (It cannot be the only one, if we removed dangling bonds)\n if len(n_tmp) == 1:\n '''The bond is a lone bond, not part of a triangle/polygon.'''\n neighbors = n_tmp\n else:\n # Remove the current particle from the list of its next nearest neighbors\n # Note that we may add this particle back later if bb[dmyi - 1] is its own NNN\n neighbors = np.delete(n_tmp, np.where(n_tmp == bb[dmyi - 1])[0])\n # Here, handle the case where a periodic bond links the neighbor back to the original particle,\n # as in the bond linkage of 0-1-0.\n if len(neighbors) == 0:\n neighbors = n_tmp\n\n # check if neighbors CAN be connected across periodic bc--\n # ie if particle on finite boundary (finbd)\n if nxt in finbd:\n # Since on finite system boundary, particle could have periodic bonds\n # Find x values to add to neighbors, by first getting indices of row of\n # PV (same as of NL) matching neighbors\n # PVinds = [np.argwhere(NL[nxt] == nnn)[0][0] for nnn in neighbors] <--- this assumed no 0-1-0\n PVinds = []\n for nnn in dh.unique_nosort(neighbors):\n okinds = np.ravel(np.argwhere(np.logical_and(NL[nxt] == nnn, np.abs(KL[nxt]) > eps)))\n # print 'neighbors = ', neighbors\n # print 'okinds = ', okinds\n # print 'NL = ', NL\n # print 'KL = ', KL\n # print NL[nxt] == nnn, np.abs(KL[nxt]) > eps\n # print np.argwhere(np.logical_and(NL[nxt] == nnn, np.abs(KL[nxt]) > eps))\n for okind in okinds:\n PVinds.append(okind)\n\n addx = PVx[nxt, PVinds]\n addy = PVy[nxt, PVinds]\n\n # print 'nxt = ', nxt\n # print 'PVinds', PVinds\n # print 'xy[neighbors, :] = ', xy[neighbors, :]\n # print 'np.dstack([addx, addy])[0] = ', np.dstack([addx, addy])[0]\n\n xynb = xy[neighbors, :] + np.dstack([addx, addy])[0]\n xynxt = xy[nxt, :]\n current_angles = np.arctan2(xynb[:, 1] - xynxt[1], xynb[:, 0] - xynxt[0]).ravel()\n angles = np.mod(current_angles - prev_angle, 2 * np.pi)\n\n if check:\n print '\\n'\n print 'particle ', nxt, ' is on finbd'\n print 'nxt = ', nxt\n print 'neighbors = ', neighbors\n print 'xy[neighbors,:] =', xy[neighbors, :]\n print 'addxy = ', np.dstack([addx, addy])[0]\n print 'xynb = ', xynb\n print 'xynxt = ', xynxt\n print 'current_angles = ', current_angles\n print 'prev_angle = ', prev_angle\n print 'angles = ', angles\n print 'redefining nxt = ', neighbors[angles == max(angles)][0]\n\n # redefine previous angle as backwards of current angle -- ie angle(prev-current_pos)\n prev_angletmp = np.arctan2(xynxt[1] - xynb[:, 1], xynxt[0] - xynb[:, 0]).ravel()\n prev_angle = prev_angletmp[angles == max(angles)][0]\n\n # CHECK\n # ax1 = plt.gca()\n # ax1.plot(xy[:,0],xy[:,1],'k.')\n # for i in range(len(xy)):\n # ax1.text(xy[i,0]+0.2,xy[i,1],str(i))\n # plt.show()\n\n else:\n current_angles = np.arctan2(xy[neighbors, 1] - xy[nxt, 1],\n xy[neighbors, 0] - xy[nxt, 0]).ravel()\n angles = np.mod(current_angles - prev_angle, 2 * np.pi)\n # redefine previous angle as backwards of current angle -- ie angle(prev-current_pos)\n # prev_angle = np.arctan2(xy[bb[dmyi-1],1] - xynxt[1], 
xy[bb[dmyi-1],0] - xynxt[0] ).ravel()\n xynxt = xy[nxt, :]\n xynb = xy[neighbors, :]\n prev_angletmp = np.arctan2(xynxt[1] - xy[neighbors, 1], xynxt[0] - xy[neighbors, 0]).ravel()\n prev_angle = prev_angletmp[angles == max(angles)][0]\n\n nxt = neighbors[angles == max(angles)][0]\n bb.append(nxt)\n # update displacement\n displ += xynb[angles == max(angles)][0] - xynxt\n\n ###############\n # Check bond\n if viewmethod:\n # Check individually\n # ax1 = plt.gca()\n # ax1.plot(xy[:,0],xy[:,1],'k.')\n if first_check:\n for i in range(len(xy)):\n ax1.text(xy[i, 0] + 0.2, xy[i, 1], str(i))\n\n plt.annotate(\"\", xy=(xy[bb[dmyi], 0], xy[bb[dmyi], 1]), xycoords='data',\n xytext=(xy[nxt, 0], xy[nxt, 1]), textcoords='data',\n arrowprops=dict(arrowstyle=\"->\",\n color=\"r\",\n shrinkA=5, shrinkB=5,\n patchA=None,\n patchB=None,\n connectionstyle=\"arc3,rad=0.2\", ), )\n\n ###############\n\n # Now mark the current bond as used\n # thisbond = [bb[dmyi-1], bb[dmyi]]\n # Get index of used matching thisbond\n mark_used = np.where((np.logical_or(BL == bb[dmyi - 1], BL == bb[dmyi])).all(axis=1))[0]\n # mark_used = np.where((BL == thisbond).all(axis=1))\n # print 'mark_used = ', mark_used\n # I adjusted the line below to allow multiple entries in mark_used (2018-04-26)'\n if not (used[mark_used, 0]).all():\n # print 'marking bond [', thisbond, '] as used'\n marking, kk = True, 0\n while marking:\n if not used[mark_used[kk], 0]:\n used[mark_used[kk], 0] = True\n marking = False\n kk += 1\n else:\n # Get index of used matching reversed thisbond (this list boolean is directional)\n # mark_used = np.where((BL == thisbond[::-1]).all(axis=1))\n # Used this bond in reverse order\n marking, kk = True, 0\n while marking:\n print 'mark_used = ', mark_used\n print 'mark_used[kk] = ', mark_used[kk]\n print 'used[mark_used[kk]] = ', used[mark_used[kk]]\n print '--------------------------'\n if not used[mark_used[kk], 1]:\n used[mark_used[kk], 1] = True\n marking = False\n # except IndexError:\n # print 'mark_used = ', mark_used\n # print 'used[mark_used] = ', used[mark_used[kk]]\n # print 'marking bond ', BL[mark_used[kk]]\n # print 'kk = ', kk\n # print 'bb = ', bb\n # print 'Encountered index error in marking bond used'\n # plt.show()\n # sys.exit()\n kk += 1\n if kk == len(mark_used):\n marking = False\n\n # print 'used = ', used\n dmyi += 1\n if check:\n print 'bb = ', bb\n\n polygons.append(bb)\n ###############\n # Check new polygon\n if viewmethod:\n if first_check:\n ax1.plot(xy[:, 0], xy[:, 1], 'k.')\n for i in range(len(xy)):\n ax1.text(xy[i, 0] + 0.2, xy[i, 1], str(i))\n\n for dmyi in range(len(bb)):\n nxt = bb[np.mod(dmyi + 1, len(bb))]\n ax1.annotate(\"\", xy=(xy[bb[dmyi], 0], xy[bb[dmyi], 1]), xycoords='data',\n xytext=(xy[nxt, 0], xy[nxt, 1]), textcoords='data',\n arrowprops=dict(arrowstyle=\"->\",\n color=\"r\",\n shrinkA=5, shrinkB=5,\n patchA=None,\n patchB=None,\n connectionstyle=\"arc3,rad=0.2\", ), )\n ax2.cla()\n ax2.imshow(used, aspect=1. 
/ len(used), interpolation='none')\n print 'polygons = ', polygons\n # plt.show()\n plt.pause(0.00001)\n ###############\n\n else:\n # Check for remaining bonds unused in reverse order (B-->A)\n # print 'CHECKING REVERSE (B-->A): '\n todoBA = np.where(~used[:, 1])[0]\n # print 'len(todoBA) = ', len(todoBA)\n if len(todoBA) > 0:\n bond = BL[todoBA[0]]\n\n ###############\n # # check\n if viewmethod:\n plt.annotate(\"\", xy=(xy[bb[dmyi], 0], xy[bb[dmyi], 1]), xycoords='data',\n xytext=(xy[nxt, 0], xy[nxt, 1]), textcoords='data',\n arrowprops=dict(arrowstyle=\"->\",\n color=\"b\",\n shrinkA=5, shrinkB=5,\n patchA=None,\n patchB=None,\n connectionstyle=\"arc3,rad=0.6\", ), )\n # ###############\n\n # bb will be list of polygon indices\n # Start with orientation going from bond[0] to bond[1]\n nxt = bond[0]\n bb = [bond[1], nxt]\n dmyi = 1\n\n # define 'previous angle' as backwards of current angle -- ie angle(prev-current_pos)\n # Must include effect of PV on this angle -- do in ref frame of nxt particle\n PVind = np.argwhere(NL[nxt] == bond[1])[0][0]\n addx = PVx[nxt, PVind]\n addy = PVy[nxt, PVind]\n xyb0 = xy[bond[1], :] + np.array([addx, addy])\n prev_angle = np.arctan2(xyb0[1] - xy[nxt, 1], xyb0[0] - xy[nxt, 0]) # .ravel()\n\n # as long as we haven't completed the full outer polygon, add nextIND\n # define the displacment from the starting point that we have moved so far\n displ = xy[nxt] - xyb0\n\n # as long as we haven't completed the full outer polygon, add next index\n while nxt != bond[1] or abs(displ[0] ** 2 + displ[1] ** 2) > eps:\n n_tmp = NL[nxt, np.argwhere(KL[nxt]).ravel()]\n # Exclude previous boundary particle from the neighbors array, unless its the only one\n # (It cannot be the only one, if we removed dangling bonds)\n if len(n_tmp) == 1:\n '''The bond is a lone bond, not part of a triangle.'''\n neighbors = n_tmp\n else:\n neighbors = np.delete(n_tmp, np.where(n_tmp == bb[dmyi - 1])[0])\n # Add neighbors back in if this bond is not dangling but we have a NNN structure of 0-1-0\n if len(neighbors) == 0:\n neighbors = n_tmp\n\n ########\n # check if neighbors CAN be connected across periodic bc-- ie if particle is\n # on the finite boundary (finbd)\n if nxt in finbd:\n # Since on finite system boundary, particle could have periodic bonds\n # Find x values to add to neighbors, by first getting indices of row of PV\n # (same as of NL) matching neighbors\n # ALL CALCS in frame of reference of NXT particle\n # PVinds = [np.argwhere(NL[nxt] == nnn)[0][0] for nnn in neighbors]\n PVinds = []\n for nnn in dh.unique_nosort(neighbors):\n okinds = np.ravel(np.argwhere(np.logical_and(NL[nxt] == nnn, np.abs(KL[nxt]) > eps)))\n for okind in okinds:\n PVinds.append(okind)\n\n addx = PVx[nxt, PVinds]\n addy = PVy[nxt, PVinds]\n\n xynb = xy[neighbors, :] + np.dstack([addx, addy])[0]\n xynxt = xy[nxt, :]\n # print '\\n'\n # print 'nxt = ', nxt\n # print 'neighbors = ', neighbors\n # print 'xy[neighbors,:] =', xy[neighbors,:]\n # print 'addxy = ', np.dstack([addx, addy])[0]\n # print 'xynb = ', xynb\n # print 'xynxt = ', xynxt\n current_angles = np.arctan2(xynb[:, 1] - xynxt[1], xynb[:, 0] - xynxt[0]).ravel()\n angles = np.mod(current_angles - prev_angle, 2 * np.pi)\n selectIND = np.where(angles == max(angles))[0][0]\n # print 'selectIND = ', selectIND\n # print 'current_angles = ', current_angles/np.pi\n # print 'prev_angle = ', prev_angle/np.pi\n # print 'angles = ', angles/np.pi\n\n # redefine previous angle as backwards of current angle -- ie angle(nxt - neighbor )\n prev_angletmp = 
np.arctan2(xynxt[1] - xynb[:, 1], xynxt[0] - xynb[:, 0]).ravel()\n prev_angle = prev_angletmp[selectIND]\n\n # print 'new prev_angle = ', prev_angle/np.pi\n # print 'NL[nxt] = ', NL[nxt]\n # print 'bb = ', bb\n # # CHECK\n # ax1 = plt.gca()\n # ax1.plot(xy[:,0],xy[:,1],'k.')\n # for i in range(len(xy)):\n # ax1.text(xy[i,0]+0.2,xy[i,1],str(i))\n # plt.arrow(xynxt[0], xynxt[1], np.cos(angles[selectIND]),\n # np.sin(angles[selectIND]),fc='r', ec='r')\n # plt.arrow(xynb[selectIND,0], xynb[selectIND,1],\n # np.cos(prev_angle), np.sin(prev_angle),fc='b', ec='b')\n # plt.show()\n\n else:\n current_angles = np.arctan2(xy[neighbors, 1] - xy[nxt, 1],\n xy[neighbors, 0] - xy[nxt, 0]).ravel()\n angles = np.mod(current_angles - prev_angle, 2 * np.pi)\n # redefine previous angle as backwards of current angle -- ie angle(prev-current_pos)\n xynxt = xy[nxt, :]\n xynb = xy[neighbors, :]\n prev_angletmp = np.arctan2(xynxt[1] - xynb[:, 1], xynxt[0] - xynb[:, 0]).ravel()\n selectIND = np.where(angles == max(angles))[0][0]\n # print '\\n'\n # print 'nxt = ', nxt\n # print 'bb = ', bb\n # print 'neighbors = ', neighbors\n # print 'current_angles = ', current_angles/np.pi\n # print 'prev_angle = ', prev_angle/np.pi\n # print 'angles = ', angles/np.pi\n # print 'selectIND = ', selectIND\n # print('xynxt[1] - xynb[:,1], xynxt[0] - xynb[:,0] = ', xynxt[1] - xynb[:,1],\n # xynxt[0] - xynb[:,0])\n # print('np.arctan2(xynxt[1] - xynb[:,1], xynxt[0] - xynb[:,0]) = ',\n # np.arctan2(xynxt[1] - xynb[:,1], xynxt[0] - xynb[:,0]))\n # print 'prev_angletmp = ', prev_angletmp/np.pi\n\n prev_angle = prev_angletmp[selectIND]\n # print 'new prev_angle = ', prev_angle/np.pi\n\n ###############\n nxt = neighbors[angles == max(angles)][0]\n bb.append(nxt)\n # update displacement of particle at nxt from first site (keeping track of periodic bonds)\n displ += xynb[angles == max(angles)][0] - xynxt\n\n ###############\n # Check\n if viewmethod:\n # If checking individual bonds\n # ax1 = plt.gca()\n # ax1.plot(xy[:,0],xy[:,1],'k.')\n # for i in range(len(xy)):\n # ax1.text(xy[i,0]+0.2,xy[i,1],str(i))\n\n plt.annotate(\"\", xy=(xy[bb[dmyi], 0], xy[bb[dmyi], 1]), xycoords='data',\n xytext=(xy[nxt, 0], xy[nxt, 1]), textcoords='data',\n arrowprops=dict(arrowstyle=\"->\",\n color=\"b\",\n shrinkA=5, shrinkB=5,\n patchA=None,\n patchB=None,\n connectionstyle=\"arc3,rad=0.6\",\n ), )\n # plt.show()\n ###############\n\n # Now mark the current bond as used --> note the inversion of the bond order to match BL\n thisbond = [bb[dmyi], bb[dmyi - 1]]\n # Get index of used matching [bb[dmyi-1],nxt]\n mark_used = np.where((BL == thisbond).all(axis=1))\n if len(mark_used) > 0:\n used[mark_used, 1] = True\n else:\n messg = 'Cannot mark polygon bond as used: this bond was already used in its attempted' + \\\n ' orientation. 
(All bonds in first column should already be marked as used.)'\n raise RuntimeError(messg)\n\n dmyi += 1\n\n polygons.append(bb)\n # print 'added polygon = ', bb\n\n # Check new polygon\n if viewmethod:\n if first_check:\n ax1.plot(xy[:, 0], xy[:, 1], 'k.')\n for i in range(len(xy)):\n ax1.text(xy[i, 0] + 0.2, xy[i, 1], str(i))\n\n for dmyi in range(len(bb)):\n nxt = bb[np.mod(dmyi + 1, len(bb))]\n ax1.annotate(\"\", xy=(xy[bb[dmyi], 0], xy[bb[dmyi], 1]), xycoords='data',\n xytext=(xy[nxt, 0], xy[nxt, 1]), textcoords='data',\n arrowprops=dict(arrowstyle=\"->\",\n color=\"b\",\n shrinkA=5, shrinkB=5,\n patchA=None,\n patchB=None,\n connectionstyle=\"arc3,rad=0.6\", ), )\n ax2.cla()\n ax2.imshow(used)\n # plt.show()\n plt.pause(0.0001)\n ###############\n\n else:\n # All bonds have been accounted for\n print 'all finished with finding polygons...'\n finished = True\n # check\n if viewmethod:\n plt.show()\n\n # Check for duplicates (up to cyclic permutations and inversions) in polygons\n # Note that we need to ignore the last element of each polygon (which is also starting pt)\n keep = np.ones(len(polygons), dtype=bool)\n for ii in range(len(polygons)):\n print 'ii = ', ii\n polyg = polygons[ii]\n for p2 in polygons[ii + 1:]:\n if is_cyclic_permutation(polyg[:-1], p2[:-1]):\n keep[ii] = False\n\n polygons = [polygons[i] for i in np.where(keep)[0]]\n\n # Remove duplicates via inversion (maybe not necessary?)\n\n # Remove the polygon which is the entire lattice boundary, except dangling bonds\n if not periB.any():\n print 'le.extract_polygons_lattice: Removing entire lattice boundary from list of polygons...'\n boundary = extract_boundary(xy, NL, KL, BL)\n # print 'boundary = ', boundary\n keep = np.ones(len(polygons), dtype=bool)\n for ii in range(len(polygons)):\n polyg = polygons[ii]\n if is_cyclic_permutation(polyg[:-1], boundary.tolist()):\n keep[ii] = False\n elif is_cyclic_permutation(polyg[:-1], boundary[::-1].tolist()):\n keep[ii] = False\n\n polygons = [polygons[i] for i in np.where(keep)[0]]\n\n # Check order of each polygon so that it is oriented counterclockwise\n # for polys in polygons:\n # angle_poly = 0\n # # Make sure that oriented counterclockwise\n # print 'polys = ', polys\n # for i in range(len(polys)):\n # p0 = polys[ np.mod(i-1, len(polys)-1)]\n # p1 = polys[i]\n # p2 = polys[ np.mod(i+1,len(polys)-1) ]\n # print 'p0,p1,p2 = ', p0, p1, p2\n # angle_tmp = np.mod(np.arctan2(xy[p2,1]-xy[p1,1], xy[p2,0]-xy[p1,0]) - np.arctan2( xy[p1,1]-xy[p0,1],\n # xy[p1,0]-xy[p0,0] ), 2*np.pi)\n # print 'angle_tmp = ', angle_tmp\n # angle_poly += angle_tmp\n #\n # print 'angle = ', angle_poly/6.\n print 'le: polygons = ', polygons\n if check:\n polygons2PPC(xy, polygons, BL=BL, PVxydict=PVxydict, check=True)\n\n return polygons",
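Editorial aside, not part of the dataset record above: the polygon-extraction routine deduplicates polygons "up to cyclic permutations" after dropping each polygon's repeated start point. The helper is not shown in this record, so the snippet below is only a plausible stand-in for what such a cyclic-permutation test could look like (the real helper may differ, e.g. in how reversed orderings are treated, which the record handles in a separate step).

def is_cyclic_permutation_sketch(a, b):
    # True if list b is a cyclic rotation of list a (same length, same order).
    if len(a) != len(b) or not a:
        return len(a) == len(b)
    doubled = a + a
    return any(doubled[i:i + len(b)] == b for i in range(len(a)))

print(is_cyclic_permutation_sketch([1, 2, 3, 4], [3, 4, 1, 2]))   # True
print(is_cyclic_permutation_sketch([1, 2, 3, 4], [1, 3, 2, 4]))   # False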
"def locate_droplets(\n phase_field: ScalarField,\n threshold: Union[float, str] = 0.5,\n modes: int = 0,\n minimal_radius: float = 0,\n refine: bool = False,\n interface_width: Optional[float] = None,\n) -> Emulsion:\n assert isinstance(phase_field, ScalarField)\n dim = phase_field.grid.dim # dimensionality of the space\n\n if modes > 0 and dim not in [2, 3]:\n raise ValueError(\"Perturbed droplets only supported for 2d and 3d\")\n\n # determine actual threshold\n if threshold == \"auto\":\n threshold = float(phase_field.data.min() + phase_field.data.max()) / 2\n else:\n threshold = float(threshold)\n\n # locate droplets in thresholded image\n img_binary = phase_field.data > threshold\n candidates = locate_droplets_in_mask(phase_field.grid, img_binary)\n\n if minimal_radius > -np.inf:\n candidates.remove_small(minimal_radius)\n\n droplets = []\n for droplet in candidates:\n # check whether we need to add the interface width\n droplet_class = droplet.__class__\n args: Dict[str, NumberOrArray] = {}\n\n # change droplet class when interface width is given\n if interface_width is not None:\n droplet_class = DiffuseDroplet\n args[\"interface_width\"] = interface_width\n\n # change droplet class when perturbed droplets are requested\n if modes > 0:\n if dim == 2:\n droplet_class = PerturbedDroplet2D\n elif dim == 3:\n droplet_class = PerturbedDroplet3D\n else:\n raise NotImplementedError(f\"Dimension {dim} is not supported\")\n args[\"amplitudes\"] = np.zeros(modes)\n\n # recreate a droplet of the correct class\n if droplet_class != droplet.__class__:\n droplet = droplet_class.from_droplet(droplet, **args)\n\n # refine droplets if necessary\n if refine:\n try:\n droplet = refine_droplet(phase_field, droplet)\n except ValueError:\n continue # do not add the droplet to the list\n droplets.append(droplet)\n\n # return droplets as an emulsion\n emulsion = Emulsion(droplets, grid=phase_field.grid)\n if minimal_radius > -np.inf:\n emulsion.remove_small(minimal_radius)\n return emulsion",
"def cut_bonds_z_random(xy, NL, KL, BL, target_z, min_coord=2, bulk_determination='Triangulation', check=False):\n print ' Cutting bonds z...'\n NP = len(xy)\n NN = np.shape(NL)[1]\n\n # Identify boundary pts, bulk pts\n print ' cut_bonds_z : extract boundary...'\n boundary = extract_boundary(xy, NL, KL, BL)\n # print 'boundary = ', boundary\n bulk = np.setdiff1d(np.arange(NP), boundary)\n NP_bulk = len(bulk)\n NP_bound = len(np.unique(boundary))\n print 'NP_bound = ', NP_bound\n print 'NP_bulk = ', NP_bulk\n\n if bulk_determination == 'Triangulation':\n # Form indices of BL in bulk. Bulk bonds appear in two simplices.\n # CHANGE THIS TO TEST IF BOND TWO SIMPLICES\n TRI = BL2TRI(BL, xy)\n Binds_list = []\n for ii in range(len(BL)):\n row = BL[ii]\n # get rows of TRI where each elem of row lives\n is_a = np.where(TRI == row[0])[0]\n is_b = np.where(TRI == row[1])[0]\n # The intersection of those rows gives where both live\n simplices = np.intersect1d(is_a, is_b)\n # print 'simplices = ', simplices\n # print 'np.size(simplices) = ', np.size(simplices)\n # If more than one simplex, bulk bond\n if np.size(simplices) < 2:\n # add to boundary list\n Binds_list.append(ii)\n # print ' --> Binds = ', Binds_list\n\n Binds = np.array(Binds_list).ravel()\n # Get the BL indices of bulk bonds --> (binds)\n binds = np.setdiff1d(np.arange(len(BL)), Binds)\n\n elif bulk_determination == 'Endpts':\n # Define bulk bonds as connecting at least one bulk particle\n is_a = np.in1d(BL[:, 0], bulk)\n is_b = np.in1d(BL[:, 1], bulk)\n binds = np.where(np.logical_or(is_a, is_b))[0]\n Binds = np.setdiff1d(np.arange(len(BL)), binds)\n else:\n raise RuntimeError('ERROR: argument <bulk_determination> did not match known method!')\n\n # print 'binds = ', binds\n # print 'Binds = ', Binds\n print 'len(binds) = ', len(binds)\n print 'len(Binds) = ', len(Binds)\n\n # Check\n if check:\n # plt.triplot(xy[:,0], xy[:,1], TRI, 'bo-')\n for bii in binds:\n XX = xy[BL[bii], 0]\n YY = xy[BL[bii], 1]\n plt.plot(XX, YY, 'b-')\n for Bii in Binds:\n XX = xy[BL[Bii], 0]\n YY = xy[BL[Bii], 1]\n plt.plot(XX, YY, 'r-')\n # for i in range(len(xy)):\n # plt.text(xy[i,0]+0.2,xy[i,1],str(i))\n plt.gca().set_aspect('equal')\n plt.show()\n\n # Compute the starting z in the bulk\n countKL = [KL[jj] for jj in bulk]\n # print 'found = ', np.count_nonzero(countKL), ' connections for ', NP_bulk, ' bulk particles...'\n z_start = float(np.count_nonzero(countKL)) / float(NP_bulk)\n print 'z_start = ', z_start\n print 'target_z = ', target_z\n\n # number of bonds to cut in the bulk\n # Be sure to divide the number of bonds by 2, since each bond double counts\n nbulk2cut = int(max([0, round((z_start - target_z) * 0.5 * float(NP_bulk))]))\n print 'nbulk2cut = ', nbulk2cut\n # number of bonds to cut in the boundary = nbulk2cut * (# boundary bonds)/(#bulk bonds)\n nB2cut = int(round(nbulk2cut * float(len(Binds)) / float(len(binds))))\n print 'nB2cut = ', nB2cut\n\n # CUT RANDOM BONDS\n\n ############################################\n ## DO BOUNDARY FIRST --> to avoid dangling particles\n # Choose nB2cut randomly from bulk\n # Shuffle bulk in-place\n np.random.shuffle(Binds)\n # Now work slowly towards selecting nbulk2cut: of the bonds,\n # but ensure that never leave a particle dangling without bonds\n done_cutting = False\n dmyi = 0\n # Set up mask for BL\n mask = np.ones(len(BL), dtype=bool)\n\n #################################\n # # Check :\n # plt.figure()\n # plt.gca().set_aspect('equal')\n # for ii in range(len(BL)):\n # XX = xy[BL[ii],0]\n # YY = 
xy[BL[ii],1]\n # plt.plot(XX, YY, 'b-')\n # plt.text(np.mean(XX), np.mean(YY), str(ii))\n # plt.show()\n #################################\n\n while not done_cutting:\n if len(np.where(mask == False)[0]) == nB2cut:\n done_cutting = True\n else:\n if np.mod(dmyi, 200) == 1:\n print 'cutting boundary bond: pass ', dmyi, ' (need to cut', nB2cut, ')'\n # consider adding dmyi element of bind to cut (make a test list)\n test = copy.deepcopy(mask)\n test[Binds[dmyi]] = False\n BLtmp = BL[test]\n # Check that BL leads to no dangling particles\n KLtmp = BL2KL(BLtmp, NL)\n # if all the rows in KLtmp have at least one nonzero bond, add dmyi to cut\n # print 'KLtmp.any(axis=1) = ', KLtmp.any(axis=1)\n if (np.where(~KLtmp.any(axis=1))[0]).size > 0:\n dmyi += 1\n else:\n mask[Binds[dmyi]] = False\n dmyi += 1\n\n ############################################\n # Choose nbulk2cut randomly from bulk\n # Shuffle bulk in-place\n np.random.shuffle(binds)\n # print 'binds = ', binds\n # Now work slowly towards selecting nbulk2cut: of the bonds,\n # but ensure that never leave a particle dangling without bonds\n done_cutting = False\n dmyi = 0\n while not done_cutting:\n if len(np.where(mask == False)[0]) == nB2cut + nbulk2cut:\n done_cutting = True\n else:\n if np.mod(dmyi, 200) == 1:\n print 'cutting bulk bond: pass ', dmyi, ' (need to cut', nbulk2cut, ')'\n # consider adding dmyi element of bind to cut (make a test list)\n test = copy.deepcopy(mask)\n test[binds[dmyi]] = False\n BLtmp = BL[test]\n # Check that BL leads to no dangling particles\n KLtmp = BL2KL(BLtmp, NL)\n # print 'KL = ', KLtmp\n # print 'np.where(~KLtmp.any(axis=1))[0] = ', np.where(~KLtmp.any(axis=1))[0]\n # if all the rows in KLtmp have at least one nonzero bond, add dmyi to cut\n if (np.where(~KLtmp.any(axis=1))[0]).size > min_coord - 1:\n dmyi += 1\n else:\n mask[binds[dmyi]] = False\n dmyi += 1\n\n # drop the nbulk2cut + nB2cut rows from total Bond List\n BL = BL[mask]\n # print 'BLout = ', BLout\n NL, KL = BL2NLandKL(BL, NN=NN)\n if check:\n display_lattice_2D(xy, BL)\n\n print '\\nReturning lattice with ', len(BL), ' bonds for ', NP, ' particles...'\n print 'KL[bulk] = ', KL[bulk]\n\n return NL, KL, BL",
"def _triangulate_periodic(self,x):\n\n #1. Tile cell positions 9-fold to perform the periodic triangulation\n # Calculates y from x. y is (9nc x 2) matrix, where the first (nc x 2) are the \"true\" cell positions,\n # and the rest are translations\n y = make_y(x,self.L*self.grid_xy)\n\n\n #2. Perform the triangulation on y\n # The **triangle** package (tr) returns a dictionary, containing the triangulation.\n # This triangulation is extracted and saved as tri\n t = tr.triangulate({\"vertices\": y})\n tri = t[\"triangles\"]\n\n # Del = Delaunay(y)\n # tri = Del.simplices\n n_c = x.shape[0]\n\n #3. Find triangles with **at least one** cell within the \"true\" frame (i.e. with **at least one** \"normal cell\")\n # (Ignore entries with -1, a quirk of the **triangle** package, which denotes boundary triangles\n # Generate a mask -- one_in -- that considers such triangles\n # Save the new triangulation by applying the mask -- new_tri\n tri = tri[(tri != -1).all(axis=1)]\n one_in = (tri<n_c).any(axis=1)\n new_tri = tri[one_in]\n\n #4. Remove repeats in new_tri\n # new_tri contains repeats of the same cells, i.e. in cases where triangles straddle a boundary\n # Use remove_repeats function to remove these. Repeats are flagged up as entries with the same trio of\n # cell ids, which are transformed by the mod function to account for periodicity. See function for more details\n n_tri = self.remove_repeats(new_tri,n_c)\n\n # tri_same = (self.tris == n_tri).all()\n\n #6. Store outputs\n self.n_v = n_tri.shape[0]\n self.tris = n_tri\n self.Cents = x[self.tris]\n self.vs = self.get_vertex_periodic()\n\n #7. Manually calculate the neighbours. See doc_string for conventions.\n n_neigh = get_neighbours(n_tri)\n self.v_neighbours = n_neigh\n self.neighbours = self.vs[n_neigh]",
"def buffered_pts_to_periodicstrip(xy, BL, LL, BBox='auto', check=False):\n if BBox == 'auto':\n # Assuming that BBox is centered and has width, height of LL[0], LL[1]\n BBox = 0.5 * np.array([[-LL[0], -LL[1]], [LL[0], -LL[1]], [LL[0], LL[1]], [-LL[0], LL[1]]])\n keep = np.where(np.logical_and(abs(xy[:, 0]) < LL[0] * 0.5, abs(xy[:, 1]) < LL[1] * 0.5))[0]\n else:\n bpath = mplpath.Path(BBox)\n keep = np.where(bpath.contains_points(xy))[0]\n if check:\n print 'checking that keep is not a logical ==> '\n print ' this would be bool keep = ', bpath.contains_points(xy)\n print ' and this is keep = ', keep\n\n minX = np.min(BBox[:, 0])\n maxX = np.max(BBox[:, 0])\n minY = np.min(BBox[:, 1])\n maxY = np.max(BBox[:, 1])\n PVdict = {'e': np.array([LL[0], 0.0]),\n 'n': np.array([0.0, LL[1]]),\n 'w': np.array([-LL[0], 0.0]),\n 's': np.array([0.0, -LL[1]]),\n 'ne': np.array([LL[0], LL[1]]),\n 'nw': np.array([-LL[0], LL[1]]),\n 'sw': np.array([-LL[0], -LL[1]]),\n 'se': np.array([LL[0], -LL[1]])}\n\n # Create a kd tree of the points\n tree = scipy.spatial.KDTree(xy)\n\n # Find bonds that will be cut. For each bond, match to other particle and add pair to BL and PVxydict\n BLcut, cutIND = find_cut_bonds(BL, keep)\n\n if check:\n plt.scatter(xy[:, 0], xy[:, 1], c='g', marker='x')\n plt.scatter(xy[keep, 0], xy[keep, 1], c='b', marker='o')\n highlight_bonds(xy, BL, ax=plt.gca(), color='b', show=False)\n highlight_bonds(xy, BLcut, ax=plt.gca(), color='r', lw=5, alpha=0.4, show=False)\n xxtmp = np.hstack((BBox[:, 0], np.array(BBox[:, 0])))\n print 'xxtmp = ', xxtmp\n yytmp = np.hstack((BBox[:, 1], np.array(BBox[:, 1])))\n print 'yytmp = ', yytmp\n plt.plot(xxtmp, yytmp, 'k-', lw=1)\n plt.title('Showing bonds that are cut, btwn original and mirrored network')\n plt.show()\n\n # preallocate BL2add and PVs\n BL2add = np.zeros((len(BLcut), 2), dtype=int)\n PVd = {} # = np.zeros((len(BLcut),2), dtype=float)\n kk = 0\n for bond in BLcut:\n # which endpt is outside?\n ptA = bond[0]\n ptB = bond[1]\n # mpt is short for 'mirror point', the point outside the bounding box\n if ptA not in keep:\n mpt, kpt = ptA, ptB\n else:\n mpt, kpt = ptB, ptA\n\n # Assume that the bond should remain broken unless the PV is 'e' or 'w' (east or west)\n ok_stripbc = False\n if xy[mpt, 0] < minX:\n if xy[mpt, 1] < minY:\n # Mirror particle is SW\n PV = PVdict['sw']\n elif xy[mpt, 1] > maxY:\n # Mirror particle is NW\n PV = PVdict['nw']\n else:\n # Mirror particle is West\n PV = PVdict['w']\n ok_stripbc = True\n elif xy[mpt, 0] > maxX:\n if xy[mpt, 1] < minY:\n # Mirror particle is SE\n PV = PVdict['se']\n elif xy[mpt, 1] > maxY:\n # Mirror particle is NE\n PV = PVdict['ne']\n else:\n # Mirror particle is East\n PV = PVdict['e']\n ok_stripbc = True\n elif xy[mpt, 1] < minY:\n # Mirror particle is South\n PV = PVdict['s']\n else:\n # Mirror particle is North\n PV = PVdict['n']\n\n if ok_stripbc:\n # Get index of the particle that resides a vector -PV away from mirror particle\n dist, ind = tree.query(xy[mpt] - PV)\n if (kpt, ind) not in PVd and (ind, kpt) not in PVd:\n BL2add[kk] = np.array([-kpt, -ind])\n PVd[(kpt, ind)] = PV\n print 'adding (kpt, ind) = ', (kpt, ind)\n kk += 1\n\n BL2add = BL2add[0:kk]\n\n if check:\n print 'PVd = ', PVd\n display_lattice_2D(xy, np.abs(BL), title=\"showing extended lattice (w/o strip PBCs)\")\n\n # Crop network, and add back cut bonds as periodic ones\n BL = np.vstack((BL, BL2add))\n xytrim, NL, KL, BLtrim, PVxydict = remove_pts(keep, xy, BL)\n # Adjusting BL2add to account for smaller #npts 
(post-cropping) is already done in remove_pts\n # Adjust PVs to account for smaller #npts (post-cropping)\n remove = np.setdiff1d(np.arange(len(xy)), keep)\n PVxydict = {}\n for key in PVd:\n # adjust key to lower indices\n # count how many pts in remove are lower than key[0] and key[1], respectively\n lower0 = np.sum(remove < key[0])\n lower1 = np.sum(remove < key[1])\n newkey = (key[0] - lower0, key[1] - lower1)\n PVxydict[newkey] = PVd[key]\n\n if check:\n # Plot lattice without PBCs\n display_lattice_2D(xytrim, np.abs(BLtrim), title=\"showing lattice connectivity w/o strip PBCs\")\n display_lattice_2D(xytrim, BLtrim, PVxydict=PVxydict, title=\"showing lattice connectivity with strip PBCs\")\n\n return xytrim, NL, KL, BLtrim, PVxydict",
"def extract_boundary(xy, NL, KL, BL, check=False):\n # Clear periodic bonds from KL\n pbonds = np.where(KL.ravel() < 0)[0]\n if len(pbonds) > 0:\n print 'le: Found periodic bonds in le.extract_boundary(), clearing...'\n KLr = KL.ravel()\n KLr[pbonds] = 0\n KL = KLr.reshape(np.shape(KL))\n print 'le: pbonds = ', pbonds\n\n # If there are dangling points, remove them for now and adjust indices later\n dangles = np.where(~KL.any(axis=1))[0]\n if len(dangles) > 0:\n print 'le: extract_boundary: Removing dangling points: dangles = ', dangles\n if check:\n plt.plot(xy[:, 0], xy[:, 1], 'b.')\n for ii in range(len(xy)):\n plt.text(xy[ii, 0] + 0.1, xy[ii, 1], str(ii))\n plt.plot(xy[dangles, 0], xy[dangles, 1], 'ro')\n plt.title('Original point indices, before removing dangles. Dangles circled in red.')\n plt.show()\n\n translate_at_end = True\n\n NP = len(xy)\n\n nondangles = np.setdiff1d(np.arange(NP), dangles)\n # Note that remove_pts can handle periodic BL\n\n if len(nondangles) == 0:\n print 'There are no particles that are not part of dangling bonds. All particles are part of the boundary.'\n return np.arange(len(xy))\n\n xy, NL, KL, BL, PVxydict = remove_pts(nondangles, xy, BL)\n\n # Remove bonds which were periodic.\n pbonds = np.where(KL.ravel() < 0)[0]\n print 'le: pbonds = ', pbonds\n if pbonds:\n print 'le: Found periodic bonds in extract_boundary(), clearing...'\n KLr = KL.ravel()\n KLr[pbonds] = 0\n KL = KLr.reshape(np.shape(KL))\n print 'le: pbonds = ', pbonds\n\n if check:\n print 'le: NL = ', NL\n display_lattice_2D(xy, BL, NL=NL, KL=KL, title='Removed points in extract_boundary()')\n\n # xy = xy[nondangles]\n # NL = NL[nondangles]\n # KL = KL[nondangles]\n\n # translation converts indices of long old xy to small new xy\n # backtrans converts indices of small, new xy to indices of long, old xy\n # .1 .0\n # .0 trans ----->\n # . 
2 <----- backtrans .1\n # .3 .2\n translation = np.arange(NP, dtype=int)\n for IND in dangles:\n translation[IND:] -= 1\n # mark the removed point by -5\n translation[IND] = -5\n\n backtrans = np.where(translation > -1)[0]\n if check:\n print 'le: backtrans = ', backtrans\n print 'le: translation = ', translation\n\n # translation = np.where()\n\n else:\n translate_at_end = False\n\n # Initialize the list of boundary indices to be larger than necessary\n bb = np.zeros(2 * len(xy), dtype=int)\n\n # Start with the rightmost point, which is guaranteed to be\n # at the convex hull and thus also at the outer edge.\n # Then take the first step to be along the minimum angle bond\n rightIND = np.where(xy[:, 0] == max(xy[:, 0]))[0]\n # If there are more than one rightmost point, choose one\n if rightIND.size > 1:\n rightIND = rightIND[0]\n\n if check:\n print 'le.extract_boundary(): Found rightmost pt: ', rightIND\n print 'le.extract_boundary(): with neighbors: ', NL[rightIND]\n print 'le.extract_boundary(): with connectns: ', KL[rightIND]\n plt.plot(xy[:, 0], xy[:, 1], 'k.')\n plt.plot(xy[rightIND, 0], xy[rightIND, 1], 'bo')\n for ii in range(len(xy)):\n plt.text(xy[ii, 0] + 0.1, xy[ii, 1], str(ii))\n plt.plot(xy[rightIND, 0], xy[rightIND, 1], 'ro')\n plt.pause(0.01)\n\n # Grab the true neighbors of this starting point\n print 'le.extract_boundary(): NL[rightIND, :] = ', NL[rightIND, :]\n neighbors = NL[rightIND, np.argwhere(KL[rightIND].ravel()).ravel()]\n print 'le.extract_boundary(): neighbors = ', neighbors\n print 'le.extract_boundary(): rightIND = ', rightIND\n\n # Compute the angles of the neighbor bonds\n angles = np.mod(np.arctan2(xy[neighbors, 1] - xy[rightIND, 1], xy[neighbors, 0] - xy[rightIND, 0]).ravel(),\n 2 * np.pi)\n if check:\n print 'KL[rightIND] = ', KL[rightIND]\n print 'KL[rightIND,0] = ', KL[rightIND, 0]\n print 'KL[rightIND,0] ==0 ', KL[rightIND, 0] == 0\n print 'np.argwhere(KL[rightIND]) = ', np.argwhere(KL[rightIND])\n print 'np.argwhere(KL[rightIND].ravel())= ', np.argwhere(KL[rightIND].ravel())\n print 'neighbors = ', neighbors\n print 'angles = ', angles\n\n # Take the second particle to be the one with the lowest bond angle (will be >= pi/2)\n # print ' angles==min--> ', angles==min(angles)\n nextIND = neighbors[angles == min(angles)][0]\n bb[0] = rightIND\n\n dmyi = 1\n # as long as we haven't completed the full outer edge/boundary, add nextIND\n while nextIND != rightIND:\n # print '\\n nextIND = ', nextIND\n # print 'np.argwhere(KL[nextIND]) = ', np.argwhere(KL[nextIND]).ravel()\n bb[dmyi] = nextIND\n angles, neighbors = bond_angles_wrt_bond(bb[dmyi - 1], nextIND, xy, NL, KL)\n nextIND = neighbors[angles == min(angles)][0]\n # print 'nextIND = ', nextIND\n\n if check:\n # plt.plot(xy[:,0],xy[:,1],'k.')\n XY = np.vstack([xy[bb[dmyi], :], xy[nextIND, :]])\n plt.plot(XY[:, 0], XY[:, 1], 'r-')\n # for i in range(len(xy)):\n # plt.text(xy[i,0]+0.2,xy[i,1],str(i))\n plt.gca().set_aspect('equal')\n plt.pause(0.01)\n\n dmyi += 1\n\n # Truncate the list of boundary indices\n boundary = bb[0:dmyi]\n\n # Since some points were removed from the boundary identification, translate\n # indices back to indices of original xy\n if translate_at_end:\n print 'le.extract_boundary(): Translating boundary points back into original indices...'\n # print 'boundary = ', boundary\n # print 'translation = ', translation\n # print 'backtrans = ', backtrans\n boundary = backtrans[boundary]\n\n return boundary",
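Editorial aside, not part of the dataset record above: extract_boundary walks the outer boundary by repeatedly taking the bond with the smallest turning angle, which also works for non-convex networks. Purely to anchor the idea, the convex special case (which the record does not assume) is a one-liner with SciPy; the random points below are demo values.

import numpy as np
from scipy.spatial import ConvexHull

rng = np.random.default_rng(1)
xy = rng.random((30, 2))
hull = ConvexHull(xy)
print(hull.vertices)   # indices of the boundary points, ordered counter-clockwise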
"def swath_from_cartesian_grid(cart_grid, lons, lats, data,\n radius_of_influence):\n\n valid_index = get_valid_index_from_cartesian_grid(cart_grid, lons, lats,\n radius_of_influence)\n\n lons = lons[valid_index]\n lats = lats[valid_index]\n data = data[valid_index]\n\n return lons, lats, data",
"def indices_and_currents_TSC_2D( charge_electron, positions_x, positions_y, velocity_x, velocity_y,\\\n x_grid, y_grid, ghost_cells, length_domain_x, length_domain_y, dt ):\n \n \n positions_x_new = positions_x + velocity_x * dt\n positions_y_new = positions_y + velocity_y * dt\n\n base_indices_x = af.data.constant(0, positions_x.elements(), dtype=af.Dtype.u32)\n base_indices_y = af.data.constant(0, positions_x.elements(), dtype=af.Dtype.u32)\n\n dx = af.sum(x_grid[1] - x_grid[0])\n dy = af.sum(y_grid[1] - y_grid[0])\n\n\n # Computing S0_x and S0_y\n ###########################################################################################\n \n # Determining the grid cells containing the respective particles\n \n x_zone = (((af.abs(positions_x - af.sum(x_grid[0])))/dx).as_type(af.Dtype.u32))\n y_zone = (((af.abs(positions_y - af.sum(y_grid[0])))/dy).as_type(af.Dtype.u32))\n\n \n # Determing the indices of the closest grid node in x direction\n\n temp = af.where(af.abs(positions_x-x_grid[x_zone]) < \\\n af.abs(positions_x-x_grid[x_zone + 1])\\\n )\n\n if(temp.elements()>0):\n base_indices_x[temp] = x_zone[temp]\n\n temp = af.where(af.abs(positions_x - x_grid[x_zone]) >= \\\n af.abs(positions_x-x_grid[x_zone + 1])\\\n )\n\n if(temp.elements()>0):\n base_indices_x[temp] = (x_zone[temp] + 1).as_type(af.Dtype.u32) \n\n\n # Determing the indices of the closest grid node in y direction\n\n temp = af.where(af.abs(positions_y-y_grid[y_zone]) < \\\n af.abs(positions_y-y_grid[y_zone + 1])\\\n )\n\n if(temp.elements()>0):\n base_indices_y[temp] = y_zone[temp]\n\n temp = af.where(af.abs(positions_y - y_grid[y_zone])>=af.abs(positions_y-x_grid[y_zone + 1]))\n\n if(temp.elements()>0):\n base_indices_y[temp] = (y_zone[temp] + 1).as_type(af.Dtype.u32) \n\n # Concatenating the index list for near by grid nodes in x direction\n # TSC affect 5 nearest grid nodes around in 1 Dimensions\n\n base_indices_minus_two = (base_indices_x - 2).as_type(af.Dtype.u32) \n base_indices_minus = (base_indices_x - 1).as_type(af.Dtype.u32) \n base_indices_plus = (base_indices_x + 1).as_type(af.Dtype.u32) \n base_indices_plus_two = (base_indices_x + 2).as_type(af.Dtype.u32) \n\n\n\n index_list_x = af.join( 1,\\\n af.join(1, base_indices_minus_two, base_indices_minus, base_indices_x),\\\n af.join(1, base_indices_plus, base_indices_plus_two),\\\n )\n\n\n\n # Concatenating the index list for near by grid nodes in y direction\n # TSC affect 5 nearest grid nodes around in 1 Dimensions\n \n base_indices_minus_two = (base_indices_y - 2).as_type(af.Dtype.u32) \n base_indices_minus = (base_indices_y - 1).as_type(af.Dtype.u32) \n base_indices_plus = (base_indices_y + 1).as_type(af.Dtype.u32) \n base_indices_plus_two = (base_indices_y + 2).as_type(af.Dtype.u32) \n\n\n index_list_y = af.join( 1,\\\n af.join(1, base_indices_minus_two, base_indices_minus, base_indices_y),\\\n af.join(1, base_indices_plus, base_indices_plus_two),\\\n )\n\n # Concatenating the positions_x for determining weights for near by grid nodes in y direction\n # TSC affect 5 nearest grid nodes around in 1 Dimensions\n\n positions_x_5x = af.join( 0,\\\n af.join(0, positions_x, positions_x, positions_x),\\\n af.join(0, positions_x, positions_x),\\\n )\n\n positions_y_5x = af.join( 0,\\\n af.join(0, positions_y, positions_y, positions_y),\\\n af.join(0, positions_y, positions_y),\\\n )\n\n\n\n\n # Determining S0 for positions at t = n * dt\n\n\n distance_nodes_x = x_grid[af.flat(index_list_x)]\n\n distance_nodes_y = y_grid[af.flat(index_list_y)]\n\n\n W_x = 0 * 
distance_nodes_x.copy()\n W_y = 0 * distance_nodes_y.copy()\n\n\n # Determining weights in x direction\n\n temp = af.where(af.abs(distance_nodes_x - positions_x_5x) < (0.5*dx) )\n\n if(temp.elements()>0):\n W_x[temp] = 0.75 - (af.abs(distance_nodes_x[temp] - positions_x_5x[temp])/dx)**2\n\n temp = af.where((af.abs(distance_nodes_x - positions_x_5x) >= (0.5*dx) )\\\n * (af.abs(distance_nodes_x - positions_x_5x) < (1.5 * dx) )\\\n )\n\n if(temp.elements()>0):\n W_x[temp] = 0.5 * (1.5 - (af.abs(distance_nodes_x[temp] - positions_x_5x[temp])/dx))**2\n\n\n\n # Determining weights in y direction\n\n temp = af.where(af.abs(distance_nodes_y - positions_y_5x) < (0.5*dy) )\n\n if(temp.elements()>0):\n W_y[temp] = 0.75 - (af.abs(distance_nodes_y[temp] - positions_y_5x[temp])/dy)**2\n\n temp = af.where((af.abs(distance_nodes_y - positions_y_5x) >= (0.5*dy) )\\\n * (af.abs(distance_nodes_y - positions_y_5x) < (1.5 * dy) )\\\n )\n\n if(temp.elements()>0):\n W_y[temp] = 0.5 * (1.5 - (af.abs(distance_nodes_y[temp] - positions_y_5x[temp])/dy))**2\n\n # Restructering W_x and W_y for visualization and ease of understanding\n\n W_x = af.data.moddims(W_x, positions_x.elements(), 5)\n W_y = af.data.moddims(W_y, positions_y.elements(), 5)\n\n # Tiling the S0_x and S0_y for the 25 indices around the particle\n \n S0_x = af.tile(W_x, 1, 1, 5)\n S0_y = af.tile(W_y, 1, 1, 5)\n\n\n S0_y = af.reorder(S0_y, 0, 2, 1)\n\n\n\n #Computing S1_x and S1_y\n ###########################################################################################\n\n positions_x_5x_new = af.join( 0,\\\n af.join(0, positions_x_new, positions_x_new, positions_x_new),\\\n af.join(0, positions_x_new, positions_x_new),\\\n )\n\n positions_y_5x_new = af.join( 0,\\\n af.join(0, positions_y_new, positions_y_new, positions_y_new),\\\n af.join(0, positions_y_new, positions_y_new),\\\n )\n\n\n\n\n # Determining S0 for positions at t = n * dt\n\n W_x = 0 * distance_nodes_x.copy()\n W_y = 0 * distance_nodes_y.copy()\n\n\n # Determining weights in x direction\n\n temp = af.where(af.abs(distance_nodes_x - positions_x_5x_new) < (0.5*dx) )\n\n if(temp.elements()>0):\n W_x[temp] = 0.75 - (af.abs(distance_nodes_x[temp] - positions_x_5x_new[temp])/dx)**2\n\n temp = af.where((af.abs(distance_nodes_x - positions_x_5x_new) >= (0.5*dx) )\\\n * (af.abs(distance_nodes_x - positions_x_5x_new) < (1.5 * dx) )\\\n )\n\n if(temp.elements()>0):\n W_x[temp] = 0.5 * (1.5 - (af.abs(distance_nodes_x[temp] \\\n - positions_x_5x_new[temp])/dx\\\n )\\\n )**2\n\n\n\n # Determining weights in y direction\n\n temp = af.where(af.abs(distance_nodes_y - positions_y_5x_new) < (0.5*dy) )\n\n if(temp.elements()>0):\n W_y[temp] = 0.75 - (af.abs(distance_nodes_y[temp] \\\n - positions_y_5x_new[temp]\\\n )/dy\\\n )**2\n\n temp = af.where((af.abs(distance_nodes_y - positions_y_5x_new) >= (0.5*dy) )\\\n * (af.abs(distance_nodes_y - positions_y_5x_new) < (1.5 * dy) )\\\n )\n\n if(temp.elements()>0):\n W_y[temp] = 0.5 * (1.5 - (af.abs(distance_nodes_y[temp] \\\n - positions_y_5x_new[temp])/dy\\\n )\\\n )**2\n\n # Restructering W_x and W_y for visualization and ease of understanding\n\n W_x = af.data.moddims(W_x, positions_x.elements(), 5)\n W_y = af.data.moddims(W_y, positions_x.elements(), 5)\n\n # Tiling the S0_x and S0_y for the 25 indices around the particle \n \n S1_x = af.tile(W_x, 1, 1, 5)\n S1_y = af.tile(W_y, 1, 1, 5)\n\n S1_y = af.reorder(S1_y, 0, 2, 1)\n\n\n ###########################################################################################\n\n # Determining the final weight 
matrix for currents in 3D matrix form factor\n\n\n W_x = (S1_x - S0_x) * (S0_y + (0.5 *(S1_y - S0_y)) )\n\n\n W_y = (S1_y - S0_y) * (S0_x + (0.5 *(S1_x - S0_x)) )\n\n\n ###########################################################################################\n\n\n # Assigning Jx and Jy according to Esirkepov's scheme\n\n Jx = af.data.constant(0, positions_x.elements(), 5, 5, dtype = af.Dtype.f64)\n Jy = af.data.constant(0, positions_x.elements(), 5, 5, dtype = af.Dtype.f64)\n\n\n Jx[:, 0, :] = -1 * charge_electron * (dx/dt) * W_x[:, 0, :].copy()\n Jx[:, 1, :] = Jx[:, 0, :] + -1 * charge_electron * (dx/dt) * W_x[:, 1, :].copy()\n Jx[:, 2, :] = Jx[:, 1, :] + -1 * charge_electron * (dx/dt) * W_x[:, 2, :].copy()\n Jx[:, 3, :] = Jx[:, 2, :] + -1 * charge_electron * (dx/dt) * W_x[:, 3, :].copy()\n Jx[:, 4, :] = Jx[:, 3, :] + -1 * charge_electron * (dx/dt) * W_x[:, 4, :].copy()\n \n # Computing current density using currents\n \n Jx = (1/(dx * dy)) * Jx\n\n\n Jy[:, :, 0] = -1 * charge_electron * (dy/dt) * W_y[:, :, 0].copy()\n Jy[:, :, 1] = Jy[:, :, 0] + -1 * charge_electron * (dy/dt) * W_y[:, :, 1].copy()\n Jy[:, :, 2] = Jy[:, :, 1] + -1 * charge_electron * (dy/dt) * W_y[:, :, 2].copy()\n Jy[:, :, 3] = Jy[:, :, 2] + -1 * charge_electron * (dy/dt) * W_y[:, :, 3].copy()\n Jy[:, :, 4] = Jy[:, :, 3] + -1 * charge_electron * (dy/dt) * W_y[:, :, 4].copy()\n \n # Computing current density using currents\n\n Jy = (1/(dx * dy)) * Jy\n\n # Preparing the final index and current vectors\n ###########################################################################################\n \n \n # Determining the x indices for charge deposition\n index_list_x_Jx = af.flat(af.tile(index_list_x, 1, 1, 5))\n\n # Determining the y indices for charge deposition\n y_current_zone = af.tile(index_list_y, 1, 1, 5)\n index_list_y_Jx = af.flat(af.reorder(y_current_zone, 0, 2, 1))\n\n\n currents_Jx = af.flat(Jx)\n\n # Determining the x indices for charge deposition\n index_list_x_Jy = af.flat(af.tile(index_list_x, 1, 1, 5))\n\n # Determining the y indices for charge deposition\n y_current_zone = af.tile(index_list_y, 1, 1, 5)\n index_list_y_Jy = af.flat(af.reorder(y_current_zone, 0, 2, 1))\n \n # Flattenning the Currents array\n currents_Jy = af.flat(Jy)\n\n af.eval(index_list_x_Jx, index_list_y_Jx)\n af.eval(index_list_x_Jy, index_list_y_Jy)\n af.eval(currents_Jx, currents_Jy)\n\n\n return index_list_x_Jx, index_list_y_Jx, currents_Jx,\\\n index_list_x_Jy, index_list_y_Jy, currents_Jy",
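Editorial aside, not part of the dataset record above: the ArrayFire kernel hinges on the triangular-shaped-cloud (TSC) shape function evaluated on the five grid nodes nearest each particle. Below is a minimal NumPy sketch of those 1D weights using the same piecewise formula as the record (0.75 - d^2 for d < 0.5, 0.5*(1.5 - d)^2 for 0.5 <= d < 1.5, in units of the grid spacing); the grid, spacing, and particle positions are made-up demo values and particles are assumed to sit well inside the grid.

import numpy as np

def tsc_weights_1d(pos, grid, dx):
    # TSC weights of each particle on the 5 grid nodes nearest to it.
    # Returns (indices, weights) with shapes (n, 5); each row of weights sums to 1.
    nearest = np.rint((pos - grid[0]) / dx).astype(int)      # closest node index
    idx = nearest[:, None] + np.arange(-2, 3)[None, :]       # 5 candidate nodes
    d = np.abs(grid[idx] - pos[:, None]) / dx                # normalized distance
    w = np.zeros_like(d)
    inner = d < 0.5
    outer = (d >= 0.5) & (d < 1.5)
    w[inner] = 0.75 - d[inner] ** 2
    w[outer] = 0.5 * (1.5 - d[outer]) ** 2
    return idx, w

grid = np.linspace(0.0, 1.0, 11)                             # dx = 0.1
pos = np.array([0.33, 0.71])
idx, w = tsc_weights_1d(pos, grid, dx=0.1)
print(w.sum(axis=1))                                         # ~[1. 1.]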
"def find_dirac_nodes():\n\n vasprun = Vasprun('vasprun.xml')\n dirac = False\n if vasprun.get_band_structure().get_band_gap()['energy'] < 0.1:\n efermi = vasprun.efermi\n bsp = BSPlotter(vasprun.get_band_structure('KPOINTS', line_mode=True,\n efermi=efermi))\n bands = []\n data = bsp.bs_plot_data(zero_to_efermi=True)\n for d in range(len(data['distances'])):\n for i in range(bsp._nb_bands):\n x = data['distances'][d],\n y = [data['energy'][d][str(Spin.up)][i][j]\n for j in range(len(data['distances'][d]))]\n band = [x, y]\n bands.append(band)\n\n considered = []\n for i in range(len(bands)):\n for j in range(len(bands)):\n if i != j and (j, i) not in considered:\n considered.append((j, i))\n for k in range(len(bands[i][0])):\n if ((-0.1 < bands[i][1][k] < 0.1) and\n (-0.1 < bands[i][1][k] - bands[j][1][k] < 0.1)):\n dirac = True\n return dirac",
"def find_loc_indices(loc, dir, tile):\n #returns the indices of the nearest neighbor point in the given tile, the lon/lat of the nearest neighbor, \n #and the distance (m) from the given point to the nearest neighbor grid cell\n \n filename_pattern = '*grid.tile{0}.nc'.format(tile)\n for f_name in os.listdir(dir):\n if fnmatch.fnmatch(f_name, filename_pattern):\n filename = f_name\n if not filename:\n message = 'No filenames matching the pattern {0} found in {1}'.format(filename_pattern,dir)\n logging.critical(message)\n raise Exception(message)\n \n nc_file = Dataset('{0}/{1}'.format(dir,filename))\n #read in supergrid longitude and latitude\n lon_super = np.array(nc_file['x']) #[lat,lon] or [y,x] #.swapaxes(0,1)\n lat_super = np.array(nc_file['y']) #[lat,lon] or [y,x] #.swapaxes(0,1)\n #get the longitude and latitude data for the grid centers by slicing the supergrid \n #and taking only odd-indexed values\n longitude = lon_super[1::2,1::2]\n latitude = lat_super[1::2,1::2]\n nc_file.close()\n \n adj_long = False \n #look for reversal of longitude; if found, adjust longitude so that 0-360 transition doesn't exist\n temp_loc = copy.deepcopy(loc)\n for row in longitude:\n if not (np.all(np.diff(row) >= 0) or np.all(np.diff(row) <= 0)):\n adj_long = True\n if adj_long:\n longitude[longitude < 180] += 360\n if loc[0] < 180:\n temp_loc[0] += 360\n \n #set up an array to hold the euclidean distance between the given point and every grid cell\n eucl_dist = np.zeros((longitude.shape[0],longitude.shape[1]))\n \n #get the Cartesian location of the given point\n cart_loc = np.array(sph2cart(math.radians(temp_loc[0]), math.radians(temp_loc[1]), earth_radius))\n \n for i in range(len(longitude)):\n for j in range(len(longitude[i])):\n #get the Cartesian location of all grid points\n cart_cell = np.array(sph2cart(math.radians(longitude[i,j]), math.radians(latitude[i,j]), earth_radius))\n \n #calculate the euclidean distance from the given point to the current grid cell\n eucl_dist[i,j] = np.linalg.norm(cart_loc - cart_cell)\n \n #get the indices of the grid point with the minimum euclidean distance to the given point\n i,j = np.unravel_index(eucl_dist.argmin(), eucl_dist.shape)\n \n return (i,j,longitude[i,j]%360.0, latitude[i,j], eucl_dist[i,j])",
"def droplet(r_drop=0.02): # [dm]\n alpha_pom = float(76.8)\n r_real = r_drop / np.sin(alpha_pom) # [dm]\n height = r_real * (1 - np.cos(alpha_pom)) # [dm]\n s_drop = np.pi * (4 * r_real * height - height ** 2) # [dm2]\n v_drop = np.pi * height ** 2 * (r_real - height / 3) # [dm3]\n s0 = np.pi * r_drop ** 2 # [dm2]\n return s_drop, v_drop, s0 # , h_max, s_max, v_max, s1",
"def BL2PVxydict(BL, xy, PV):\n # The ijth element of PVx is the xcomponent of the vector taking NL[i,j] to its image as seen by particle i.\n PVxydict = {}\n # check both directions along each periodic vector\n PVtmp = np.vstack((PV, -PV))\n\n # For each bond that is a periodic bond, determine its periodic boundary vector (a row of the array PV)\n pBs = np.unique(np.where(BL < 0)[0])\n print 'le: BL[pBs] = ', BL[pBs]\n print 'le: pBs = ', pBs\n for ind in pBs:\n # Find the PV (periodic vector) that brings the second particle (j) closest to the first (i).\n # This will be PVxydict[(i,j)], since particle i sees j at xy[j]+PVxydict[(i,j)]\n a1 = xy[np.abs(BL[ind, 0])]\n a2 = xy[np.abs(BL[ind, 1])]\n try:\n distxy = a2 + PVtmp - a1\n except ValueError:\n print 'a1 = ', a1\n print 'a2 = ', a2\n print 'PVtmp = ', PVtmp\n raise RuntimeError('dimensions do not match')\n dist = distxy[:, 0] ** 2 + distxy[:, 1] ** 2\n # print 'a1, a2 = ', a1, a2\n # print 'distxy = ', distxy\n # print 'PV = ', PV\n # print 'dist = ', dist\n if np.argmin(dist) > len(PV) - 1:\n PVxydict[(np.abs(BL[ind, 0]), np.abs(BL[ind, 1]))] = -PV[np.argmin(dist) % len(PV)]\n else:\n PVxydict[(np.abs(BL[ind, 0]), np.abs(BL[ind, 1]))] = PV[np.argmin(dist) % len(PV)]\n\n print 'le: PVxydict = ', PVxydict\n return PVxydict",
"def buffered_pts_to_periodic_network(xy, BL, LL, BBox=None, check=False):\n if BBox is None or isinstance(BBox, str):\n # Assuming that BBox is centered and has width, height of LL[0], LL[1]\n BBox = 0.5 * np.array([[-LL[0], -LL[1]], [LL[0], -LL[1]], [LL[0], LL[1]], [-LL[0], LL[1]]])\n keep = np.where(np.logical_and(abs(xy[:, 0]) < LL[0] * 0.5, abs(xy[:, 1]) < LL[1] * 0.5))[0]\n else:\n bpath = mplpath.Path(BBox)\n keep = np.where(bpath.contains_points(xy))[0]\n if check:\n print 'checking that keep is not a logical ==> '\n print ' this would be bool keep = ', bpath.contains_points(xy)\n print ' and this is keep = ', keep\n\n minX = np.min(BBox[:, 0])\n maxX = np.max(BBox[:, 0])\n minY = np.min(BBox[:, 1])\n maxY = np.max(BBox[:, 1])\n PVdict = {'e': np.array([LL[0], 0.0]),\n 'n': np.array([0.0, LL[1]]),\n 'w': np.array([-LL[0], 0.0]),\n 's': np.array([0.0, -LL[1]]),\n 'ne': np.array([LL[0], LL[1]]),\n 'nw': np.array([-LL[0], LL[1]]),\n 'sw': np.array([-LL[0], -LL[1]]),\n 'se': np.array([LL[0], -LL[1]])}\n\n # Create a kd tree of the points\n tree = scipy.spatial.KDTree(xy)\n\n # Find bonds that will be cut. For each bond, match to other particle and add pair to BL and PVxydict\n BLcut, cutIND = find_cut_bonds(BL, keep)\n\n if check:\n plt.scatter(xy[:, 0], xy[:, 1], c='g', marker='x')\n plt.scatter(xy[keep, 0], xy[keep, 1], c='b', marker='o')\n highlight_bonds(xy, BL, ax=plt.gca(), color='b', show=False)\n highlight_bonds(xy, BLcut, ax=plt.gca(), color='r', lw=1, show=False)\n xxtmp = np.hstack((BBox[:, 0], np.array(BBox[:, 0])))\n print 'xxtmp = ', xxtmp\n yytmp = np.hstack((BBox[:, 1], np.array(BBox[:, 1])))\n print 'yytmp = ', yytmp\n plt.plot(xxtmp, yytmp, 'k-', lw=2)\n plt.title('Showing bonds that are cut, btwn original and mirrored network')\n plt.show()\n\n # preallocate BL2add and PVs\n BL2add = np.zeros((len(BLcut), 2), dtype=int)\n PVd = {} # = np.zeros((len(BLcut),2), dtype=float)\n kk = 0\n for bond in BLcut:\n # which endpt is outside?\n ptA = bond[0]\n ptB = bond[1]\n # mpt is short for 'mirror point', the point outside the bounding box\n if ptA not in keep:\n mpt, kpt = ptA, ptB\n else:\n mpt, kpt = ptB, ptA\n if xy[mpt, 0] < minX:\n if xy[mpt, 1] < minY:\n # Mirror particle is SW\n PV = PVdict['sw']\n elif xy[mpt, 1] > maxY:\n # Mirror particle is NW\n PV = PVdict['nw']\n else:\n # Mirror particle is West\n PV = PVdict['w']\n elif xy[mpt, 0] > maxX:\n if xy[mpt, 1] < minY:\n # Mirror particle is SE\n PV = PVdict['se']\n elif xy[mpt, 1] > maxY:\n # Mirror particle is NE\n PV = PVdict['ne']\n else:\n # Mirror particle is East\n PV = PVdict['e']\n elif xy[mpt, 1] < minY:\n # Mirror particle is South\n PV = PVdict['s']\n else:\n # Mirror particle is North\n PV = PVdict['n']\n\n # Get index of the particle that resides a vector -PV away from mirror particle\n dist, ind = tree.query(xy[mpt] - PV)\n BL2add[kk] = np.array([-kpt, -ind])\n PVd[(kpt, ind)] = PV\n kk += 1\n\n if check:\n print 'PVd = ', PVd\n display_lattice_2D(xy, np.abs(BL), title=\"showing extended lattice (w/o PBCs)\")\n\n # Crop network, and add back cut bonds as periodic ones\n BL = np.vstack((BL, BL2add))\n xytrim, NL, KL, BLtrim, PVxydict = remove_pts(keep, xy, BL)\n # Adjusting BL2add to account for smaller #npts (post-cropping) is already done in remove_pts\n # Adjust PVs to account for smaller #npts (post-cropping)\n remove = np.setdiff1d(np.arange(len(xy)), keep)\n\n # PVxydict should be correct as is, from output of remove_pts...\n PVxydict_check = {}\n for key in PVd:\n # adjust key to lower indices\n 
# count how many pts in remove are lower than key[0] and key[1], respectively\n lower0 = np.sum(remove < key[0])\n lower1 = np.sum(remove < key[1])\n newkey = (key[0] - lower0, key[1] - lower1)\n PVxydict_check[newkey] = PVd[key]\n print 'PVxydict = ', PVxydict\n print 'PVxydict_check = ', PVxydict_check\n if PVxydict is None:\n PVxydict = PVxydict_check\n else:\n raise RuntimeError('Are these PVxydicts the same?')\n\n if check:\n # Plot lattice without PBCs\n display_lattice_2D(xytrim, np.abs(BLtrim), title=\"showing lattice connectivity w/o PBCs\")\n display_lattice_2D(xytrim, BLtrim, PVxydict=PVxydict, title=\"showing lattice connectivity with PBCs\")\n\n return xytrim, NL, KL, BLtrim, PVxydict",
"def buffer_points_for_rectangular_periodicBC(xy, LL, dist=7.0):\n # Copy some of lattice to north, south, east, west and corners\n print 'le: xy = ', xy\n print 'le: np.min(xy[:, 0]) = ', np.min(xy[:, 0])\n print 'np.sort(xy[:, 0]) = ', np.sort(xy[:, 0])\n west = np.where(xy[:, 0] < (np.nanmin(xy[:, 0]) + dist))[0]\n sout = np.where(xy[:, 1] < (np.nanmin(xy[:, 1]) + dist))[0]\n east = np.where(xy[:, 0] > (np.nanmax(xy[:, 0]) - dist))[0]\n nort = np.where(xy[:, 1] > (np.nanmax(xy[:, 1]) - dist))[0]\n swest = np.intersect1d(sout, west)\n seast = np.intersect1d(sout, east)\n neast = np.intersect1d(nort, east)\n nwest = np.intersect1d(nort, west)\n Epts = xy[west] + np.array([LL[0], 0.0])\n Npts = xy[sout] + np.array([0.0, LL[1]])\n Wpts = xy[east] + np.array([-LL[0], 0.0])\n Spts = xy[nort] + np.array([0.0, -LL[1]])\n NEpts = xy[swest] + np.array([LL[0], LL[1]])\n NWpts = xy[seast] + np.array([-LL[0], LL[1]])\n SWpts = xy[neast] + np.array([-LL[0], -LL[1]])\n SEpts = xy[nwest] + np.array([LL[0], -LL[1]])\n # print 'extrapts = ', Epts, NEpts, Npts, NWpts\n # print '...and more'\n xyout = np.vstack((xy, Epts, NEpts, Npts, NWpts, Wpts, SWpts, Spts, SEpts))\n\n return xyout",
"def periodic_polygon_indices2xy(poly, xy, BLdbl, PVxydict):\n periodicpoly = False\n tups = pairwise(poly)\n xypoly = []\n pervec = np.array([0., 0.])\n # Add first point to coordinate list\n xypoly.append((xy[tups[0][0], :] + pervec).tolist())\n for tup in tups:\n # Check if the matching row of BL is all positive --> if so, then not periodic bond\n # NOTE: If tup is positive, and bond is periodic, then will not register a match!\n match = (BLdbl[:, 0] == tup[0]) & (BLdbl[:, 1] == tup[1])\n if match.any() and (BLdbl[match, :] > -0.5).all():\n xypoly.append((xy[tup[1], :] + pervec).tolist())\n else:\n # # Check if the matching row of BL flippedlr is all positive --> if so, then not periodic bond\n # match2 = (BL[:, 0] == tup[1]) & (BL[:, 1] == tup[0])\n # if match2.any() and (BL[match2, :] > -0.5).all():\n # xypoly.append((xy[tup[0], :] + pervec).tolist())\n # xypoly.append((xy[tup[1], :] + pervec).tolist())\n # else:\n\n # Declare that this polygon exists on at least two sides\n periodicpoly = True\n # Add periodic vector (PVx, PVy) to forming polygon\n try:\n pervec += PVxydict[tup]\n except KeyError:\n pervec += -PVxydict[(tup[1], tup[0])]\n xypoly.append((xy[tup[1], :] + pervec).tolist())\n\n return xypoly, periodicpoly",
"def delaunay_periodic_network_from_pts(xy, PV, BBox='auto', check=False, target_z=-1, max_bond_length=-1,\n zmethod='random', minimum_bonds=-1, ensure_periodic=False):\n # Algorithm for handling boundaries:\n # - Copy parts of lattice to buffer up the edges\n # - Cut the bonds with the bounding box of the loaded configuration\n # - For each cut bond, match the outside endpt with its corresponding mirror particle\n xytmp = buffer_points_for_periodicBC(xy, PV)\n if check:\n plt.show()\n plt.plot(xytmp[:, 0], xytmp[:, 1], 'b.')\n plt.title('Buffered points')\n plt.show()\n xy, NL, KL, BL, BM = delaunay_lattice_from_pts(xytmp, trimbound=False, target_z=target_z,\n max_bond_length=max_bond_length,\n zmethod=zmethod, minimum_bonds=minimum_bonds,\n check=check)\n if ensure_periodic:\n BL = ensure_periodic_connectivity(xy, NL, KL, BL)\n NL, KL = BL2NLandKL(BL)\n\n # todo: allow for other shapes of periodic boundaries other than parallelogram\n xytrim, NL, KL, BLtrim, PVxydict = \\\n buffered_pts_to_periodic_network_parallelogram(xy, BL, PV, BBox=BBox, check=check)\n return xytrim, NL, KL, BLtrim, PVxydict",
"def generate_pbc(self):\n s = \" - using 2D periodic boundaries -\"\n print_text(s, cls=self)\n\n xmin = MPI.min(mpi_comm_world(), self.mesh.coordinates()[:,0].min())\n xmax = MPI.max(mpi_comm_world(), self.mesh.coordinates()[:,0].max())\n ymin = MPI.min(mpi_comm_world(), self.mesh.coordinates()[:,1].min())\n ymax = MPI.max(mpi_comm_world(), self.mesh.coordinates()[:,1].max())\n \n self.use_periodic_boundaries = True\n \n class PeriodicBoundary(SubDomain):\n \n def inside(self, x, on_boundary):\n \"\"\"\n Return True if on left or bottom boundary AND NOT on one \n of the two corners (0, 1) and (1, 0).\n \"\"\"\n return bool((near(x[0], xmin) or near(x[1], ymin)) and \\\n (not ((near(x[0], xmin) and near(x[1], ymax)) \\\n or (near(x[0], xmax) and near(x[1], ymin)))) \\\n and on_boundary)\n\n def map(self, x, y):\n \"\"\"\n Remap the values on the top and right sides to the bottom and left\n sides.\n \"\"\"\n if near(x[0], xmax) and near(x[1], ymax):\n y[0] = x[0] - xmax\n y[1] = x[1] - ymax\n elif near(x[0], xmax):\n y[0] = x[0] - xmax\n y[1] = x[1]\n elif near(x[1], ymax):\n y[0] = x[0]\n y[1] = x[1] - ymax\n else:\n y[0] = x[0]\n y[1] = x[1]\n\n self.pBC = PeriodicBoundary()",
"def distancex_periodicstrip(xy, com, LL):\n if len(LL) == 2:\n lenx = LL[0]\n else:\n lenx = LL\n if len(com) == 2:\n pos = np.abs(xy - com)[:, 0]\n pos[pos > lenx * 0.5] -= lenx\n elif len(com) == 1:\n # assume com is given just by the x coordinate\n pos = np.abs(xy[:, 0] - com)\n pos[pos > lenx * 0.5] -= lenx\n return np.abs(pos)",
"def extract_1d_boundaries(xy, NL, KL, BL, PVx, PVy, check=False):\n if PVx is None and PVy is None:\n raise RuntimeError('Not designed to allow openBC networks.')\n # PVx = np.zeros_like(KL, dtype=float)\n # PVy = np.zeros_like(KL, dtype=float)\n\n # If there are dangling points, remove them for now and adjust indices later\n dangles, xy, NL, KL, BL, backtrans = remove_dangling_points(xy, NL, KL, BL, check=check)\n # If no dangling bonds, no need to translate indices at the end\n translate_at_end = len(dangles) > 0\n\n # Initialize the list of boundary indices to be larger than necessary\n boundaries = []\n for boundaryloc in ['top', 'bottom']:\n # Initialize the boundary list to be as long as possible (will truncate later)\n bb = np.zeros(2 * len(xy), dtype=int)\n if boundaryloc == 'top':\n # Start with the topmost point, which is guaranteed to be\n # at the convex hull and thus also at the top outer edge.\n # Then take the first step to be along the minimum angle bond\n rightIND = np.where(xy[:, 1] == np.max(xy[:, 1]))[0]\n # If there are more than one rightmost point, choose one\n if rightIND.size > 1:\n rightIND = rightIND[0]\n else:\n # Start with the bottom most point, which is guaranteed to be\n # at the convex hull and thus also at the bottom outer edge.\n # Then take the first step to be along the minimum angle bond\n rightIND = np.where(xy[:, 1] == np.min(xy[:, 1]))[0]\n # If there are more than one rightmost point, choose one\n if rightIND.size > 1:\n rightIND = rightIND[0]\n\n if check:\n print 'le.extract_1d_boundaries(): Found extremal pt: ', rightIND\n print 'le.extract_1d_boundaries(): with neighbors: ', NL[rightIND]\n print 'le.extract_1d_boundaries(): with connectns: ', KL[rightIND]\n plt.plot(xy[:, 0], xy[:, 1], 'k.')\n plt.plot(xy[rightIND, 0], xy[rightIND, 1], 'bo')\n for ii in range(len(xy)):\n plt.text(xy[ii, 0] + 0.1, xy[ii, 1], str(ii))\n plt.plot(xy[rightIND, 0], xy[rightIND, 1], 'ro')\n plt.pause(0.01)\n\n # Grab the true neighbors of this starting point\n # print 'le.extract_boundary(): NL[rightIND, :] = ', NL[rightIND, :]\n connect = np.argwhere(np.abs(KL[rightIND]).ravel()).ravel()\n neighbors = NL[rightIND, connect]\n if check:\n print 'le.extract_1d_boundaries(): neighbors = ', neighbors\n print 'le.extract_1d_boundaries(): rightIND = ', rightIND\n\n # Compute the angles of the neighbor bonds\n angles = np.mod(np.arctan2(xy[neighbors, 1] - xy[rightIND, 1] + PVy[rightIND, connect],\n xy[neighbors, 0] - xy[rightIND, 0] + PVx[rightIND, connect]).ravel(),\n 2 * np.pi)\n if check:\n print 'le.extract_1d_boundaries(): KL[rightIND] = ', KL[rightIND]\n print 'le.extract_1d_boundaries(): KL[rightIND,0] = ', KL[rightIND, 0]\n print 'le.extract_1d_boundaries(): KL[rightIND,0] ==0 ', KL[rightIND, 0] == 0\n print 'le.extract_1d_boundaries(): np.argwhere(KL[rightIND]) = ', np.argwhere(KL[rightIND])\n print 'le.extract_1d_boundaries(): np.argwhere(KL[rightIND].ravel())= ', np.argwhere(KL[rightIND].ravel())\n print 'le.extract_1d_boundaries(): neighbors = ', neighbors\n print 'le.extract_1d_boundaries(): angles = ', angles\n\n # Assign this pvx and pvy as pvx_prev and pvy_prev for next time around.\n # Note that this must preceed the redefinition of nextIND\n pvx_prev = PVx[rightIND, connect[angles == min(angles)][0]]\n pvy_prev = PVy[rightIND, connect[angles == min(angles)][0]]\n\n # Take the second particle to be the one with the lowest bond angle (will be >= pi/2)\n nextIND = neighbors[angles == min(angles)][0]\n bb[0] = rightIND\n\n dmyi = 1\n # as long as we haven't 
completed the full outer edge/boundary, add nextIND\n while nextIND != rightIND:\n # print '\\n nextIND = ', nextIND\n # print 'np.argwhere(KL[nextIND]) = ', np.argwhere(KL[nextIND]).ravel()\n bb[dmyi] = nextIND\n connect = np.argwhere(np.abs(KL[nextIND]).ravel())\n n_tmp = NL[nextIND, connect]\n\n # Get position in row of NL where NL == bb[dmyi - 1] (the previous boundary particle/site)\n # and where the PVx and PVy are opposite of the last used PVx and PVy values (to make sure we\n # are looking backwards along the boundary). We will use this to get the 'backward angle' -- the\n # angle of the previous bond in the boundary\n # Note that bb[dmyi - 1] may have been index 0, so there could be multiple matches\n nlpos = np.where(np.logical_and(NL[nextIND] == bb[dmyi - 1],\n np.abs(KL[nextIND]).ravel().astype(bool)))[0]\n if len(nlpos) > 1:\n # There is more than one connection to the previous particle. Check for where PVx and PVy\n # values are opposite the previously used values.\n ind_nlpos = np.where(np.logical_and(PVx[nextIND, nlpos] == -pvx_prev,\n PVy[nextIND, nlpos] == -pvy_prev))[0]\n print 'ind_nlpos = ', ind_nlpos\n nlpos = nlpos[ind_nlpos]\n\n # Exclude previous boundary particle (the copy of that particle in the nlpos position)\n # from the neighbors array, UNLESS IT IS THE ONLY ONE,\n # since its angle with itself is zero!\n\n # Used to remove previous particle, but this assumes that boundary is more than 2\n # particles long, which might not be true for periodic_strip bcs\n if len(n_tmp) == 1:\n print 'le: The bond is a lone bond, not part of a triangle.'\n neighbors = n_tmp\n else:\n print 'n_tmp = ', n_tmp\n neighbors = np.delete(n_tmp, nlpos)\n connect = np.delete(connect, nlpos)\n print 'n_tmp = ', n_tmp\n print 'neighbors = ', neighbors\n\n # print 'le: nlpos = ', nlpos\n forward_angles = np.arctan2(xy[neighbors, 1] - xy[nextIND, 1] + PVy[nextIND, connect],\n xy[neighbors, 0] - xy[nextIND, 0] + PVx[nextIND, connect]).ravel()\n backward_angle = np.arctan2(xy[bb[dmyi - 1], 1] - xy[nextIND, 1] + PVy[nextIND, nlpos],\n xy[bb[dmyi - 1], 0] - xy[nextIND, 0] + PVx[nextIND, nlpos]).ravel()\n if check:\n print 'le: connect = ', connect\n print 'le: forward_angles = ', forward_angles\n print 'le: backward_angle = ', backward_angle\n\n angles = np.mod(forward_angles - backward_angle, 2 * np.pi)\n if check:\n print 'le: angles = ', angles\n print 'le: angles==min--> ', angles == min(angles)\n print 'le: neighbors = ', neighbors\n print 'le.extract_1d_boundaries(): angles==min--> ', angles == min(angles)\n print 'le.extract_1d_boundaries(): neighbors[angles == min(angles)] --> ', neighbors[angles == min(angles)]\n\n # Assign this pvx and pvy as pvx_prev and pvy_prev for next time around.\n # Note that this must preceed the redefinition of nextIND\n pvx_prev = PVx[nextIND, connect[angles == min(angles)][0]]\n pvy_prev = PVy[nextIND, connect[angles == min(angles)][0]]\n # Redefine nextIND to be the new boundary index\n nextIND = neighbors[angles == min(angles)][0]\n # print 'nextIND = ', nextIND\n\n if check:\n # plt.plot(xy[:,0],xy[:,1],'k.')\n XY = np.vstack([xy[bb[dmyi], :], xy[nextIND, :]])\n plt.plot(XY[:, 0], XY[:, 1], 'r-')\n # for i in range(len(xy)):\n # plt.text(xy[i,0]+0.2,xy[i,1],str(i))\n plt.gca().set_aspect('equal')\n plt.pause(0.01)\n\n dmyi += 1\n\n # Truncate the list of boundary indices\n boundary = bb[0:dmyi]\n\n # Since some points were removed from the boundary identification, translate\n # indices back to indices of original xy\n if translate_at_end:\n print 
'le.extract_boundary(): Translating boundary points back into original indices...'\n # print 'boundary = ', boundary\n # print 'translation = ', translation\n # print 'backtrans = ', backtrans\n boundary = backtrans[boundary]\n\n boundaries.append(boundary)\n\n return tuple(boundaries)",
"def extract_inner_boundary(xy, NL, KL, BL, inner_pt=None, check=False):\n # Center the points around some point that is inside the inner region to be extracted\n if inner_pt is not None:\n xy -= inner_pt\n else:\n xy -= np.mean(xy, axis=0)\n\n # Clear periodic bonds from KL\n pbonds = np.where(KL.ravel() < 0)[0]\n if len(pbonds) > 0:\n print 'le: Found periodic bonds in le.extract_inner_boundary(), clearing...'\n KLr = KL.ravel()\n KLr[pbonds] = 0\n KL = KLr.reshape(np.shape(KL))\n print 'le: pbonds = ', pbonds\n\n # If there are dangling points, remove them for now and adjust indices later\n dangles, xy, NL, KL, BL, backtrans = remove_dangling_points(xy, NL, KL, BL, check=check)\n translate_at_end = len(dangles) > 0\n\n # Initialize the list of boundary indices to be larger than necessary\n bb = np.zeros(2 * len(xy), dtype=int)\n\n # Start with the centermost point that is on the right side of the y axis, which is guaranteed to be\n # at the convex hull for an annular sample and thus also at the inner edge.\n # Then take the first step to be along the minimum angle bond\n # Compute radial distance of each particle\n distr2 = xy[:, 0] ** 2 + xy[:, 1] ** 2\n xpositive = np.where(xy[:, 0] > 0)[0]\n if translate_at_end:\n # avoid choosing a dangling particle with no bonds\n selection = np.intersect1d(xpositive, nodangles)\n rightIND = np.where(distr2 == np.min(distr2[selection]))[0]\n else:\n rightIND = np.where(distr2 == np.min(distr2[xpositive]))[0]\n # print 'rightIND = ', rightIND\n # plt.plot(xy[:, 0], xy[:, ])\n # for ii in range(len(xy)):\n # plt.text(xy[ii, 0] + 0.1, xy[ii, 1], str(ii))\n # plt.show()\n # sys.exit()\n # If there are more than one rightmost point, choose one\n if rightIND.size > 1:\n rightIND = rightIND[0]\n\n if check:\n print 'le.extract_inner_boundary(): Found innermost pt: ', rightIND\n print 'le.extract_inner_boundary(): with neighbors: ', NL[rightIND]\n print 'le.extract_inner_boundary(): with connectns: ', KL[rightIND]\n plt.plot(xy[:, 0], xy[:, 1], 'k.')\n plt.plot(xy[rightIND, 0], xy[rightIND, 1], 'bo')\n for ii in range(len(xy)):\n plt.text(xy[ii, 0] + 0.1, xy[ii, 1], str(ii))\n plt.plot(xy[rightIND, 0], xy[rightIND, 1], 'ro')\n plt.pause(0.1)\n\n # Grab the true neighbors of this starting point\n print 'le.extract_inner_boundary(): NL[rightIND, :] = ', NL[rightIND, :]\n neighbors = NL[rightIND, np.argwhere(KL[rightIND].ravel()).ravel()]\n print 'le.extract_inner_boundary(): neighbors = ', neighbors\n print 'le.extract_inner_boundary(): rightIND = ', rightIND\n\n # Take the second particle to be the one with the smallest bond angle above pi (might be <= 3pi/2, but not\n # necessarily).\n # Compute the angles of the neighbor bonds and add pi\n angles = np.mod(np.arctan2(xy[neighbors, 1] - xy[rightIND, 1], xy[neighbors, 0] - xy[rightIND, 0]).ravel() + np.pi,\n 2 * np.pi)\n nextIND = neighbors[angles == min(angles)][0]\n bb[0] = rightIND\n dmyi = 1\n\n if check:\n print 'KL[rightIND] = ', KL[rightIND]\n print 'KL[rightIND,0] = ', KL[rightIND, 0]\n print 'KL[rightIND,0] ==0 ', KL[rightIND, 0] == 0\n print 'np.argwhere(KL[rightIND]) = ', np.argwhere(KL[rightIND])\n print 'np.argwhere(KL[rightIND].ravel())= ', np.argwhere(KL[rightIND].ravel())\n print 'neighbors = ', neighbors\n print 'angles = ', angles\n\n # This part, commented out, was a red herring\n # It is possible for the first particle to be attached to only one other site. 
If this is the case, then we need to\n # add its neighbor to the bb array and take the next max angle with respect to that bond instead of the min angle.\n # while len(angles) == 1:\n # print 'le.extract_inner_boundary(): there is only one neighbor for the first identified boundary particle'\n # bb[dmyi] = nextIND\n # angles, neighbors = bond_angles_wrt_bond(bb[dmyi - 1], nextIND, xy, BL, KL)\n # nextIND = neighbors[angles == max(angles)][0]\n # # print 'nextIND = ', nextIND\n\n if check:\n print 'bb = ', bb\n # sys.exit()\n # as long as we haven't completed the full outer edge/boundary, add nextIND\n while nextIND != rightIND:\n # print '\\n nextIND = ', nextIND\n # print 'np.argwhere(KL[nextIND]) = ', np.argwhere(KL[nextIND]).ravel()\n bb[dmyi] = nextIND\n angles, neighbors = bond_angles_wrt_bond(bb[dmyi - 1], nextIND, xy, NL, KL)\n nextIND = neighbors[angles == min(angles)][0]\n # print 'nextIND = ', nextIND\n\n if check:\n plt.plot(xy[:,0],xy[:,1],'k.')\n XY = np.vstack([xy[bb[dmyi], :], xy[nextIND, :]])\n plt.plot(XY[:, 0], XY[:, 1], 'r-')\n for i in range(len(xy)):\n plt.text(xy[i,0] + 0.2, xy[i, 1], str(i))\n plt.gca().set_aspect('equal')\n plt.show()\n\n dmyi += 1\n\n # Truncate the list of boundary indices\n inner_boundary = bb[0:dmyi]\n\n # Since some points were removed from the boundary identification, translate\n # indices back to indices of original xy\n if translate_at_end:\n print 'le.extract_boundary(): Translating boundary points back into original indices...'\n inner_boundary = backtrans[inner_boundary]\n\n return inner_boundary",
"def com_periodicstrip(xy, LL, masses=1., check=False):\n # test case:\n # import lepm.lattice_elasticity as le\n # import matplotlib.pyplot as plt\n # import numpy as np\n # xy = np.random.rand(100, 2) - np.array([0.5, 0.5])\n # LL = (1.0, 1.0)\n # plt.scatter(xy[:, 0], xy[:, 1])\n # com = le.com_periodic(xy, LL)\n # plt.plot(com[0], com[1], 'ro')\n # plt.show()\n if len(LL) == 2:\n lenx = LL[0]\n\n minx = np.min(xy[:, 0])\n # map to xi and zeta coordinates. Each xi element has x component and y component.\n print 'np.shape(masses) =', np.shape(masses)\n\n if isinstance(masses, np.ndarray):\n xi = np.cos(((xy[:, 0] - minx) / lenx) * 2. * np.pi) * masses\n zeta = np.sin(((xy[:, 0] - minx) / lenx) * 2. * np.pi) * masses\n else:\n raise RuntimeError('Debug: masses should not be equal for my current debugging program')\n xi = np.cos(((xy[:, 0] - minx) / lenx) * 2. * np.pi)\n zeta = np.sin(((xy[:, 0] - minx) / lenx) * 2. * np.pi)\n\n # average to get center of mass on each circle\n xibar = np.mean(xi)\n zetabar = np.mean(zeta)\n\n thetabar = np.arctan2(-zetabar, -xibar) + np.pi\n comx = lenx * thetabar / (2. * np.pi) + minx\n\n # Check it\n angles = np.arctan2(-zeta, -xi) + np.pi\n print 'le: np.shape(angles) = ', np.shape(angles)\n print 'le: np.min(angles) = ', np.min(angles)\n print 'le: np.max(angles) = ', np.max(angles)\n print 'le: thetabar = ', thetabar\n\n if check:\n print 'le: check=', check\n plt.plot(np.cos(angles), np.sin(angles), alpha=0.05)\n plt.plot(np.cos(thetabar), np.sin(thetabar), 'ro')\n plt.show()\n plt.clf()\n\n com_nonper = center_of_mass(xy, masses)\n com = np.array([comx, com_nonper[1]])\n return com",
"def identify_bonds(chosen_atom, atom_list):\n list_of_hydrogens = ['H15', 'H14', 'H13', 'H12', 'H11', 'H10', 'H9', 'H8', 'H7', 'H6', 'H5', 'H4', 'H3', 'H2', 'H1'] \n if ((chosen_atom.atom_name not in list_of_hydrogens) and (chosen_atom.residue_name != \"P1A\")):\n nearby_atoms_crude = [atom for atom in atom_list if ((abs(chosen_atom.x - atom.x) <= 2) and (abs(chosen_atom.y - atom.y) <= 2) and (abs(chosen_atom.z - atom.z) <= 2))]\n nearby_atoms = [atom for atom in nearby_atoms_crude if (0 < calculate_3D_distance_2_atoms(chosen_atom,atom) <= 2)]\n identified_bonds = [[atom, calculate_3D_distance_2_atoms(chosen_atom, atom)] for atom in nearby_atoms if (check_bond(chosen_atom, atom) == True)] \n elif ((chosen_atom.atom_name not in list_of_hydrogens) and (chosen_atom.residue_name == \"P1A\")):\n nearby_atoms_crude = [atom for atom in atom_list if ((abs(chosen_atom.x - atom.x) <= 2) and (abs(chosen_atom.y - atom.y) <= 2) and (abs(chosen_atom.z - atom.z) <= 2))]\n nearby_atoms = [atom for atom in nearby_atoms_crude if (0 < calculate_3D_distance_2_atoms(chosen_atom,atom) <= 1.8)]\n identified_bonds = [[atom, calculate_3D_distance_2_atoms(chosen_atom, atom)] for atom in nearby_atoms if (check_bond(chosen_atom, atom) == True)] \n else:\n nearby_atoms_crude = [atom for atom in atom_list if ((abs(chosen_atom.x - atom.x) <= 1.6) and (abs(chosen_atom.y - atom.y) <= 1.6) and (abs(chosen_atom.z - atom.z) <= 1.6))]\n nearby_atoms = [atom for atom in nearby_atoms_crude if (0 < calculate_3D_distance_2_atoms(chosen_atom,atom) <= 1.6)]\n identified_bonds = [[atom, calculate_3D_distance_2_atoms(chosen_atom, atom)] for atom in nearby_atoms if (check_bond(chosen_atom, atom) == True)] \n for elements in nearby_atoms:\n if (check_if_no_bond(chosen_atom, elements, bond_list, bond_list_3) == True):\n nearby_atoms.remove(elements)\n if (len(nearby_atoms) == len(identified_bonds)):\n return identified_bonds\n else:\n return []",
"def find_cut_bonds(BL, keep):\n # ensure that keep is int array of indices, not bool\n if keep.dtype == 'bool':\n print 'converting bool keep to int array...'\n keep = np.where(keep)[0]\n\n # Make output BLcut and the indices of BL that are cut (cutIND)\n # Find rows of BL for which both elems are in keep\n inBL0 = np.in1d(np.abs(BL[:, 0]), keep)\n inBL1 = np.in1d(np.abs(BL[:, 1]), keep)\n cutIND = np.logical_xor(inBL0, inBL1)\n BLcut = BL[cutIND, :]\n\n return BLcut, cutIND",
"def get_grid_coords(self, count, boundry_x, boundry_y, grid_size):\n\n coords = []\n\n boundry_x = int(boundry_x/10)\n boundry_y = int(boundry_y/10)\n\n while len(coords) < count:\n seed()\n\n\n x = randint(-boundry_x, boundry_x)\n y = randint(-boundry_y, boundry_y)\n\n if len(coords) == 0:\n coords.append((x*grid_size, y*grid_size))\n else:\n for coord in coords:\n if (x not in range(coord[0]-buffer*grid_size, coord[0]+buffer*grid_size)) and (y not in range(coord[1]-buffer, coord[1]+buffer)):\n pass\n else:\n break",
"def create_grid(data, drone_altitude, safety_distance):\n\n # minimum and maximum north coordinates\n north_min = np.floor(np.min(data[:, 0] - data[:, 3]))\n north_max = np.ceil(np.max(data[:, 0] + data[:, 3]))\n #print(north_min, north_max)\n\n # minimum and maximum east coordinates\n east_min = np.floor(np.min(data[:, 1] - data[:, 4]))\n east_max = np.ceil(np.max(data[:, 1] + data[:, 4]))\n #print(east_min, east_max)\n # given the minimum and maximum coordinates we can\n # calculate the size of the grid.\n north_size = int(np.ceil((north_max - north_min)))\n east_size = int(np.ceil((east_max - east_min)))\n #print(north_size, east_size)\n # Initialize an empty grid\n grid = np.zeros((north_size, east_size))\n # Center offset for grid\n north_min_center = np.min(data[:, 0])\n east_min_center = np.min(data[:, 1])\n \n # Populate the grid with obstacles\n for i in range(data.shape[0]):\n north, east, alt, d_north, d_east, d_alt = data[i, :]\n\n if alt + d_alt + safety_distance > drone_altitude:\n obstacle = [\n int(north - d_north - safety_distance - north_min_center),\n int(north + d_north + safety_distance - north_min_center),\n int(east - d_east - safety_distance - east_min_center),\n int(east + d_east + safety_distance - east_min_center),\n ]\n grid[obstacle[0]:obstacle[1], obstacle[2]:obstacle[3]] = 1\n\n return grid",
"def get_traps_boundaries(traps, nx, ny, d4):\n\n indices = np.arange(0, nx * ny, 1)\n nbrs = util.get_neighbor_indices(indices, nx, d4)\n\n # N.B: If boundary pairs to domain should be removed, include line below\n # domain_bnd_nodes = get_domain_boundary_indices(nx, ny)\n\n trap_boundary = []\n\n for trap in traps:\n nbrs_for_each_node_in_trap = nbrs[trap]\n nbr_is_in_trap = np.split(np.in1d(nbrs_for_each_node_in_trap, trap), len(trap))\n node_is_in_trap_boundary = ~np.all(nbr_is_in_trap, axis=1)\n\n # It is not possible that no elements are in trap boundary\n trap_boundary.append(trap[node_is_in_trap_boundary])\n\n return trap_boundary"
] | [
"0.76811284",
"0.6554566",
"0.6185524",
"0.58874184",
"0.5519401",
"0.5501593",
"0.54900706",
"0.53688544",
"0.52624065",
"0.5220514",
"0.51110923",
"0.5086385",
"0.506581",
"0.50574595",
"0.502888",
"0.5023648",
"0.49670285",
"0.49498272",
"0.4919114",
"0.4904832",
"0.49038145",
"0.48976552",
"0.48848668",
"0.48847833",
"0.4881304",
"0.48785308",
"0.4842948",
"0.4833769",
"0.48243922",
"0.48183304"
] | 0.71093625 | 1 |
locates droplets in a binary data set on a spherical grid | def _locate_droplets_in_mask_spherical(
grid: SphericalSymGridBase, mask: np.ndarray
) -> Emulsion:
assert np.all(mask.shape == grid.shape)
# locate clusters in the binary image
labels, num_labels = ndimage.label(mask)
if num_labels == 0:
return Emulsion([], grid=grid)
# locate clusters around origin
object_slices = ndimage.measurements.find_objects(labels)
droplet = None
for slices in object_slices:
if slices[0].start == 0: # contains point around origin
radius = grid.cell_to_point(slices[0].stop).flat[-1]
droplet = SphericalDroplet(np.zeros(grid.dim), radius=radius)
else:
logger = logging.getLogger(grid.__class__.__module__)
logger.warning("Found object not located at origin")
# return an emulsion of droplets
if droplet:
return Emulsion([droplet], grid=grid)
else:
return Emulsion([], grid=grid) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _locate_droplets_in_mask_cylindrical_single(\n grid: CylindricalSymGrid, mask: np.ndarray\n) -> Emulsion:\n # locate the individual clusters\n labels, num_features = ndimage.label(mask)\n if num_features == 0:\n return Emulsion([], grid=grid)\n\n # locate clusters on the symmetry axis\n object_slices = ndimage.measurements.find_objects(labels)\n indices = []\n for index, slices in enumerate(object_slices, 1):\n if slices[0].start == 0: # contains point on symmetry axis\n indices.append(index)\n else:\n logger = logging.getLogger(grid.__class__.__module__)\n logger.warning(\"Found object not located on symmetry axis\")\n\n # determine position from binary image and scale it to real space\n pos = ndimage.measurements.center_of_mass(mask, labels, index=indices)\n pos = grid.cell_to_point(pos)\n\n # determine volume from binary image and scale it to real space\n vol_r, dz = grid.cell_volume_data\n cell_volumes = vol_r * dz\n vol = ndimage.measurements.sum(cell_volumes, labels, index=indices)\n\n # return an emulsion of droplets\n droplets = (\n SphericalDroplet.from_volume(np.array([0, 0, p[2]]), v)\n for p, v in zip(pos, vol)\n )\n return Emulsion(droplets, grid=grid)",
"def _locate_droplets_in_mask_cylindrical(\n grid: CylindricalSymGrid, mask: np.ndarray\n) -> Emulsion:\n assert np.all(mask.shape == grid.shape)\n\n if grid.periodic[1]:\n # locate droplets respecting periodic boundary conditions in z-direction\n\n # pad the array to simulate periodic boundary conditions\n dim_r, dim_z = grid.shape\n mask_padded = np.pad(mask, [[0, 0], [dim_z, dim_z]], mode=\"wrap\")\n assert mask_padded.shape == (dim_r, 3 * dim_z)\n\n # locate droplets in the extended image\n candidates = _locate_droplets_in_mask_cylindrical_single(grid, mask_padded)\n grid._logger.info(f\"Found {len(candidates)} droplet candidates.\")\n\n # keep droplets that are inside the central area\n droplets = Emulsion(grid=grid)\n for droplet in candidates:\n # correct for the additional padding of the array\n droplet.position[2] -= grid.length\n # check whether the droplet lies in the original box\n if grid.contains_point(droplet.position):\n droplets.append(droplet)\n\n grid._logger.info(f\"Kept {len(droplets)} central droplets.\")\n\n # filter overlapping droplets (e.g. due to duplicates)\n droplets.remove_overlapping()\n\n else:\n # simply locate droplets in the mask\n droplets = _locate_droplets_in_mask_cylindrical_single(grid, mask)\n\n return droplets",
"def bin_data(data, lat, lon, binsize=1, uv_data=False, pressure=None):\n\n # Create lats and lons based on binsize\n lonlen = 360\n latlen = 180\n\n lon_lowerlim = 0\n lon_upperlim = 360\n\n lat_lowerlim = -90\n lat_upperlim = 90\n\n if latlen % binsize == 0 and lonlen % binsize == 0:\n latbin = int(latlen/binsize)\n lonbin = int(lonlen/binsize)\n n_deg = binsize/2\n\n ll_lats = np.linspace(lat_lowerlim+(n_deg),\n lat_upperlim-(n_deg),\n latbin)\n\n ll_lons = np.linspace(lon_lowerlim+(n_deg),\n lon_upperlim-(n_deg),\n lonbin)\n\n else:\n print('ERROR: Binsize does not work for grid shape (180,360). Please use different binsize.')\n return\n\n paramlist = list(itertools.product(ll_lats, ll_lons))\n\n # Bin Data\n if uv_data == True:\n binned_u_data = np.full((latbin, lonbin), np.nan, dtype=object)\n binned_v_data = np.full((latbin, lonbin), np.nan, dtype=object)\n\n if pressure is not None:\n binned_pressure = np.full((latbin, lonbin), np.nan, dtype=object)\n\n for val in paramlist:\n # Get index of 1x1 grid lat and lon\n latidx = np.where(ll_lats == val[0])\n lonidx = np.where(ll_lons == val[1])\n # values of the 1x1 grid lat and lon\n binnedlons = val[1]\n binnedlats = val[0]\n\n # find instances where data is within 1x1 grid point of orginal data\n data_idx = np.where((lon >= binnedlons - n_deg) & (lon <= binnedlons + n_deg) &\n (lat >= binnedlats - n_deg) & (lat <= binnedlats + n_deg))\n\n latlon_idx = [latidx[0][0], lonidx[0][0]]\n\n # calculate stats if there is data at this grid point, else append np.nan\n if len(data_idx[0]) > 0:\n u = data['u'][data_idx]\n v = data['v'][data_idx]\n\n binned_u_data[latlon_idx[0], latlon_idx[1]] = u\n binned_v_data[latlon_idx[0], latlon_idx[1]] = v\n\n if pressure is not None:\n p = pressure[data_idx]\n binned_pressure[latlon_idx[0], latlon_idx[1]] = p\n\n if pressure is not None:\n return binned_u_data, binned_v_data, binned_pressure\n\n else:\n return binned_u_data, binned_v_data\n\n else:\n binned_data = np.full((latbin, lonbin), np.nan, dtype=object)\n if pressure is not None:\n binned_pressure = np.full((latbin, lonbin), np.nan, dtype=object)\n\n for val in paramlist:\n # Get index of grid lat and lon\n latidx = np.where(ll_lats == val[0])\n lonidx = np.where(ll_lons == val[1])\n # values of the 1x1 grid lat and lon\n binnedlons = val[1]\n binnedlats = val[0]\n\n # find instances where data is within 1x1 grid point of orginal data\n data_idx = np.where((lon >= binnedlons - n_deg) & (lon <= binnedlons + n_deg) &\n (lat >= binnedlats - n_deg) & (lat <= binnedlats + n_deg))\n\n latlon_idx = [latidx[0][0], lonidx[0][0]]\n\n # calculate stats if there is data at this grid point\n if len(data_idx[0]) > 0:\n d = data[data_idx]\n binned_data[latlon_idx[0], latlon_idx[1]] = d\n\n if pressure is not None:\n p = pressure[data_idx]\n binned_pressure[latlon_idx[0], latlon_idx[1]] = p\n\n if pressure is not None:\n return binned_data, binned_pressure\n\n else:\n return binned_data",
"def _locate_droplets_in_mask_cartesian(\n grid: CartesianGridBase, mask: np.ndarray\n) -> Emulsion:\n if mask.shape != grid.shape:\n raise ValueError(\n f\"The shape {mask.shape} of the data is not compatible with the grid \"\n f\"shape {grid.shape}\"\n )\n\n # pad the array to simulate periodic boundary conditions\n offset = np.array([dim if p else 0 for p, dim in zip(grid.periodic, grid.shape)])\n pad = np.c_[offset, offset].astype(np.intc)\n mask_padded = np.pad(mask, pad, mode=\"wrap\")\n assert np.all(mask_padded.shape == np.array(grid.shape) + 2 * offset)\n\n # locate individual clusters in the padded image\n labels, num_labels = ndimage.label(mask_padded)\n if num_labels == 0:\n return Emulsion([], grid=grid)\n indices = range(1, num_labels + 1)\n\n # create and emulsion from this of droplets\n grid._logger.info(f\"Found {num_labels} droplet candidate(s)\")\n\n # determine position from binary image and scale it to real space\n positions = ndimage.measurements.center_of_mass(mask_padded, labels, index=indices)\n # correct for the additional padding of the array\n positions = grid.cell_to_point(positions - offset)\n\n # determine volume from binary image and scale it to real space\n volumes = ndimage.measurements.sum(mask_padded, labels, index=indices)\n volumes = np.asanyarray(volumes) * np.prod(grid.discretization)\n\n # only retain droplets that are inside the central area\n droplets = (\n SphericalDroplet.from_volume(position, volume)\n for position, volume in zip(positions, volumes)\n if grid.cuboid.contains_point(position)\n )\n\n # filter overlapping droplets (e.g. due to duplicates)\n emulsion = Emulsion(droplets, grid=grid)\n num_candidates = len(emulsion)\n if num_candidates < num_labels:\n grid._logger.info(f\"Only {num_candidates} candidate(s) inside bounds\")\n\n emulsion.remove_overlapping()\n if len(emulsion) < num_candidates:\n grid._logger.info(f\"Only {num_candidates} candidate(s) not overlapping\")\n\n return emulsion",
"def find_loc_indices(loc, dir, tile):\n #returns the indices of the nearest neighbor point in the given tile, the lon/lat of the nearest neighbor, \n #and the distance (m) from the given point to the nearest neighbor grid cell\n \n filename_pattern = '*grid.tile{0}.nc'.format(tile)\n for f_name in os.listdir(dir):\n if fnmatch.fnmatch(f_name, filename_pattern):\n filename = f_name\n if not filename:\n message = 'No filenames matching the pattern {0} found in {1}'.format(filename_pattern,dir)\n logging.critical(message)\n raise Exception(message)\n \n nc_file = Dataset('{0}/{1}'.format(dir,filename))\n #read in supergrid longitude and latitude\n lon_super = np.array(nc_file['x']) #[lat,lon] or [y,x] #.swapaxes(0,1)\n lat_super = np.array(nc_file['y']) #[lat,lon] or [y,x] #.swapaxes(0,1)\n #get the longitude and latitude data for the grid centers by slicing the supergrid \n #and taking only odd-indexed values\n longitude = lon_super[1::2,1::2]\n latitude = lat_super[1::2,1::2]\n nc_file.close()\n \n adj_long = False \n #look for reversal of longitude; if found, adjust longitude so that 0-360 transition doesn't exist\n temp_loc = copy.deepcopy(loc)\n for row in longitude:\n if not (np.all(np.diff(row) >= 0) or np.all(np.diff(row) <= 0)):\n adj_long = True\n if adj_long:\n longitude[longitude < 180] += 360\n if loc[0] < 180:\n temp_loc[0] += 360\n \n #set up an array to hold the euclidean distance between the given point and every grid cell\n eucl_dist = np.zeros((longitude.shape[0],longitude.shape[1]))\n \n #get the Cartesian location of the given point\n cart_loc = np.array(sph2cart(math.radians(temp_loc[0]), math.radians(temp_loc[1]), earth_radius))\n \n for i in range(len(longitude)):\n for j in range(len(longitude[i])):\n #get the Cartesian location of all grid points\n cart_cell = np.array(sph2cart(math.radians(longitude[i,j]), math.radians(latitude[i,j]), earth_radius))\n \n #calculate the euclidean distance from the given point to the current grid cell\n eucl_dist[i,j] = np.linalg.norm(cart_loc - cart_cell)\n \n #get the indices of the grid point with the minimum euclidean distance to the given point\n i,j = np.unravel_index(eucl_dist.argmin(), eucl_dist.shape)\n \n return (i,j,longitude[i,j]%360.0, latitude[i,j], eucl_dist[i,j])",
"def find_tile(loc, dir):\n #returns the integer tile number\n \n # should be looking in the directory with supergrid data (probably \"fix\" directory)\n filename_pattern = '*grid.tile*.nc'\n \n #find all supergrid files in the directory\n grid_fnames = []\n for f_name in os.listdir(dir):\n if fnmatch.fnmatch(f_name, filename_pattern):\n grid_fnames.append(f_name)\n if not grid_fnames:\n message = 'No filenames matching the pattern {0} found in {1}'.format(filename_pattern,dir)\n logging.critical(message)\n raise Exception(message)\n \n #non-polar tiles can use traditional 2D point-in-polygon methods; if a point is not in a non-polar tile,\n #it is in one of the polar tiles, and the tile can be distinguished by the sign of latitude of the point\n polar_tile_filenames = []\n found_tile = False\n for f_name in grid_fnames:\n if not found_tile:\n nc_file = Dataset('{0}/{1}'.format(dir,f_name))\n longitude = np.array(nc_file['x']).swapaxes(0,1)\n latitude = np.array(nc_file['y']).swapaxes(0,1)\n nc_file.close()\n \n adj_long = False \n #look for reversal of longitude; if found, adjust longitude so that 0-360 transition doesn't exist\n for row in longitude:\n if not (np.all(np.diff(row) >= 0) or np.all(np.diff(row) <= 0)):\n adj_long = True\n if adj_long:\n longitude[longitude < 180] += 360\n \n #get lon/lat pairs for all edges of the tiles\n \n edge_1_lon = longitude[0,:]\n edge_1_lat = latitude[0,:]\n edge_1 = list(zip(edge_1_lon, edge_1_lat))\n \n edge_2_lon = longitude[:,-1]\n edge_2_lat = latitude[:,-1]\n edge_2 = list(zip(edge_2_lon, edge_2_lat))\n \n edge_3_lon = longitude[-1,:]\n edge_3_lat = latitude[-1,:]\n edge_3 = list(zip(edge_3_lon, edge_3_lat))\n edge_3.reverse() #need to reverse the direction of this edge to form a regular polygon\n \n edge_4_lon = longitude[:,0]\n edge_4_lat = latitude[:,0]\n edge_4 = list(zip(edge_4_lon, edge_4_lat))\n edge_4.reverse() #need to reverse the direction of this edge to form a regular polygon\n \n polygon_points = edge_1 + edge_2 + edge_3 + edge_4\n \n tile_polygon = Polygon(polygon_points)\n tile_polygon = tile_polygon.simplify(0)\n \n if tile_polygon.is_valid: #this will be True unless the tile is a polar tile, which will not form a regular polygon in Cartesian space using lon/lat data\n temp_loc = copy.deepcopy(loc)\n if adj_long:\n if loc[0] < 180:\n temp_loc[0] += 360\n loc_point = Point(temp_loc)\n if tile_polygon.contains(loc_point):\n found_tile = True\n return f_name.split('tile')[1].split('.nc')[0] \n else:\n polar_tile_filenames.append(f_name)\n \n #if the tile hasn't been found by this point, it must be contained within a polar tile\n for f_name in polar_tile_filenames:\n nc_file = Dataset('{0}/{1}'.format(dir,f_name))\n latitude = np.array(nc_file['y']).swapaxes(0,1)\n nc_file.close()\n \n #if the sign of the mean latitude of the tile is the same as that of the point, the tile has been found\n if np.sign(np.mean(latitude)) == np.sign(loc[1]):\n found_tile = True\n return f_name.split('tile')[1].split('.nc')[0] \n return -1",
"def spatial(self):",
"def generate_all_locations(grid, shape):",
"def obs_ijpos(gridfile,lons,lats,coor):\n\n gfh= netCDF4.Dataset(gridfile)\n cartesian=0\n if (coor=='r'):\n try:\n \n latr=gfh.variables['lat_rho'][:,:]\n lonr=gfh.variables['lon_rho'][:,:]\n except:\n latr=gfh.variables['latitude'][:,:]\n lonr=gfh.variables['longitude'][:,:]\n \n\n try:\n xr=gfh.variables['xi_rho'][:]\n yr=gfh.variables['eta_rho'][:]\n except:\n try:\n xr=gfh.variables['x_rho'][:]\n yr=gfh.variables['y_rho'][:]\n except:\n print('Neither xi_rho/eta_rho or x_rho/y_rho on file.')\n print('This might slow down the calculations')\n\n\n elif (coor=='u'):\n latr=gfh.variables['lat_u'][:,:]\n lonr=gfh.variables['lon_u'][:,:]\n try:\n xr=gfh.variables['xi_u'][:]\n yr=gfh.variables['eta_u'][:]\n except:\n xr=gfh.variables['x_u'][:]\n yr=gfh.variables['y_u'][:]\n elif (coor=='v'):\n latr=gfh.variables['lat_v'][:,:]\n lonr=gfh.variables['lon_v'][:,:]\n try:\n xr=gfh.variables['xi_v'][:]\n yr=gfh.variables['eta_v'][:]\n except:\n xr=gfh.variables['x_v'][:]\n yr=gfh.variables['y_v'][:]\n\n IN = point_in_polygon(lonr, latr, lons, lats)\n ind=np.where(IN)[0]\n \n if lats.size >1: \n lons=lons[ind]; lats=lats[ind]\n # If there's no lons, lats left at this stage, return oipos, ojpos with -999 everywhere\n if not len(lons):\n return np.ones_like(IN)*-999, np.ones_like(IN)*-999\n \n try:\n try:\n mapstr=str(gfh.variables['h'].getncattr('mapping'))\n except:\n try:\n mapstr=str(gfh.variables['h'].getncattr('grid_mapping'))\n except:\n pass\n try:\n projstring=(gfh.variables[mapstr]).getncattr('proj4')\n except:\n try:\n projstring=(gfh.variables[mapstr]).getncattr('proj4string')\n except:\n pass\n try:\n projstring=(gfh.variables['grid_mapping']).getncattr('proj4')\n except:\n try:\n projstring=(gfh.variables['grid_mapping']).getncattr('proj4string')\n except:\n pass\n\n gridproj=proj.Proj(str(projstring))\n hasproj=1\n except:\n hasproj=0\n\n # Check if lat, lon spacing is uniform\n dx1=np.abs(lonr[0,1]-lonr[0,0])\n dx2=np.abs(lonr[0,-1]-lonr[0,-2])\n n=int(np.round(lonr.shape[1]/2))\n dx3=np.abs(lonr[0,n]-lonr[0,n-1])\n\n dy1=np.abs(latr[1,0]-latr[0,0])\n dy2=np.abs(latr[-1,0]-latr[-2,0])\n n=int(np.round(latr.shape[0]/2))\n dy3=np.abs(latr[n,0]-latr[n-1,0])\n\n if ( (dx1 == dx2) & (dx1==dx3) & (dx2==dx3) & (dy1 == dy2) & (dy1==dy3) & (dy2==dy3) ):\n cartesian=1\n gridproj=proj.Proj(\"+proj=latlong +datum=WGS84\")\n \n\n \n if hasproj:\n dx=xr[1]-xr[0]\n dy=yr[1]-yr[0]\n [x,y]=gridproj(lons,lats)\n ipos=(x-xr[0])/dx\n jpos=(y-yr[0])/dy\n\n elif cartesian:\n [x1,y1]=gridproj(lonr[0,0],latr[0,0])\n [x2,y2]=gridproj(lonr[0,1],latr[0,1])\n dx=x2-x1\n [x2,y2]=gridproj(lonr[1,0],latr[1,0])\n dy=y2-y1\n [x,y]=gridproj(lons,lats)\n [x0,y0]=gridproj(lonr[0,0],latr[0,0])\n\n ipos=(x-x0)/dx\n jpos=(y-y0)/dy\n\n else:\n x=np.linspace(0,lonr.shape[1]-1,lonr.shape[1])\n y=np.linspace(0,lonr.shape[0]-1,lonr.shape[0])\n xi=np.zeros_like(lonr); yi=np.zeros([lonr.shape[1],lonr.shape[0]])\n xi[:,:]=x; yi[:,:]=y; yi=np.swapaxes(yi,1,0)\n zi=scipy.interpolate.griddata((lonr.flatten(),latr.flatten()),xi.flatten(),(lons,lats))\n ipos=zi\n zi=scipy.interpolate.griddata((lonr.flatten(),latr.flatten()),yi.flatten(),(lons,lats))\n jpos=zi\n \n if 'ind' in locals():\n oipos=np.ones(IN.shape)*-999.; ojpos=np.ones(IN.shape)*-999.\n oipos[ind]=ipos; ojpos[ind]=jpos\n else:\n oipos=ipos\n ojpos=jpos\n if not IN:\n oipos = np.array([-999.])\n ojpos = np.array([-999.])\n gfh.close()\n return oipos,ojpos",
"def get_nc_BGrid_GFDL(grdfile, name='GFDL_CM2.1_North_Pacific', area='regional', \\\n xrange=(60,175), yrange=(120, 190), ystart=235):\n\n nc = pyroms.io.Dataset(grdfile)\n\n lon_t = nc.variables['geolon_t'][:]\n lat_t = nc.variables['geolat_t'][:]\n lon_uv = nc.variables['geolon_c'][:]\n lat_uv = nc.variables['geolat_c'][:]\n\n h = nc.variables['ht'][:]\n\n f = nc.variables['coriolis_param'][:]\n\n kmt = nc.variables['kmt'][:]\n z_t = nc.variables['st_ocean'][:]\n z_t_edges = nc.variables['st_edges_ocean'][:]\n\n kmu = nc.variables['kmu'][:]\n z_uv = nc.variables['sw_ocean'][:]\n z_uv_edges = nc.variables['sw_edges_ocean'][:]\n\n # compute mask at t-point\n M_t, L_t = kmt.shape\n N_t = z_t.shape[0]\n mask_t = np.zeros((N_t, M_t, L_t))\n for j in range(M_t):\n for i in range(L_t):\n try:\n mask_t[0:int(kmt[j,i]), j,i] = 1\n except:\n mask_t[:, j,i] = 0\n\n # compute mask at uv-point\n M_uv, L_uv = kmu.shape\n N_uv = z_uv.shape[0]\n mask_uv = np.zeros((N_uv, M_uv, L_uv))\n for j in range(M_uv):\n for i in range(L_uv):\n try:\n mask_uv[0:int(kmu[j,i]), j,i] = 1\n except:\n mask_uv[:, j,i] = 0\n\n if area == 'npolar':\n #add two rows in the north and the south\n lon_t = lon_t[np.r_[0,0,:np.size(lon_t,0),-1,-1]]\n lon_t = lon_t[:,np.r_[0,:np.size(lon_t,1),-1]]\n lon_t[:,0] = lon_t[:,1] - (lon_t[:,2]-lon_t[:,1])\n lon_t[:,-1] = lon_t[:,-2] + (lon_t[:,-2]-lon_t[:,-3])\n lat_t = lat_t[np.r_[0,0,:np.size(lat_t,0),-1,-1]]\n lat_t = lat_t[:,np.r_[0,:np.size(lat_t,1),-1]]\n lat_t[0,:] = -85\n lat_t[1,:] = -80\n lat_t[-2,:] = 90\n lat_t[-1,:] = 91\n lon_uv = lon_uv[np.r_[0,0,:np.size(lon_uv,0),-1,-1]]\n lon_uv = lon_uv[:,np.r_[0,:np.size(lon_uv,1),-1]]\n lon_uv[:,0] = lon_uv[:,1] - (lon_uv[:,2]-lon_t[:,1])\n lon_uv[:,-1] = lon_uv[:,-2] + (lon_uv[:,-2]-lon_uv[:,-3])\n lat_uv = lat_uv[np.r_[0,0,:np.size(lat_uv,0),-1,-1]]\n lat_uv = lat_uv[:,np.r_[0,:np.size(lat_uv,1),-1]]\n lat_uv[0,:] = -85\n lat_uv[1,:] = -80\n lat_uv[-2,:] = 90\n lat_uv[-1,:] = 91\n mask_t = mask_t[:,np.r_[0,0,:np.size(mask_t,1),-1,-1],:]\n mask_t = mask_t[:,:,np.r_[0,:np.size(mask_t,2),-1]]\n mask_t[:,:,0] = mask_t[:,:,-2]\n mask_t[:,:,-1] = mask_t[:,:,1]\n mask_uv = mask_uv[:,np.r_[0,0,:np.size(mask_uv,1),-1,-1],:]\n mask_uv = mask_uv[:,:,np.r_[0,:np.size(mask_uv,2),-1]]\n mask_uv[:,:,0] = mask_uv[:,:,-2]\n mask_uv[:,:,-1] = mask_uv[:,:,1]\n h = h[np.r_[0,0,:np.size(h,0),-1,-1]]\n h = h[:,np.r_[0,:np.size(h,1),-1]]\n h[:,0] = h[:,-2]\n h[:,-1] = h[:,1]\n f = f[np.r_[0,0,:np.size(f,0),-1,-1]]\n f = f[:,np.r_[0,:np.size(f,1),-1]]\n f[:,0] = f[:,-2]\n f[:,-1] = f[:,1]\n m,l = h.shape\n xrange=(1,l-2)\n yrange=(ystart+2,m-2)\n\n if area == 'tripole':\n #add two rows in the north and the south\n fold1 = L_t//2\n lon_t = lon_t[np.r_[0,0,:np.size(lon_t,0),-1,-1]]\n lon_t[-2,:fold1] = lon_t[-3,L_t:fold1-1:-1]\n lon_t[-2,L_t:fold1-1:-1] = lon_t[-3,:fold1]\n lon_t[-1,:fold1] = lon_t[-4,L_t:fold1-1:-1]\n lon_t[-1,L_t:fold1-1:-1] = lon_t[-4,:fold1]\n\n lon_t = lon_t[:,np.r_[0,:np.size(lon_t,1),-1]]\n lon_t[:,0] = lon_t[:,1] - (lon_t[:,2]-lon_t[:,1])\n lon_t[:,-1] = lon_t[:,-2] + (lon_t[:,-2]-lon_t[:,-3])\n lat_t = lat_t[np.r_[0,0,:np.size(lat_t,0),-1,-1]]\n lat_t = lat_t[:,np.r_[0,:np.size(lat_t,1),-1]]\n lat_t[0,:] = -85\n lat_t[1,:] = -80\n lat_t[-2,:] = lat_t[-3,:]\n lat_t[-1,:] = lat_t[-4,:]\n lon_uv = lon_uv[np.r_[0,0,:np.size(lon_uv,0),-1,-1]]\n\n lon_uv[-2,:fold1] = lon_uv[-4,L_t:fold1-1:-1]\n lon_uv[-2,L_t:fold1-1:-1] = lon_uv[-4,:fold1]\n lon_uv[-1,:fold1] = lon_uv[-5,L_t:fold1-1:-1]\n lon_uv[-1,L_t:fold1-1:-1] = 
lon_uv[-5,:fold1]\n\n lon_uv = lon_uv[:,np.r_[0,:np.size(lon_uv,1),-1]]\n lon_uv[:,0] = lon_uv[:,1] - (lon_uv[:,2]-lon_t[:,1])\n lon_uv[:,-1] = lon_uv[:,-2] + (lon_uv[:,-2]-lon_uv[:,-3])\n lat_uv = lat_uv[np.r_[0,0,:np.size(lat_uv,0),-1,-1]]\n lat_uv = lat_uv[:,np.r_[0,:np.size(lat_uv,1),-1]]\n lat_uv[0,:] = -85\n lat_uv[1,:] = -80\n lat_uv[-2,:] = lat_uv[-3,:]\n lat_uv[-1,:] = lat_uv[-4,:]\n mask_t = mask_t[:,np.r_[0,0,:np.size(mask_t,1),-1,-1],:]\n mask_t = mask_t[:,:,np.r_[0,:np.size(mask_t,2),-1]]\n mask_t[:,:,0] = mask_t[:,:,-2]\n mask_t[:,:,-1] = mask_t[:,:,1]\n mask_uv = mask_uv[:,np.r_[0,0,:np.size(mask_uv,1),-1,-1],:]\n mask_uv = mask_uv[:,:,np.r_[0,:np.size(mask_uv,2),-1]]\n mask_uv[:,:,0] = mask_uv[:,:,-2]\n mask_uv[:,:,-1] = mask_uv[:,:,1]\n h = h[np.r_[0,0,:np.size(h,0),-1,-1]]\n h = h[:,np.r_[0,:np.size(h,1),-1]]\n h[:,0] = h[:,-2]\n h[:,-1] = h[:,1]\n f = f[np.r_[0,0,:np.size(f,0),-1,-1]]\n f = f[:,np.r_[0,:np.size(f,1),-1]]\n f[:,0] = f[:,-2]\n f[:,-1] = f[:,1]\n m,l = h.shape\n xrange=(1,l-2)\n yrange=(ystart+2,m-2)\n\n return BGrid_GFDL(lon_t, lat_t, lon_uv, lat_uv, \\\n mask_t, mask_uv, h, z_t, z_t_edges, \\\n z_uv, z_uv_edges, f, \\\n name, xrange=xrange, yrange=yrange)",
"def spatial_bin(data, metadata, lat, lon, binsize=1, pressure=None, pbins=None):\n \n uv_data = True if 'Variable' in metadata and metadata['Variable'] == 'uv' else False \n\n if pbins is None:\n pressure_list = [None, 0, 100, 250, 500, 700, 850, 925, 1000, 1100]\n else:\n pbins.insert(0, None)\n pressure_list = pbins\n \n if uv_data:\n if pressure.any() == None:\n binned_u_data, binned_v_data = bin_data(data, lat, lon,\n binsize=binsize,\n uv_data=uv_data,\n pressure=pressure)\n\n rows = binned_u_data.shape[0]\n cols = binned_u_data.shape[1]\n\n binned_u_nobs = np.full((rows, cols), np.nan)\n binned_u_mean = np.full((rows, cols), np.nan)\n binned_u_max = np.full((rows, cols), np.nan)\n binned_u_min = np.full((rows, cols), np.nan)\n binned_u_std = np.full((rows, cols), np.nan)\n binned_u_rmse = np.full((rows, cols), np.nan)\n\n binned_v_nobs = np.full((rows, cols), np.nan)\n binned_v_mean = np.full((rows, cols), np.nan)\n binned_v_max = np.full((rows, cols), np.nan)\n binned_v_min = np.full((rows, cols), np.nan)\n binned_v_std = np.full((rows, cols), np.nan)\n binned_v_rmse = np.full((rows, cols), np.nan)\n\n for x in range(0, rows):\n for y in range(0, cols):\n if np.isnan(binned_u_data[x, y]).any() == False:\n binned_u_nobs[x, y] = len(binned_u_data[x, y])\n binned_u_mean[x, y] = np.mean(binned_u_data[x, y])\n binned_u_max[x, y] = np.max(binned_u_data[x, y])\n binned_u_min[x, y] = np.min(binned_u_data[x, y])\n binned_u_std[x, y] = np.std(binned_u_data[x, y])\n binned_u_rmse[x, y] = np.sqrt(\n np.nanmean(np.square(binned_u_data[x, y])))\n\n binned_v_nobs[x, y] = len(binned_v_data[x, y])\n binned_v_mean[x, y] = np.mean(binned_v_data[x, y])\n binned_v_max[x, y] = np.max(binned_v_data[x, y])\n binned_v_min[x, y] = np.min(binned_v_data[x, y])\n binned_v_std[x, y] = np.std(binned_v_data[x, y])\n binned_v_rmse[x, y] = np.sqrt(\n np.nanmean(np.square(binned_v_data[x, y])))\n\n else:\n binned_u_data, binned_v_data, binned_pressure = bin_data(data, lat, lon,\n binsize=binsize,\n uv_data=uv_data,\n pressure=pressure)\n\n rows = binned_u_data.shape[0]\n cols = binned_u_data.shape[1]\n \n n_plevs = len(pressure_list)-1\n\n binned_u_nobs = np.full((rows, cols, n_plevs), np.nan)\n binned_u_mean = np.full((rows, cols, n_plevs), np.nan)\n binned_u_max = np.full((rows, cols, n_plevs), np.nan)\n binned_u_min = np.full((rows, cols, n_plevs), np.nan)\n binned_u_std = np.full((rows, cols, n_plevs), np.nan)\n binned_u_rmse = np.full((rows, cols, n_plevs), np.nan)\n\n binned_v_nobs = np.full((rows, cols, n_plevs), np.nan)\n binned_v_mean = np.full((rows, cols, n_plevs), np.nan)\n binned_v_max = np.full((rows, cols, n_plevs), np.nan)\n binned_v_min = np.full((rows, cols, n_plevs), np.nan)\n binned_v_std = np.full((rows, cols, n_plevs), np.nan)\n binned_v_rmse = np.full((rows, cols, n_plevs), np.nan)\n\n for i, pressure in enumerate(pressure_list[:-1]):\n for x in range(0, rows):\n for y in range(0, cols):\n if np.isnan(binned_u_data[x, y]).any() == False:\n if i == 0:\n binned_u_nobs[x, y, i] = len(\n binned_u_data[x, y])\n binned_u_mean[x, y, i] = np.mean(\n binned_u_data[x, y])\n binned_u_max[x, y, i] = np.max(\n binned_u_data[x, y])\n binned_u_min[x, y, i] = np.min(\n binned_u_data[x, y])\n binned_u_std[x, y, i] = np.std(\n binned_u_data[x, y])\n binned_u_rmse[x, y, i] = np.sqrt(\n np.nanmean(np.square(binned_u_data[x, y])))\n\n binned_v_nobs[x, y, i] = len(\n binned_v_data[x, y])\n binned_v_mean[x, y, i] = np.mean(\n binned_v_data[x, y])\n binned_v_max[x, y, i] = np.max(\n binned_v_data[x, y])\n 
binned_v_min[x, y, i] = np.min(\n binned_v_data[x, y])\n binned_v_std[x, y, i] = np.std(\n binned_v_data[x, y])\n binned_v_rmse[x, y, i] = np.sqrt(\n np.nanmean(np.square(binned_v_data[x, y])))\n else:\n pressure_idx = np.where((binned_pressure[x, y] > pressure_list[i]) & (\n binned_pressure[x, y] < pressure_list[i+1]))\n if len(pressure_idx[0]) > 0:\n binned_u_nobs[x, y, i] = len(\n binned_u_data[x, y][pressure_idx])\n binned_u_mean[x, y, i] = np.mean(\n binned_u_data[x, y][pressure_idx])\n binned_u_max[x, y, i] = np.max(\n binned_u_data[x, y][pressure_idx])\n binned_u_min[x, y, i] = np.min(\n binned_u_data[x, y][pressure_idx])\n binned_u_std[x, y, i] = np.std(\n binned_u_data[x, y][pressure_idx])\n binned_u_rmse[x, y, i] = np.sqrt(np.nanmean(\n np.square(binned_u_data[x, y][pressure_idx])))\n\n binned_v_nobs[x, y, i] = len(\n binned_v_data[x, y][pressure_idx])\n binned_v_mean[x, y, i] = np.mean(\n binned_v_data[x, y][pressure_idx])\n binned_v_max[x, y, i] = np.max(\n binned_v_data[x, y][pressure_idx])\n binned_v_min[x, y, i] = np.min(\n binned_v_data[x, y][pressure_idx])\n binned_v_std[x, y, i] = np.std(\n binned_v_data[x, y][pressure_idx])\n binned_v_rmse[x, y, i] = np.sqrt(np.nanmean(\n np.square(binned_v_data[x, y][pressure_idx])))\n\n binned_data = {'u': {'binned_nobs': binned_u_nobs,\n 'binned_mean': binned_u_mean,\n 'binned_max': binned_u_max,\n 'binned_min': binned_u_min,\n 'binned_std': binned_u_std,\n 'binned_rmse': binned_u_rmse\n },\n 'v': {'binned_nobs': binned_v_nobs,\n 'binned_mean': binned_v_mean,\n 'binned_max': binned_v_max,\n 'binned_min': binned_v_min,\n 'binned_std': binned_v_std,\n 'binned_rmse': binned_v_rmse\n }\n }\n\n return binned_data\n\n else:\n if pressure is None:\n binned_data = bin_data(data, lat, lon, binsize=binsize)\n\n rows = binned_data.shape[0]\n cols = binned_data.shape[1]\n\n binned_nobs = np.full((rows, cols), np.nan)\n binned_mean = np.full((rows, cols), np.nan)\n binned_max = np.full((rows, cols), np.nan)\n binned_min = np.full((rows, cols), np.nan)\n binned_std = np.full((rows, cols), np.nan)\n binned_rmse = np.full((rows, cols), np.nan)\n\n for x in range(0, rows):\n for y in range(0, cols):\n if np.isnan(binned_data[x, y]).any() == False:\n binned_nobs[x, y] = len(binned_data[x, y])\n binned_mean[x, y] = np.mean(binned_data[x, y])\n binned_max[x, y] = np.max(binned_data[x, y])\n binned_min[x, y] = np.min(binned_data[x, y])\n binned_std[x, y] = np.std(binned_data[x, y])\n binned_rmse[x, y] = np.sqrt(\n np.nanmean(np.square(binned_data[x, y])))\n\n else:\n binned_data, binned_pressure = bin_data(data, lat, lon,\n binsize=binsize,\n uv_data=False,\n pressure=pressure)\n rows = binned_data.shape[0]\n cols = binned_data.shape[1]\n \n n_plevs = len(pressure_list)-1\n\n binned_nobs = np.full((rows, cols, n_plevs), np.nan)\n binned_mean = np.full((rows, cols, n_plevs), np.nan)\n binned_max = np.full((rows, cols, n_plevs), np.nan)\n binned_min = np.full((rows, cols, n_plevs), np.nan)\n binned_std = np.full((rows, cols, n_plevs), np.nan)\n binned_rmse = np.full((rows, cols, n_plevs), np.nan)\n\n for i, pressure in enumerate(pressure_list[:-1]):\n for x in range(0, rows):\n for y in range(0, cols):\n if np.isnan(binned_data[x, y]).any() == False:\n if i == 0:\n binned_nobs[x, y, i] = len(binned_data[x, y])\n binned_mean[x, y, i] = np.mean(\n binned_data[x, y])\n binned_max[x, y, i] = np.max(binned_data[x, y])\n binned_min[x, y, i] = np.min(binned_data[x, y])\n binned_std[x, y, i] = np.std(binned_data[x, y])\n binned_rmse[x, y, i] = np.sqrt(\n 
np.nanmean(np.square(binned_data[x, y])))\n else:\n pressure_idx = np.where((binned_pressure[x, y] > pressure_list[i]) &\n (binned_pressure[x, y] < pressure_list[i+1]))\n if len(pressure_idx[0]) > 0:\n binned_nobs[x, y, i] = len(\n binned_data[x, y][pressure_idx])\n binned_mean[x, y, i] = np.mean(\n binned_data[x, y][pressure_idx])\n binned_max[x, y, i] = np.max(\n binned_data[x, y][pressure_idx])\n binned_min[x, y, i] = np.min(\n binned_data[x, y][pressure_idx])\n binned_std[x, y, i] = np.std(\n binned_data[x, y][pressure_idx])\n binned_rmse[x, y, i] = np.sqrt(np.nanmean(\n np.square(binned_data[x, y][pressure_idx])))\n\n binned_data = {'binned_nobs': binned_nobs,\n 'binned_mean': binned_mean,\n 'binned_max': binned_max,\n 'binned_min': binned_min,\n 'binned_std': binned_std,\n 'binned_rmse': binned_rmse\n }\n\n return binned_data",
"def locate_droplets_in_mask(grid: GridBase, mask: np.ndarray) -> Emulsion:\n if isinstance(grid, CartesianGridBase):\n return _locate_droplets_in_mask_cartesian(grid, mask)\n elif isinstance(grid, SphericalSymGridBase):\n return _locate_droplets_in_mask_spherical(grid, mask)\n elif isinstance(grid, CylindricalSymGrid):\n return _locate_droplets_in_mask_cylindrical(grid, mask)\n elif isinstance(grid, GridBase):\n raise NotImplementedError(f\"Locating droplets is not possible for grid {grid}\")\n else:\n raise ValueError(f\"Invalid grid {grid}\")",
"def findSubsetIndices(grdMODEL, min_lat, max_lat, min_lon, max_lon):\n\n\n if min_lon<0 and max_lon>0:\n splitExtract = True; Turns=2\n grdMODEL.splitExtract=splitExtract\n else:\n splitExtract = False; Turns=1\n grdMODEL.splitExtract=splitExtract\n grdMODEL.lon = np.where(grdMODEL.lon>180,grdMODEL.lon-360,grdMODEL.lon)\n \n # Array to store the results returned from the function\n res=np.zeros((Turns,4),dtype=np.float64)\n \n lats=grdMODEL.lat[:,0]\n lons=grdMODEL.lon[0,:]\n\n \n for k in range(Turns):\n\n if k==0 and splitExtract == True:\n minLon=min_lon; maxLon=0\n minLon=minLon+360\n maxLon=maxLon+360\n elif k==1 and splitExtract == True:\n minLon=0; maxLon=max_lon\n else:\n minLon=min_lon; maxLon=max_lon\n \n distances1 = []\n distances2 = []\n indices=[]\n index=1\n for point in lats:\n s1 = max_lat-point # (vector subtract)\n s2 = min_lat-point # (vector subtract)\n distances1.append((np.dot(s1, s1), point, index))\n distances2.append((np.dot(s2, s2), point, index-1))\n index=index+1\n\n distances1.sort()\n distances2.sort()\n indices.append(distances1[0])\n indices.append(distances2[0])\n \n distances1 = []\n distances2 = []\n index=1\n \n for point in lons:\n s1 = maxLon-point # (vector subtract)\n s2 = minLon-point # (vector subtract)\n distances1.append((np.dot(s1, s1), point, index))\n distances2.append((np.dot(s2, s2), point, index-1))\n index=index+1\n \n distances1.sort()\n distances2.sort()\n indices.append(distances1[0])\n indices.append(distances2[0])\n\n # Save final product: max_lat_indices,min_lat_indices,max_lon_indices,min_lon_indices\n minJ=indices[1][2]\n maxJ=indices[0][2]\n minI=indices[3][2]\n maxI=indices[2][2]\n \n res[k,0]=minI; res[k,1]=maxI; res[k,2]=minJ; res[k,3]=maxJ;\n\n # Save final product: max_lat_indices,min_lat_indices,max_lon_indices,min_lon_indices\n grdMODEL.indices=res",
"def create_grid(data, drone_altitude, safety_distance):\n\n # minimum and maximum north coordinates\n north_min = np.floor(np.min(data[:, 0] - data[:, 3]))\n north_max = np.ceil(np.max(data[:, 0] + data[:, 3]))\n #print(north_min, north_max)\n\n # minimum and maximum east coordinates\n east_min = np.floor(np.min(data[:, 1] - data[:, 4]))\n east_max = np.ceil(np.max(data[:, 1] + data[:, 4]))\n #print(east_min, east_max)\n # given the minimum and maximum coordinates we can\n # calculate the size of the grid.\n north_size = int(np.ceil((north_max - north_min)))\n east_size = int(np.ceil((east_max - east_min)))\n #print(north_size, east_size)\n # Initialize an empty grid\n grid = np.zeros((north_size, east_size))\n # Center offset for grid\n north_min_center = np.min(data[:, 0])\n east_min_center = np.min(data[:, 1])\n \n # Populate the grid with obstacles\n for i in range(data.shape[0]):\n north, east, alt, d_north, d_east, d_alt = data[i, :]\n\n if alt + d_alt + safety_distance > drone_altitude:\n obstacle = [\n int(north - d_north - safety_distance - north_min_center),\n int(north + d_north + safety_distance - north_min_center),\n int(east - d_east - safety_distance - east_min_center),\n int(east + d_east + safety_distance - east_min_center),\n ]\n grid[obstacle[0]:obstacle[1], obstacle[2]:obstacle[3]] = 1\n\n return grid",
"def bin_coords(coords, dsf, grid=True):\n if grid: return bin_coords_grid(coords, dsf)\n else: \n with h5py.File(dsf, 'r') as dataset:\n x = dataset['x'][:].flatten()\n y = dataset['y'][:].flatten()\n z = dataset['z'][:].flatten()\n\n return bin_coords_nogrid(coords,np.array([x,y,z]))",
"def CreateLandmask(Fieldset, test = False):\n \n \n \"\"\"\n This first set of lines creates a numpy array with u velocities and a numpy\n array with v velocities. First we get the U and V fields from the dataset. Then\n we compute a time chunk, which is needed because of the dataset. Then we only\n take the first slice of the U and V field (we do not need more for finding the land\n and ocean grids). As last we make an empty array which will be filled with zeros and \n ones.\n \"\"\"\n fU = Fieldset.U\n fV = Fieldset.V\n Fieldset.computeTimeChunk(fU.grid.time[0], 1) \n uvel_mask_c = fU.data[0,:,:] \n vvel_mask_c = fV.data[0,:,:]\n# vvel_mask_c = np.roll(vvel_mask_c, 1, axis = 0)\n landmask = np.zeros((uvel_mask_c.shape[0], uvel_mask_c.shape[1]))\n \n \"\"\"\n The first loop checks the value of the u and v velocitites. Notice that we get the\n values of two adjacent grid, since we're working with a C-grid.\n Visualizations of velocities in the C-grids(see below). So for a grid to be flagged identified\n as a land grid two U velocities and 2 V velocities need to be zero. The first loop makes all\n ocean grids 1 and land grids 0. \n ____ ____ ____ ____\n | V | V | \n | | | \n U T U T U\n | | | \n |____V____|_____V_____| \n \"\"\"\n \n for i in range (len(landmask[:,0])-1):\n for j in range (len(landmask[0,:])-1):\n u1 = uvel_mask_c[i,j]\n\n u2 = uvel_mask_c[i,j+1]\n\n v1 = vvel_mask_c[i,j]\n\n v2 = vvel_mask_c[i+1,j]\n\n if u1 != 0 or u2 != 0 or v1 != 0 or v2 != 0:\n landmask[i,j] = 1\n \n \n \"\"\"\n Change all zero to 1 and rest 0. since we want the land grids to be 1 and ocean\n grids to be 0. \n \"\"\"\n \n landmask = ChangeValues(landmask,0,1) \n \n \"\"\"\n The created landmask needs to be shifted upwards one grid. We will\n use the numpy roll function to do this.\n \"\"\"\n \n if test == True:\n plt.figure()\n plt.imshow(landmask)\n plt.colorbar()\n \n return landmask",
"def galaxy_positions():\n hdulist1 = pf.open(source+'/kids_data/KiDS_DR3.1_G9_ugri_shear.fits')\n '''\n hdulist2 = pf.open('../kids_data/KiDS_DR3.1_G12_ugri_shear.fits')\n hdulist3 = pf.open('../kids_data/KiDS_DR3.1_G15_ugri_shear.fits')\n hdulist4 = pf.open('../kids_data/KiDS_DR3.1_G23_ugri_shear.fits')\n hdulist5 = pf.open('../kids_data/KiDS_DR3.1_GS_ugri_shear.fits')\n '''\n ra = hdulist1[1].data['RAJ2000'][:sample]\n dec = hdulist1[1].data['DECJ2000'][:sample]\n global maxra\n maxra = max(ra)\n global minra\n minra = min(ra)\n global maxdec\n maxdec = max(dec)\n global mindec\n mindec = min(dec)\n global bsize\n bsize = abs(max(maxra, maxdec) - min(mindec, minra))\n coords = np.column_stack([ra, dec])\n global SIZE\n SIZE = len(coords)\n print(maxra, maxdec, minra, mindec, SIZE)\n ctree = cKDTree(coords)\n return ctree",
"def monolayer_4band():\n a = 0.222\n ax = 0.438\n ay = 0.332\n theta = 96.79 * (pi / 180)\n phi = 103.69 * (pi / 180)\n\n lat = pb.Lattice(a1=[ax, 0], a2=[0, ay])\n\n h = a * sin(phi - pi / 2)\n s = 0.5 * ax - a * cos(theta / 2)\n lat.add_sublattices(\n ('A', [0, 0, h], 0),\n ('B', [s, 0, 0], 0),\n ('C', [ax/2, ay/2, 0], 0),\n ('D', [ax/2 + s, ay/2, h], 0)\n )\n\n lat.register_hopping_energies({\n 't1': -1.22,\n 't2': 3.665,\n 't3': -0.205,\n 't4': -0.105,\n 't5': -0.055\n })\n\n lat.add_hoppings(\n # t1\n ([-1, 0], 'A', 'D', 't1'),\n ([-1, -1], 'A', 'D', 't1'),\n ([ 0, 0], 'B', 'C', 't1'),\n ([ 0, -1], 'B', 'C', 't1'),\n # t2\n ([ 0, 0], 'A', 'B', 't2'),\n ([ 0, 0], 'C', 'D', 't2'),\n # t3\n ([ 0, 0], 'A', 'D', 't3'),\n ([ 0, -1], 'A', 'D', 't3'),\n ([ 1, 1], 'C', 'B', 't3'),\n ([ 1, 0], 'C', 'B', 't3'),\n # t4\n ([ 0, 0], 'A', 'C', 't4'),\n ([ 0, -1], 'A', 'C', 't4'),\n ([-1, 0], 'A', 'C', 't4'),\n ([-1, -1], 'A', 'C', 't4'),\n ([ 0, 0], 'B', 'D', 't4'),\n ([ 0, -1], 'B', 'D', 't4'),\n ([-1, 0], 'B', 'D', 't4'),\n ([-1, -1], 'B', 'D', 't4'),\n # t5\n ([-1, 0], 'A', 'B', 't5'),\n ([ 0, 1], 'A', 'B', 't5'),\n ([ 0, -1], 'A', 'B', 't5'),\n ([-1, 0], 'C', 'D', 't5'),\n ([ 0, 1], 'C', 'D', 't5'),\n ([ 0, -1], 'C', 'D', 't5'),\n )\n\n return lat",
"def identify_leaflets(u, time_ts):\n z = u.select_atoms(\"all\").center_of_geometry()[2]\n COM_z= np.array([0,0,z]) #defines the global midplane position along z\n x, y, z = u.trajectory.ts.triclinic_dimensions[0][0], u.trajectory.ts.triclinic_dimensions[1][1], u.trajectory.ts.triclinic_dimensions[2][2]\n box = np.array([x, y, z, 90, 90, 90]) \n ### Determining side of the bilayer CHOL belongs to in this frame\n lipid1 = 'CHL'\n lipid2 = 'DLIP'\n lipid3 = 'SSM'\n lipid4 = 'DSPC'\n \n lpd1_atoms = u.select_atoms('resname %s and name O2'%lipid1) \n lpd2_atoms = u.select_atoms('resname %s and name P '%lipid2) \n lpd3_atoms = u.select_atoms('resname %s and name P '%lipid3) \n lpd4_atoms = u.select_atoms('resname %s and name P '%lipid4)\n \n num_lpd2 = lpd2_atoms.n_atoms\n num_lpd3 = lpd3_atoms.n_atoms\n num_lpd4 = lpd4_atoms.n_atoms \n # atoms in the upper leaflet as defined by insane.py or the CHARMM-GUI membrane builders\n # select cholesterol headgroups within 1.5 nm of lipid headgroups in the selected leaflet\n # this must be done because CHOL rapidly flip-flops between leaflets\n # so we must assign CHOL to each leaflet at every time step, and in large systems\n # with substantial membrane undulations, a simple cut-off in the z-axis just will not cut it\n if side == 'up':\n lpd2i = lpd2_atoms[:int((num_lpd2)/2)]\n lpd3i = lpd3_atoms[:int((num_lpd3)/2)]\n lpd4i = lpd4_atoms[:int((num_lpd4)/2)]\n \n\n lipids = lpd2i + lpd3i + lpd4i \n\n ns_lipids = NS.AtomNeighborSearch(lpd1_atoms, box=box) \n lpd1i = ns_lipids.search(lipids,15.0) #1.5 nm\n leaflet = lpd1i + lpd2i + lpd3i + lpd4i \n\n elif side == 'down':\n lpd2i = lpd2_atoms[int((num_lpd2)/2):]\n lpd3i = lpd3_atoms[int((num_lpd3)/2):]\n lpd4i = lpd4_atoms[int((num_lpd4)/2):]\n\n lipids = lpd2i + lpd3i + lpd4i #+ lpd3i\n \n ns_lipids = NS.AtomNeighborSearch(lpd1_atoms, box=box)\n lpd1i = ns_lipids.search(lipids,15.0) # 1.5nm\n leaflet = lpd1i + lpd2i + lpd3i+ lpd4i \n return lpd1i, lpd2i, lpd3i, lpd4i, COM_z, box, leaflet",
"def __init__(self, \n nd = 2, \n goal = np.array([1.0,1.0]),\n state_bound = [[0,1],[0,1]],\n nA = 4,\n action_list = [[0,1],[0,-1],[1,0],[-1,0]],\n<<<<<<< HEAD:archive-code/puddleworld.py\n ngrid = [10.0,10.0],\n maxStep = 40):\n ngrid = [40, 40]\n x_vec = np.linspace(0,1,ngrid[0])\n y_vec = np.linspace(0,1,ngrid[1])\n for x in x_vec:\n for y in y_vec:\n if ~self.inPuddle([x,y]):\n puddle.append([x,y])\n # puddle is a closed loop \n outpuddlepts = np.asarray(puddle)\n \"\"\"\n\n\n # Horizontal wing of puddle consists of \n # 1) rectangle area xch1<= x <=xc2 && ych1-radius <= y <=ych2+radius\n # (xchi,ychi) is the center points (h ==> horizantal)\n # x, y = state[0], state[1]\n xch1, ych1 = 0.3, 0.7\n xch2, ych2 = 0.65, ych1\n radius = 0.1\n\n\n #Vertical wing of puddle consists of \n # 1) rectangle area xcv1-radius<= x <=xcv2+radius && ycv1 <= y <= ycv2\n # where (xcvi,ycvi) is the center points (v ==> vertical)\n xcv1 = 0.45; ycv1=0.4;\n xcv2 = xcv1; ycv2 = 0.8;\n\n # % 2) two half-circle at end edges of rectangle\n \n # POINTS ON HORIZANTAL LINES OF PUDDLE BOUNDARY\n for x in np.arange(xch1,xcv1-radius,self.meshsize[0]/2):\n puddle.append([x,ych1-radius])\n puddle.append([xcv1-radius,ych1-radius])\n \n for x in np.arange(xcv1+radius,xch2,self.meshsize[0]/2):\n puddle.append([x,ych1-radius])\n \n for x in np.arange(xch1,xcv1-radius,self.meshsize[0]/2):\n puddle.append([x,ych1+radius])\n \n puddle.append([xcv1-radius,ych1+radius])\n\n\n for x in np.arange(xcv1+radius,xch2,self.meshsize[0]/2):\n puddle.append([x,ych1+radius])\n\n # POINTS ON VERTICAL LINES OF PUDDLE BOUNDARY\n for y in np.arange(ycv1,ych1-radius,self.meshsize[1]/2):\n puddle.append([xcv1-radius,y])\n \n for y in np.arange(ycv1,ych1-radius,self.meshsize[1]/2):\n puddle.append([xcv1+radius,y])\n \"\"\"\n for y in np.arrange():\n puddle.append([])\n \n for y in np.arrange():\n puddle.append([])\n \"\"\"\n\n # HALF CIRCLES\n ngridTheta = 10\n thetaVec = np.linspace(0,pi,ngridTheta)\n\n for t in thetaVec:\n puddle.append([xch1+radius*np.cos(pi/2+t),ych1+radius*np.sin(pi/2+t)])\n\n for t in thetaVec:\n puddle.append([xch2+radius*np.cos(-pi/2+t),ych2+radius*np.sin(-pi/2+t)])\n\n for t in thetaVec:\n puddle.append([xcv1+radius*np.cos(pi+t),ycv1+radius*np.sin(pi+t)])\n\n for t in thetaVec:\n puddle.append([xcv2+radius*np.cos(t),ycv2+radius*np.sin(t)])\n\n \n outpuddlepts = np.asarray(puddle)\n return outpuddlepts",
"def square_bravais_lattice(self,R,lattice_multiplier=1):\n a = lattice_multiplier*self.a\n b = lattice_multiplier*self.b\n c = lattice_multiplier*self.c\n\n #Calculate the number of lattice points needed in each direction to cover a length of R\n #I use the ceiling function so that when I shift the origin by a one unit cell vector,\n #I still cover all lattive points within a distance of R\n Na = int(np.ceil(R/np.linalg.norm(a)))\n Nb = int(np.ceil(R/np.linalg.norm(b)))\n Nc = int(np.ceil(R/np.linalg.norm(c)))\n\n #calculate the number of vertices in a grid that covers the sphere\n #A sphere of radius R fits within a grid of size 2R x 2R x 2R\n #Adding one to account for origin\n number_vertices = (2*Na+1)*(2*Nb+1)*(2*Nc+1)\n vertices = np.empty((number_vertices,3))\n vertex_labels = np.empty(number_vertices ,dtype=int)\n \n # populate the vertices list with the positions of a lattice with single spacing\n n = 0\n for i in np.arange(-Na,Na+1):\n for j in np.arange(-Nb,Nb+1):\n for k in np.arange(-Nc,Nc+1):\n vertices[n]=np.dot([[i,j,k]],[[a[0],a[1],a[2]],[b[0],b[1],b[2]],[c[0],c[1],c[2]]])\n vertex_labels[n] = self.position_map_inverse[(i*lattice_multiplier)%2,(j*lattice_multiplier)%2,(k*lattice_multiplier)%2]\n n += 1\n return vertices, vertex_labels",
"def cut_bonds_z_random(xy, NL, KL, BL, target_z, min_coord=2, bulk_determination='Triangulation', check=False):\n print ' Cutting bonds z...'\n NP = len(xy)\n NN = np.shape(NL)[1]\n\n # Identify boundary pts, bulk pts\n print ' cut_bonds_z : extract boundary...'\n boundary = extract_boundary(xy, NL, KL, BL)\n # print 'boundary = ', boundary\n bulk = np.setdiff1d(np.arange(NP), boundary)\n NP_bulk = len(bulk)\n NP_bound = len(np.unique(boundary))\n print 'NP_bound = ', NP_bound\n print 'NP_bulk = ', NP_bulk\n\n if bulk_determination == 'Triangulation':\n # Form indices of BL in bulk. Bulk bonds appear in two simplices.\n # CHANGE THIS TO TEST IF BOND TWO SIMPLICES\n TRI = BL2TRI(BL, xy)\n Binds_list = []\n for ii in range(len(BL)):\n row = BL[ii]\n # get rows of TRI where each elem of row lives\n is_a = np.where(TRI == row[0])[0]\n is_b = np.where(TRI == row[1])[0]\n # The intersection of those rows gives where both live\n simplices = np.intersect1d(is_a, is_b)\n # print 'simplices = ', simplices\n # print 'np.size(simplices) = ', np.size(simplices)\n # If more than one simplex, bulk bond\n if np.size(simplices) < 2:\n # add to boundary list\n Binds_list.append(ii)\n # print ' --> Binds = ', Binds_list\n\n Binds = np.array(Binds_list).ravel()\n # Get the BL indices of bulk bonds --> (binds)\n binds = np.setdiff1d(np.arange(len(BL)), Binds)\n\n elif bulk_determination == 'Endpts':\n # Define bulk bonds as connecting at least one bulk particle\n is_a = np.in1d(BL[:, 0], bulk)\n is_b = np.in1d(BL[:, 1], bulk)\n binds = np.where(np.logical_or(is_a, is_b))[0]\n Binds = np.setdiff1d(np.arange(len(BL)), binds)\n else:\n raise RuntimeError('ERROR: argument <bulk_determination> did not match known method!')\n\n # print 'binds = ', binds\n # print 'Binds = ', Binds\n print 'len(binds) = ', len(binds)\n print 'len(Binds) = ', len(Binds)\n\n # Check\n if check:\n # plt.triplot(xy[:,0], xy[:,1], TRI, 'bo-')\n for bii in binds:\n XX = xy[BL[bii], 0]\n YY = xy[BL[bii], 1]\n plt.plot(XX, YY, 'b-')\n for Bii in Binds:\n XX = xy[BL[Bii], 0]\n YY = xy[BL[Bii], 1]\n plt.plot(XX, YY, 'r-')\n # for i in range(len(xy)):\n # plt.text(xy[i,0]+0.2,xy[i,1],str(i))\n plt.gca().set_aspect('equal')\n plt.show()\n\n # Compute the starting z in the bulk\n countKL = [KL[jj] for jj in bulk]\n # print 'found = ', np.count_nonzero(countKL), ' connections for ', NP_bulk, ' bulk particles...'\n z_start = float(np.count_nonzero(countKL)) / float(NP_bulk)\n print 'z_start = ', z_start\n print 'target_z = ', target_z\n\n # number of bonds to cut in the bulk\n # Be sure to divide the number of bonds by 2, since each bond double counts\n nbulk2cut = int(max([0, round((z_start - target_z) * 0.5 * float(NP_bulk))]))\n print 'nbulk2cut = ', nbulk2cut\n # number of bonds to cut in the boundary = nbulk2cut * (# boundary bonds)/(#bulk bonds)\n nB2cut = int(round(nbulk2cut * float(len(Binds)) / float(len(binds))))\n print 'nB2cut = ', nB2cut\n\n # CUT RANDOM BONDS\n\n ############################################\n ## DO BOUNDARY FIRST --> to avoid dangling particles\n # Choose nB2cut randomly from bulk\n # Shuffle bulk in-place\n np.random.shuffle(Binds)\n # Now work slowly towards selecting nbulk2cut: of the bonds,\n # but ensure that never leave a particle dangling without bonds\n done_cutting = False\n dmyi = 0\n # Set up mask for BL\n mask = np.ones(len(BL), dtype=bool)\n\n #################################\n # # Check :\n # plt.figure()\n # plt.gca().set_aspect('equal')\n # for ii in range(len(BL)):\n # XX = xy[BL[ii],0]\n # YY = 
xy[BL[ii],1]\n # plt.plot(XX, YY, 'b-')\n # plt.text(np.mean(XX), np.mean(YY), str(ii))\n # plt.show()\n #################################\n\n while not done_cutting:\n if len(np.where(mask == False)[0]) == nB2cut:\n done_cutting = True\n else:\n if np.mod(dmyi, 200) == 1:\n print 'cutting boundary bond: pass ', dmyi, ' (need to cut', nB2cut, ')'\n # consider adding dmyi element of bind to cut (make a test list)\n test = copy.deepcopy(mask)\n test[Binds[dmyi]] = False\n BLtmp = BL[test]\n # Check that BL leads to no dangling particles\n KLtmp = BL2KL(BLtmp, NL)\n # if all the rows in KLtmp have at least one nonzero bond, add dmyi to cut\n # print 'KLtmp.any(axis=1) = ', KLtmp.any(axis=1)\n if (np.where(~KLtmp.any(axis=1))[0]).size > 0:\n dmyi += 1\n else:\n mask[Binds[dmyi]] = False\n dmyi += 1\n\n ############################################\n # Choose nbulk2cut randomly from bulk\n # Shuffle bulk in-place\n np.random.shuffle(binds)\n # print 'binds = ', binds\n # Now work slowly towards selecting nbulk2cut: of the bonds,\n # but ensure that never leave a particle dangling without bonds\n done_cutting = False\n dmyi = 0\n while not done_cutting:\n if len(np.where(mask == False)[0]) == nB2cut + nbulk2cut:\n done_cutting = True\n else:\n if np.mod(dmyi, 200) == 1:\n print 'cutting bulk bond: pass ', dmyi, ' (need to cut', nbulk2cut, ')'\n # consider adding dmyi element of bind to cut (make a test list)\n test = copy.deepcopy(mask)\n test[binds[dmyi]] = False\n BLtmp = BL[test]\n # Check that BL leads to no dangling particles\n KLtmp = BL2KL(BLtmp, NL)\n # print 'KL = ', KLtmp\n # print 'np.where(~KLtmp.any(axis=1))[0] = ', np.where(~KLtmp.any(axis=1))[0]\n # if all the rows in KLtmp have at least one nonzero bond, add dmyi to cut\n if (np.where(~KLtmp.any(axis=1))[0]).size > min_coord - 1:\n dmyi += 1\n else:\n mask[binds[dmyi]] = False\n dmyi += 1\n\n # drop the nbulk2cut + nB2cut rows from total Bond List\n BL = BL[mask]\n # print 'BLout = ', BLout\n NL, KL = BL2NLandKL(BL, NN=NN)\n if check:\n display_lattice_2D(xy, BL)\n\n print '\\nReturning lattice with ', len(BL), ' bonds for ', NP, ' particles...'\n print 'KL[bulk] = ', KL[bulk]\n\n return NL, KL, BL",
"def spherical_bravais_lattice(self,R,iNumber,iLetter,jNumber,jLetter,lattice_multiplier=1):\n\n vertices, vertex_labels = self.square_bravais_lattice(R,lattice_multiplier)\n #Shift vertices to be the lattice generated from the jth position\n vertices = vertices + self.position[str(jNumber) + jLetter]\n #Calculate distances from the ith atom to each other atom\n distance = np.sqrt(np.sum(np.power(vertices - self.position[str(iNumber) + iLetter],2),axis=1))\n #only keep the locations of which are within a distance R from ion i\n #I take the intersection with non-zero distances to avoid counting origin when ith and jth ions are equal\n vertices = vertices[(distance < R) & (distance != 0.0)]\n vertex_labels = vertex_labels[(distance < R) & (distance != 0.0)]\n #If this is a lattice of the B ions, then change the vertex labels accordingly\n if jLetter == 'B':\n vertex_labels += 8\n \n return vertices, vertex_labels",
"def test_correct_binid(self):\n\n maps = Maps(plateifu='8485-1901', release='DR17', bintype='HYB10')\n spaxel = maps[22, 14]\n\n assert isinstance(spaxel, Spaxel)\n assert spaxel.x == 14, spaxel.y == 22\n\n bin_spaxels = spaxel.stellar_vel.bin.get_bin_spaxels()\n\n for sp in bin_spaxels:\n\n sp.load()\n assert sp.stellar_vel.bin.binid == spaxel.stellar_vel.bin.binid\n\n sp_bin = maps[sp.y, sp.x]\n assert sp_bin.stellar_vel.bin.binid == spaxel.stellar_vel.bin.binid",
"def init(volume):\n\n point = np.array((np.where(volume)[:][0][0], np.where(volume)[:][1][0], np.where(volume)[:][2][0]))\n\n is_visited_map = np.zeros(volume.shape, dtype=int)\n is_visited_map[point[0], point[1], point[2]]=1\n\n is_queued_map =np.zeros(volume.shape, dtype=int)\n is_queued_map[point[0], point[1], point[2]]=1\n\n not_queued,_,_,_ = check_box(volume,point,is_queued_map,np.zeros(volume.shape, dtype=int))\n\n if len(not_queued)==2:\n while True:\n point = np.array(not_queued[0])\n is_queued_map[not_queued[0][0], not_queued[0][1], not_queued[0][2]] = 1\n not_queued,_,_,_ = check_box(volume, point, is_queued_map, np.zeros(volume.shape, dtype=int))\n\n if len(not_queued)!=1:\n break\n\n\n return point",
"def iridiumCatalyst():\n\n coords = [\n [-1.3672999, -1.4398999, 0.1359000],\n [-2.4911998, -0.6808999, 0.1396000],\n [-3.6534996, -1.1211999, -0.5090000],\n [-3.6468996, -2.3434998, -1.1725999],\n [-2.4848998, -3.1187997, -1.1555999],\n [-1.3670999, -2.6316997, -0.4883000],\n [-0.4373000, -3.1872997, -0.4306000],\n [-2.4432998, -4.0866996, -1.6463998],\n [-4.5575996, -0.5223999, -0.4887000],\n [-2.4206998, 0.5908999, 0.8954999],\n [-1.2879999, 0.7903999, 1.6181998],\n [-1.1378999, 1.9348998, 2.3084998],\n [-2.1077998, 2.9319997, 2.3219998],\n [-3.2770997, 2.7402997, 1.5819998],\n [-3.4330997, 1.5600998, 0.8608999],\n [-4.3267996, 1.4057999, 0.2659000],\n [-1.9411998, 3.8419996, 2.8913997],\n [-0.1872000, 2.0459998, 2.8181997],\n [0.4009000, -0.6061999, 1.1172999],\n [-1.2690999, -3.8143996, 3.7856996],\n [-0.1664000, -4.5494996, 4.2269996],\n [1.1218999, -4.0950996, 3.9273996],\n [1.2993999, -2.9384997, 3.1675997],\n [0.2001000, -2.2075998, 2.6786997],\n [-1.0849999, -2.6466997, 3.0382997],\n [-1.9573998, -2.0870998, 2.7090997],\n [0.8509999, -0.7173999, 2.6636997],\n [2.3007998, -2.5989997, 2.9226997],\n [-0.3087000, -5.4547995, 4.8119995],\n [0.6392999, 0.6220999, -0.5923999],\n [-0.0586000, 0.3754000, -1.7751998],\n [0.0637000, 1.5387999, -2.6275997],\n [0.0955000, 1.0794999, -4.0821996],\n [0.8716999, 0.3276000, -4.2397996],\n [0.2802000, 1.9248998, -4.7547995],\n [-0.8681999, 0.6330999, -4.3491996],\n [-1.1760999, 2.4077998, -2.3666998],\n [-1.2042999, 3.2867997, -3.0193997],\n [-2.0717998, 1.8058998, -2.5499998],\n [-1.2019999, 2.7410997, -1.3247999],\n [1.3891999, 2.1923998, -2.1029998],\n [1.3915999, 1.7859998, -0.7128999],\n [2.6481997, 1.5975998, -2.7492997],\n [2.6573997, 0.5124000, -2.6283997],\n [2.7186997, 1.8556998, -3.8108996],\n [3.5309997, 1.9918998, -2.2375998],\n [1.4299999, 3.7172996, -2.1670998],\n [0.6241999, 4.1645996, -1.5814998],\n [2.3812998, 4.0763996, -1.7610998],\n [1.3476999, 4.0651996, -3.2032997],\n [2.0756998, 0.4378000, 1.7666998],\n [3.3654997, -0.0810000, 1.8671998],\n [4.2709996, 1.0315999, 2.0579998],\n [5.4819995, 0.5533999, 2.8527997],\n [5.1838995, 0.0640000, 3.7825996],\n [6.0490994, -0.1689000, 2.2567998],\n [6.1442994, 1.3928999, 3.0939997],\n [4.6909995, 1.5017999, 0.6583999],\n [5.4398995, 2.2997998, 0.7063999],\n [5.1090995, 0.6483999, 0.1171000],\n [3.8181996, 1.8505998, 0.1015000],\n [3.3502997, 2.0656998, 2.7942997],\n [2.0458998, 1.7442998, 2.2507998],\n [3.6590996, 3.5300997, 2.4959998],\n [3.5358997, 3.7464996, 1.4332999],\n [2.9753997, 4.1760996, 3.0565997],\n [4.6847995, 3.7776996, 2.7930997],\n [3.2796997, 1.8273998, 4.3083996],\n [3.0708997, 0.7747999, 4.5225996],\n [4.2123996, 2.1093998, 4.8080995],\n [2.4671998, 2.4302998, 4.7258995],\n [1.7917998, -1.7489998, 0.1412000],\n [1.8500998, -3.1467997, 0.2110000],\n [3.0569997, -3.5802997, -0.4612000],\n [4.1632996, -3.6178996, 0.6029999],\n [4.3272996, -2.6173997, 1.0149999],\n [3.8420996, -4.2739996, 1.4174999],\n [5.1055995, -3.9992996, 0.1957000],\n [2.8261997, -4.9693995, -1.0466999],\n [2.6792997, -5.6906994, -0.2364000],\n [3.6907996, -5.2904995, -1.6389998],\n [1.9392998, -4.9911995, -1.6839998],\n [3.2640997, -2.4330998, -1.5048999],\n [2.7238997, -1.2952999, -0.7998999],\n [4.7166995, -2.1413998, -1.8718998],\n [5.1881995, -3.0188997, -2.3293998],\n [4.7565995, -1.3170999, -2.5912997],\n [5.2941995, -1.8488998, -0.9925999],\n [2.4197998, -2.6279997, -2.7728997],\n [1.3752999, -2.8206997, -2.5121998],\n [2.4501998, -1.7101998, -3.3667997],\n [2.7924997, -3.4536997, 
-3.3878997],\n [-2.2764998, -4.1481996, 4.0262996],\n [1.9905998, -4.6454995, 4.2840996],\n [-4.5414996, -2.6926997, -1.6821998],\n [-4.0522996, 3.5020997, 1.5576998],\n ]\n coords = [[float(j) / Bohr for j in i] for i in coords]\n\n symbols = [\n \"N\",\n \"C\",\n \"C\",\n \"C\",\n \"C\",\n \"C\",\n \"H\",\n \"H\",\n \"H\",\n \"C\",\n \"N\",\n \"C\",\n \"C\",\n \"C\",\n \"C\",\n \"H\",\n \"H\",\n \"H\",\n \"Ir\",\n \"C\",\n \"C\",\n \"C\",\n \"C\",\n \"C\",\n \"C\",\n \"H\",\n \"H\",\n \"H\",\n \"H\",\n \"B\",\n \"O\",\n \"C\",\n \"C\",\n \"H\",\n \"H\",\n \"H\",\n \"C\",\n \"H\",\n \"H\",\n \"H\",\n \"C\",\n \"O\",\n \"C\",\n \"H\",\n \"H\",\n \"H\",\n \"C\",\n \"H\",\n \"H\",\n \"H\",\n \"B\",\n \"O\",\n \"C\",\n \"C\",\n \"H\",\n \"H\",\n \"H\",\n \"C\",\n \"H\",\n \"H\",\n \"H\",\n \"C\",\n \"O\",\n \"C\",\n \"H\",\n \"H\",\n \"H\",\n \"C\",\n \"H\",\n \"H\",\n \"H\",\n \"B\",\n \"O\",\n \"C\",\n \"C\",\n \"H\",\n \"H\",\n \"H\",\n \"C\",\n \"H\",\n \"H\",\n \"H\",\n \"C\",\n \"O\",\n \"C\",\n \"H\",\n \"H\",\n \"H\",\n \"C\",\n \"H\",\n \"H\",\n \"H\",\n \"H\",\n \"H\",\n \"H\",\n \"H\",\n ]\n\n atoms = []\n for i, _ in enumerate(coords):\n atoms.append(Atom(symbols[i], position=coords[i]))\n return Molecule(symbols=atoms)",
"def _fragment(div, eps):\n grids = []\n for lat in range(div):\n for log in range(div):\n init = [(1.0 / div) * lat, (1.0 / div) * log]\n end = [(1.0 / div) * (lat + 1) + 2 * eps,\n (1.0 / div) * (log + 1) + 2 * eps]\n end2 = [(1.0 / div) * (lat + 1), (1.0 / div) * (log + 1)]\n grids.append([init, end, end2])\n return grids",
"def bosonic_cells(self):\n cells = self.cells()\n fermionic_cells = self.fermionic_cells()\n coords = [x for x in cells if x not in fermionic_cells]\n return coords",
"def positions(self, tileID, numSamples):",
"def grid_spherical_decomposed(x, y, z, data, x_i, y_i, z_i, horz_res, missing_value=-32767):\n\n r_map = np.sqrt(x**2.0 + y**2.0) # cartesian radius from map (x,y) center\n az_map = np.arctan2(y,x) #azimuth in the cartesian system. might vary along a ray due to map projection curvature\n vcp = np.fromiter((np.median(az_map[:, i_az, :]) for i_az in range(az_map.shape[1])), np.float32)\n print x.shape\n \n r_i = np.arange(r_map.min(), r_map.max(), horz_res) # cartesian radius from map(x,y) center\n\n # also need to griddata the x, y, z geographic coordinates.\n # decomposed geometry in radar polar coordinates is a not a\n # geophysical coordinate system (it's really a tangent plane\n # coord sys without beam refraction effects), so really there \n # are two xyz systems in play here.\n\n # unless, if by using z and R = np.sqrt(x**2.0 + y**2.0), we remain in a cylinderical \n # system referenced to the map projection in use. I think this is true.\n\n # Interpolate from spherical to cylindrical.\n # Cylindrical system is a different\n # range coordinate than the radar range coordinate.\n az_idx = 1\n cyl_grid_shape = (r_i.shape[0], x.shape[az_idx], z_i.shape[0])\n cyl_grid = np.empty(cyl_grid_shape)\n \n for az_id in range(cyl_grid_shape[az_idx]):\n progress(az_id, cyl_grid_shape[az_idx], 'Gridding along azimuths')\n rhi_r = r_map[:, az_id, :]\n # rhi_y = y[:, az_id, :]\n # R_i = rhir = np.sqrt(x[:, az_id, :]**2.0 + y[:, az_id, :]**2.0)\n rhi_z = z[:, az_id, :]\n rhi_data = data[:, az_id, :]\n \n # input and output coordinates need to be taken from the same coordinate system\n cyl_grid[:, az_id, :] = griddata(rhi_r.flatten(), rhi_z.flatten(), rhi_data.flatten(), r_i, z_i).T\n print \"\\r\" + 'Gridding along azimuths ... done'\n # cyl_grid is r, az, z instead of r, az, el\n \n # get mesh of coordinates for all interpolated radii r_i and along the azimuth\n # since constant radar azimuth might have curvature induced by the map projection\n # it's tricky to do this.\n\n # steps:\n # Do new transform from r,az radar system to map system using r=r_i to get x,y\n # or \n # Just do naive assumption that azimuths are straight and accept the error (used this one)\n \n # interpolate from cylindrical to cartesian.\n grid = np.empty((len(x_i), len(y_i), len(z_i)), dtype=np.float32)\n for z_id in range(z_i.shape[0]):\n progress(z_id, z_i.shape[0], 'Gridding at constant altitude')\n cappi_x = r_i[:, None]*np.cos(vcp[None, :])\n cappi_y = r_i[:, None]*np.sin(vcp[None, :])\n cappi_data = cyl_grid[:,:,z_id]\n \n # input and output coordinates need to be taken from the same coordinate system\n grid_2d = griddata(cappi_x.flatten(), cappi_y.flatten(), cappi_data.flatten(), x_i, y_i).T\n grid[:, :, z_id] = grid_2d\n print \"\\r\" + 'Gridding at constant altitude ... done'\n \n grid[np.isnan(grid)] = missing_value\n \n return grid"
] | [
"0.6334625",
"0.6326938",
"0.6280052",
"0.62748045",
"0.6138442",
"0.5841916",
"0.5814795",
"0.5784163",
"0.5732394",
"0.5644218",
"0.56330884",
"0.5590427",
"0.5507217",
"0.54709375",
"0.5447573",
"0.5432364",
"0.5410063",
"0.54034096",
"0.53786707",
"0.5365377",
"0.53360826",
"0.53168005",
"0.53054184",
"0.53039545",
"0.53022444",
"0.5284551",
"0.5232547",
"0.5232158",
"0.5214145",
"0.52089393"
] | 0.6527605 | 0 |
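Several of the candidate documents listed above (for example cut_bonds_z_random) classify a bond as a bulk bond when both of its endpoints appear together in two simplices of a Delaunay triangulation, and as a boundary bond otherwise. The following is a minimal, self-contained sketch of that test in plain NumPy/SciPy; it uses scipy.spatial.Delaunay directly instead of the BL2TRI helper from the snippet, and the random point set and bond list are purely illustrative.

import numpy as np
from scipy.spatial import Delaunay

def classify_bonds(xy, BL):
    # A bond is a bulk bond iff both endpoints appear together in at least
    # two triangles of the Delaunay triangulation of the points xy.
    tri = Delaunay(xy).simplices                 # (n_triangles, 3) vertex indices
    bulk, boundary = [], []
    for k, (a, b) in enumerate(BL):
        rows_a = np.where(tri == a)[0]           # triangles containing endpoint a
        rows_b = np.where(tri == b)[0]           # triangles containing endpoint b
        shared = np.intersect1d(rows_a, rows_b)  # triangles containing the whole bond
        (bulk if shared.size >= 2 else boundary).append(k)
    return np.array(bulk, dtype=int), np.array(boundary, dtype=int)

# usage: build a bond list from the triangulation edges of random points
rng = np.random.default_rng(0)
xy = rng.random((40, 2))
tri = Delaunay(xy).simplices
edges = np.vstack([tri[:, [0, 1]], tri[:, [1, 2]], tri[:, [0, 2]]])
BL = np.unique(np.sort(edges, axis=1), axis=0)
binds, Binds = classify_bonds(xy, BL)
print(len(binds), "bulk bonds,", len(Binds), "boundary bonds")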
locate droplets in a data set on a single cylindrical grid | def _locate_droplets_in_mask_cylindrical_single(
grid: CylindricalSymGrid, mask: np.ndarray
) -> Emulsion:
# locate the individual clusters
labels, num_features = ndimage.label(mask)
if num_features == 0:
return Emulsion([], grid=grid)
# locate clusters on the symmetry axis
object_slices = ndimage.measurements.find_objects(labels)
indices = []
for index, slices in enumerate(object_slices, 1):
if slices[0].start == 0: # contains point on symmetry axis
indices.append(index)
else:
logger = logging.getLogger(grid.__class__.__module__)
logger.warning("Found object not located on symmetry axis")
# determine position from binary image and scale it to real space
pos = ndimage.measurements.center_of_mass(mask, labels, index=indices)
pos = grid.cell_to_point(pos)
# determine volume from binary image and scale it to real space
vol_r, dz = grid.cell_volume_data
cell_volumes = vol_r * dz
vol = ndimage.measurements.sum(cell_volumes, labels, index=indices)
# return an emulsion of droplets
droplets = (
SphericalDroplet.from_volume(np.array([0, 0, p[2]]), v)
for p, v in zip(pos, vol)
)
return Emulsion(droplets, grid=grid) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _locate_droplets_in_mask_cylindrical(\n grid: CylindricalSymGrid, mask: np.ndarray\n) -> Emulsion:\n assert np.all(mask.shape == grid.shape)\n\n if grid.periodic[1]:\n # locate droplets respecting periodic boundary conditions in z-direction\n\n # pad the array to simulate periodic boundary conditions\n dim_r, dim_z = grid.shape\n mask_padded = np.pad(mask, [[0, 0], [dim_z, dim_z]], mode=\"wrap\")\n assert mask_padded.shape == (dim_r, 3 * dim_z)\n\n # locate droplets in the extended image\n candidates = _locate_droplets_in_mask_cylindrical_single(grid, mask_padded)\n grid._logger.info(f\"Found {len(candidates)} droplet candidates.\")\n\n # keep droplets that are inside the central area\n droplets = Emulsion(grid=grid)\n for droplet in candidates:\n # correct for the additional padding of the array\n droplet.position[2] -= grid.length\n # check whether the droplet lies in the original box\n if grid.contains_point(droplet.position):\n droplets.append(droplet)\n\n grid._logger.info(f\"Kept {len(droplets)} central droplets.\")\n\n # filter overlapping droplets (e.g. due to duplicates)\n droplets.remove_overlapping()\n\n else:\n # simply locate droplets in the mask\n droplets = _locate_droplets_in_mask_cylindrical_single(grid, mask)\n\n return droplets",
"def _locate_droplets_in_mask_cartesian(\n grid: CartesianGridBase, mask: np.ndarray\n) -> Emulsion:\n if mask.shape != grid.shape:\n raise ValueError(\n f\"The shape {mask.shape} of the data is not compatible with the grid \"\n f\"shape {grid.shape}\"\n )\n\n # pad the array to simulate periodic boundary conditions\n offset = np.array([dim if p else 0 for p, dim in zip(grid.periodic, grid.shape)])\n pad = np.c_[offset, offset].astype(np.intc)\n mask_padded = np.pad(mask, pad, mode=\"wrap\")\n assert np.all(mask_padded.shape == np.array(grid.shape) + 2 * offset)\n\n # locate individual clusters in the padded image\n labels, num_labels = ndimage.label(mask_padded)\n if num_labels == 0:\n return Emulsion([], grid=grid)\n indices = range(1, num_labels + 1)\n\n # create and emulsion from this of droplets\n grid._logger.info(f\"Found {num_labels} droplet candidate(s)\")\n\n # determine position from binary image and scale it to real space\n positions = ndimage.measurements.center_of_mass(mask_padded, labels, index=indices)\n # correct for the additional padding of the array\n positions = grid.cell_to_point(positions - offset)\n\n # determine volume from binary image and scale it to real space\n volumes = ndimage.measurements.sum(mask_padded, labels, index=indices)\n volumes = np.asanyarray(volumes) * np.prod(grid.discretization)\n\n # only retain droplets that are inside the central area\n droplets = (\n SphericalDroplet.from_volume(position, volume)\n for position, volume in zip(positions, volumes)\n if grid.cuboid.contains_point(position)\n )\n\n # filter overlapping droplets (e.g. due to duplicates)\n emulsion = Emulsion(droplets, grid=grid)\n num_candidates = len(emulsion)\n if num_candidates < num_labels:\n grid._logger.info(f\"Only {num_candidates} candidate(s) inside bounds\")\n\n emulsion.remove_overlapping()\n if len(emulsion) < num_candidates:\n grid._logger.info(f\"Only {num_candidates} candidate(s) not overlapping\")\n\n return emulsion",
"def locate_droplets_in_mask(grid: GridBase, mask: np.ndarray) -> Emulsion:\n if isinstance(grid, CartesianGridBase):\n return _locate_droplets_in_mask_cartesian(grid, mask)\n elif isinstance(grid, SphericalSymGridBase):\n return _locate_droplets_in_mask_spherical(grid, mask)\n elif isinstance(grid, CylindricalSymGrid):\n return _locate_droplets_in_mask_cylindrical(grid, mask)\n elif isinstance(grid, GridBase):\n raise NotImplementedError(f\"Locating droplets is not possible for grid {grid}\")\n else:\n raise ValueError(f\"Invalid grid {grid}\")",
"def _locate_droplets_in_mask_spherical(\n grid: SphericalSymGridBase, mask: np.ndarray\n) -> Emulsion:\n assert np.all(mask.shape == grid.shape)\n\n # locate clusters in the binary image\n labels, num_labels = ndimage.label(mask)\n if num_labels == 0:\n return Emulsion([], grid=grid)\n\n # locate clusters around origin\n object_slices = ndimage.measurements.find_objects(labels)\n droplet = None\n for slices in object_slices:\n if slices[0].start == 0: # contains point around origin\n radius = grid.cell_to_point(slices[0].stop).flat[-1]\n droplet = SphericalDroplet(np.zeros(grid.dim), radius=radius)\n else:\n logger = logging.getLogger(grid.__class__.__module__)\n logger.warning(\"Found object not located at origin\")\n\n # return an emulsion of droplets\n if droplet:\n return Emulsion([droplet], grid=grid)\n else:\n return Emulsion([], grid=grid)",
"def find_loc_indices(loc, dir, tile):\n #returns the indices of the nearest neighbor point in the given tile, the lon/lat of the nearest neighbor, \n #and the distance (m) from the given point to the nearest neighbor grid cell\n \n filename_pattern = '*grid.tile{0}.nc'.format(tile)\n for f_name in os.listdir(dir):\n if fnmatch.fnmatch(f_name, filename_pattern):\n filename = f_name\n if not filename:\n message = 'No filenames matching the pattern {0} found in {1}'.format(filename_pattern,dir)\n logging.critical(message)\n raise Exception(message)\n \n nc_file = Dataset('{0}/{1}'.format(dir,filename))\n #read in supergrid longitude and latitude\n lon_super = np.array(nc_file['x']) #[lat,lon] or [y,x] #.swapaxes(0,1)\n lat_super = np.array(nc_file['y']) #[lat,lon] or [y,x] #.swapaxes(0,1)\n #get the longitude and latitude data for the grid centers by slicing the supergrid \n #and taking only odd-indexed values\n longitude = lon_super[1::2,1::2]\n latitude = lat_super[1::2,1::2]\n nc_file.close()\n \n adj_long = False \n #look for reversal of longitude; if found, adjust longitude so that 0-360 transition doesn't exist\n temp_loc = copy.deepcopy(loc)\n for row in longitude:\n if not (np.all(np.diff(row) >= 0) or np.all(np.diff(row) <= 0)):\n adj_long = True\n if adj_long:\n longitude[longitude < 180] += 360\n if loc[0] < 180:\n temp_loc[0] += 360\n \n #set up an array to hold the euclidean distance between the given point and every grid cell\n eucl_dist = np.zeros((longitude.shape[0],longitude.shape[1]))\n \n #get the Cartesian location of the given point\n cart_loc = np.array(sph2cart(math.radians(temp_loc[0]), math.radians(temp_loc[1]), earth_radius))\n \n for i in range(len(longitude)):\n for j in range(len(longitude[i])):\n #get the Cartesian location of all grid points\n cart_cell = np.array(sph2cart(math.radians(longitude[i,j]), math.radians(latitude[i,j]), earth_radius))\n \n #calculate the euclidean distance from the given point to the current grid cell\n eucl_dist[i,j] = np.linalg.norm(cart_loc - cart_cell)\n \n #get the indices of the grid point with the minimum euclidean distance to the given point\n i,j = np.unravel_index(eucl_dist.argmin(), eucl_dist.shape)\n \n return (i,j,longitude[i,j]%360.0, latitude[i,j], eucl_dist[i,j])",
"def generate_all_locations(grid, shape):",
"def find_tile(loc, dir):\n #returns the integer tile number\n \n # should be looking in the directory with supergrid data (probably \"fix\" directory)\n filename_pattern = '*grid.tile*.nc'\n \n #find all supergrid files in the directory\n grid_fnames = []\n for f_name in os.listdir(dir):\n if fnmatch.fnmatch(f_name, filename_pattern):\n grid_fnames.append(f_name)\n if not grid_fnames:\n message = 'No filenames matching the pattern {0} found in {1}'.format(filename_pattern,dir)\n logging.critical(message)\n raise Exception(message)\n \n #non-polar tiles can use traditional 2D point-in-polygon methods; if a point is not in a non-polar tile,\n #it is in one of the polar tiles, and the tile can be distinguished by the sign of latitude of the point\n polar_tile_filenames = []\n found_tile = False\n for f_name in grid_fnames:\n if not found_tile:\n nc_file = Dataset('{0}/{1}'.format(dir,f_name))\n longitude = np.array(nc_file['x']).swapaxes(0,1)\n latitude = np.array(nc_file['y']).swapaxes(0,1)\n nc_file.close()\n \n adj_long = False \n #look for reversal of longitude; if found, adjust longitude so that 0-360 transition doesn't exist\n for row in longitude:\n if not (np.all(np.diff(row) >= 0) or np.all(np.diff(row) <= 0)):\n adj_long = True\n if adj_long:\n longitude[longitude < 180] += 360\n \n #get lon/lat pairs for all edges of the tiles\n \n edge_1_lon = longitude[0,:]\n edge_1_lat = latitude[0,:]\n edge_1 = list(zip(edge_1_lon, edge_1_lat))\n \n edge_2_lon = longitude[:,-1]\n edge_2_lat = latitude[:,-1]\n edge_2 = list(zip(edge_2_lon, edge_2_lat))\n \n edge_3_lon = longitude[-1,:]\n edge_3_lat = latitude[-1,:]\n edge_3 = list(zip(edge_3_lon, edge_3_lat))\n edge_3.reverse() #need to reverse the direction of this edge to form a regular polygon\n \n edge_4_lon = longitude[:,0]\n edge_4_lat = latitude[:,0]\n edge_4 = list(zip(edge_4_lon, edge_4_lat))\n edge_4.reverse() #need to reverse the direction of this edge to form a regular polygon\n \n polygon_points = edge_1 + edge_2 + edge_3 + edge_4\n \n tile_polygon = Polygon(polygon_points)\n tile_polygon = tile_polygon.simplify(0)\n \n if tile_polygon.is_valid: #this will be True unless the tile is a polar tile, which will not form a regular polygon in Cartesian space using lon/lat data\n temp_loc = copy.deepcopy(loc)\n if adj_long:\n if loc[0] < 180:\n temp_loc[0] += 360\n loc_point = Point(temp_loc)\n if tile_polygon.contains(loc_point):\n found_tile = True\n return f_name.split('tile')[1].split('.nc')[0] \n else:\n polar_tile_filenames.append(f_name)\n \n #if the tile hasn't been found by this point, it must be contained within a polar tile\n for f_name in polar_tile_filenames:\n nc_file = Dataset('{0}/{1}'.format(dir,f_name))\n latitude = np.array(nc_file['y']).swapaxes(0,1)\n nc_file.close()\n \n #if the sign of the mean latitude of the tile is the same as that of the point, the tile has been found\n if np.sign(np.mean(latitude)) == np.sign(loc[1]):\n found_tile = True\n return f_name.split('tile')[1].split('.nc')[0] \n return -1",
"def swath_from_cartesian_grid(cart_grid, lons, lats, data,\n radius_of_influence):\n\n valid_index = get_valid_index_from_cartesian_grid(cart_grid, lons, lats,\n radius_of_influence)\n\n lons = lons[valid_index]\n lats = lats[valid_index]\n data = data[valid_index]\n\n return lons, lats, data",
"def get_valid_locations(location_list, grid, shape):",
"def findSubsetIndices(grdMODEL, min_lat, max_lat, min_lon, max_lon):\n\n\n if min_lon<0 and max_lon>0:\n splitExtract = True; Turns=2\n grdMODEL.splitExtract=splitExtract\n else:\n splitExtract = False; Turns=1\n grdMODEL.splitExtract=splitExtract\n grdMODEL.lon = np.where(grdMODEL.lon>180,grdMODEL.lon-360,grdMODEL.lon)\n \n # Array to store the results returned from the function\n res=np.zeros((Turns,4),dtype=np.float64)\n \n lats=grdMODEL.lat[:,0]\n lons=grdMODEL.lon[0,:]\n\n \n for k in range(Turns):\n\n if k==0 and splitExtract == True:\n minLon=min_lon; maxLon=0\n minLon=minLon+360\n maxLon=maxLon+360\n elif k==1 and splitExtract == True:\n minLon=0; maxLon=max_lon\n else:\n minLon=min_lon; maxLon=max_lon\n \n distances1 = []\n distances2 = []\n indices=[]\n index=1\n for point in lats:\n s1 = max_lat-point # (vector subtract)\n s2 = min_lat-point # (vector subtract)\n distances1.append((np.dot(s1, s1), point, index))\n distances2.append((np.dot(s2, s2), point, index-1))\n index=index+1\n\n distances1.sort()\n distances2.sort()\n indices.append(distances1[0])\n indices.append(distances2[0])\n \n distances1 = []\n distances2 = []\n index=1\n \n for point in lons:\n s1 = maxLon-point # (vector subtract)\n s2 = minLon-point # (vector subtract)\n distances1.append((np.dot(s1, s1), point, index))\n distances2.append((np.dot(s2, s2), point, index-1))\n index=index+1\n \n distances1.sort()\n distances2.sort()\n indices.append(distances1[0])\n indices.append(distances2[0])\n\n # Save final product: max_lat_indices,min_lat_indices,max_lon_indices,min_lon_indices\n minJ=indices[1][2]\n maxJ=indices[0][2]\n minI=indices[3][2]\n maxI=indices[2][2]\n \n res[k,0]=minI; res[k,1]=maxI; res[k,2]=minJ; res[k,3]=maxJ;\n\n # Save final product: max_lat_indices,min_lat_indices,max_lon_indices,min_lon_indices\n grdMODEL.indices=res",
"def query_region(self, point):\n result = []\n indexes = []\n for didx, dpoint in enumerate(self.data):\n if dpoint != point:\n if self.l2_distance(dpoint, point) <= self.eps:\n result.append(dpoint)\n indexes.append(didx)\n return result, indexes",
"def syed_dilation(data, vessel):",
"def identify_leaflets(u, time_ts):\n z = u.select_atoms(\"all\").center_of_geometry()[2]\n COM_z= np.array([0,0,z]) #defines the global midplane position along z\n x, y, z = u.trajectory.ts.triclinic_dimensions[0][0], u.trajectory.ts.triclinic_dimensions[1][1], u.trajectory.ts.triclinic_dimensions[2][2]\n box = np.array([x, y, z, 90, 90, 90]) \n ### Determining side of the bilayer CHOL belongs to in this frame\n lipid1 = 'CHL'\n lipid2 = 'DLIP'\n lipid3 = 'SSM'\n lipid4 = 'DSPC'\n \n lpd1_atoms = u.select_atoms('resname %s and name O2'%lipid1) \n lpd2_atoms = u.select_atoms('resname %s and name P '%lipid2) \n lpd3_atoms = u.select_atoms('resname %s and name P '%lipid3) \n lpd4_atoms = u.select_atoms('resname %s and name P '%lipid4)\n \n num_lpd2 = lpd2_atoms.n_atoms\n num_lpd3 = lpd3_atoms.n_atoms\n num_lpd4 = lpd4_atoms.n_atoms \n # atoms in the upper leaflet as defined by insane.py or the CHARMM-GUI membrane builders\n # select cholesterol headgroups within 1.5 nm of lipid headgroups in the selected leaflet\n # this must be done because CHOL rapidly flip-flops between leaflets\n # so we must assign CHOL to each leaflet at every time step, and in large systems\n # with substantial membrane undulations, a simple cut-off in the z-axis just will not cut it\n if side == 'up':\n lpd2i = lpd2_atoms[:int((num_lpd2)/2)]\n lpd3i = lpd3_atoms[:int((num_lpd3)/2)]\n lpd4i = lpd4_atoms[:int((num_lpd4)/2)]\n \n\n lipids = lpd2i + lpd3i + lpd4i \n\n ns_lipids = NS.AtomNeighborSearch(lpd1_atoms, box=box) \n lpd1i = ns_lipids.search(lipids,15.0) #1.5 nm\n leaflet = lpd1i + lpd2i + lpd3i + lpd4i \n\n elif side == 'down':\n lpd2i = lpd2_atoms[int((num_lpd2)/2):]\n lpd3i = lpd3_atoms[int((num_lpd3)/2):]\n lpd4i = lpd4_atoms[int((num_lpd4)/2):]\n\n lipids = lpd2i + lpd3i + lpd4i #+ lpd3i\n \n ns_lipids = NS.AtomNeighborSearch(lpd1_atoms, box=box)\n lpd1i = ns_lipids.search(lipids,15.0) # 1.5nm\n leaflet = lpd1i + lpd2i + lpd3i+ lpd4i \n return lpd1i, lpd2i, lpd3i, lpd4i, COM_z, box, leaflet",
"def _gridloc(self, nddata, bnds, axis):\n\n assert(axis < nddata.ndim), \"axis > ndim\"\n\n nddata = nddata[nddata[:,axis].argsort()]\n loc = np.searchsorted(bnds, nddata[:,axis]) - 1\n return nddata, loc",
"def iridiumCatalyst():\n\n coords = [\n [-1.3672999, -1.4398999, 0.1359000],\n [-2.4911998, -0.6808999, 0.1396000],\n [-3.6534996, -1.1211999, -0.5090000],\n [-3.6468996, -2.3434998, -1.1725999],\n [-2.4848998, -3.1187997, -1.1555999],\n [-1.3670999, -2.6316997, -0.4883000],\n [-0.4373000, -3.1872997, -0.4306000],\n [-2.4432998, -4.0866996, -1.6463998],\n [-4.5575996, -0.5223999, -0.4887000],\n [-2.4206998, 0.5908999, 0.8954999],\n [-1.2879999, 0.7903999, 1.6181998],\n [-1.1378999, 1.9348998, 2.3084998],\n [-2.1077998, 2.9319997, 2.3219998],\n [-3.2770997, 2.7402997, 1.5819998],\n [-3.4330997, 1.5600998, 0.8608999],\n [-4.3267996, 1.4057999, 0.2659000],\n [-1.9411998, 3.8419996, 2.8913997],\n [-0.1872000, 2.0459998, 2.8181997],\n [0.4009000, -0.6061999, 1.1172999],\n [-1.2690999, -3.8143996, 3.7856996],\n [-0.1664000, -4.5494996, 4.2269996],\n [1.1218999, -4.0950996, 3.9273996],\n [1.2993999, -2.9384997, 3.1675997],\n [0.2001000, -2.2075998, 2.6786997],\n [-1.0849999, -2.6466997, 3.0382997],\n [-1.9573998, -2.0870998, 2.7090997],\n [0.8509999, -0.7173999, 2.6636997],\n [2.3007998, -2.5989997, 2.9226997],\n [-0.3087000, -5.4547995, 4.8119995],\n [0.6392999, 0.6220999, -0.5923999],\n [-0.0586000, 0.3754000, -1.7751998],\n [0.0637000, 1.5387999, -2.6275997],\n [0.0955000, 1.0794999, -4.0821996],\n [0.8716999, 0.3276000, -4.2397996],\n [0.2802000, 1.9248998, -4.7547995],\n [-0.8681999, 0.6330999, -4.3491996],\n [-1.1760999, 2.4077998, -2.3666998],\n [-1.2042999, 3.2867997, -3.0193997],\n [-2.0717998, 1.8058998, -2.5499998],\n [-1.2019999, 2.7410997, -1.3247999],\n [1.3891999, 2.1923998, -2.1029998],\n [1.3915999, 1.7859998, -0.7128999],\n [2.6481997, 1.5975998, -2.7492997],\n [2.6573997, 0.5124000, -2.6283997],\n [2.7186997, 1.8556998, -3.8108996],\n [3.5309997, 1.9918998, -2.2375998],\n [1.4299999, 3.7172996, -2.1670998],\n [0.6241999, 4.1645996, -1.5814998],\n [2.3812998, 4.0763996, -1.7610998],\n [1.3476999, 4.0651996, -3.2032997],\n [2.0756998, 0.4378000, 1.7666998],\n [3.3654997, -0.0810000, 1.8671998],\n [4.2709996, 1.0315999, 2.0579998],\n [5.4819995, 0.5533999, 2.8527997],\n [5.1838995, 0.0640000, 3.7825996],\n [6.0490994, -0.1689000, 2.2567998],\n [6.1442994, 1.3928999, 3.0939997],\n [4.6909995, 1.5017999, 0.6583999],\n [5.4398995, 2.2997998, 0.7063999],\n [5.1090995, 0.6483999, 0.1171000],\n [3.8181996, 1.8505998, 0.1015000],\n [3.3502997, 2.0656998, 2.7942997],\n [2.0458998, 1.7442998, 2.2507998],\n [3.6590996, 3.5300997, 2.4959998],\n [3.5358997, 3.7464996, 1.4332999],\n [2.9753997, 4.1760996, 3.0565997],\n [4.6847995, 3.7776996, 2.7930997],\n [3.2796997, 1.8273998, 4.3083996],\n [3.0708997, 0.7747999, 4.5225996],\n [4.2123996, 2.1093998, 4.8080995],\n [2.4671998, 2.4302998, 4.7258995],\n [1.7917998, -1.7489998, 0.1412000],\n [1.8500998, -3.1467997, 0.2110000],\n [3.0569997, -3.5802997, -0.4612000],\n [4.1632996, -3.6178996, 0.6029999],\n [4.3272996, -2.6173997, 1.0149999],\n [3.8420996, -4.2739996, 1.4174999],\n [5.1055995, -3.9992996, 0.1957000],\n [2.8261997, -4.9693995, -1.0466999],\n [2.6792997, -5.6906994, -0.2364000],\n [3.6907996, -5.2904995, -1.6389998],\n [1.9392998, -4.9911995, -1.6839998],\n [3.2640997, -2.4330998, -1.5048999],\n [2.7238997, -1.2952999, -0.7998999],\n [4.7166995, -2.1413998, -1.8718998],\n [5.1881995, -3.0188997, -2.3293998],\n [4.7565995, -1.3170999, -2.5912997],\n [5.2941995, -1.8488998, -0.9925999],\n [2.4197998, -2.6279997, -2.7728997],\n [1.3752999, -2.8206997, -2.5121998],\n [2.4501998, -1.7101998, -3.3667997],\n [2.7924997, -3.4536997, 
-3.3878997],\n [-2.2764998, -4.1481996, 4.0262996],\n [1.9905998, -4.6454995, 4.2840996],\n [-4.5414996, -2.6926997, -1.6821998],\n [-4.0522996, 3.5020997, 1.5576998],\n ]\n coords = [[float(j) / Bohr for j in i] for i in coords]\n\n symbols = [\n \"N\",\n \"C\",\n \"C\",\n \"C\",\n \"C\",\n \"C\",\n \"H\",\n \"H\",\n \"H\",\n \"C\",\n \"N\",\n \"C\",\n \"C\",\n \"C\",\n \"C\",\n \"H\",\n \"H\",\n \"H\",\n \"Ir\",\n \"C\",\n \"C\",\n \"C\",\n \"C\",\n \"C\",\n \"C\",\n \"H\",\n \"H\",\n \"H\",\n \"H\",\n \"B\",\n \"O\",\n \"C\",\n \"C\",\n \"H\",\n \"H\",\n \"H\",\n \"C\",\n \"H\",\n \"H\",\n \"H\",\n \"C\",\n \"O\",\n \"C\",\n \"H\",\n \"H\",\n \"H\",\n \"C\",\n \"H\",\n \"H\",\n \"H\",\n \"B\",\n \"O\",\n \"C\",\n \"C\",\n \"H\",\n \"H\",\n \"H\",\n \"C\",\n \"H\",\n \"H\",\n \"H\",\n \"C\",\n \"O\",\n \"C\",\n \"H\",\n \"H\",\n \"H\",\n \"C\",\n \"H\",\n \"H\",\n \"H\",\n \"B\",\n \"O\",\n \"C\",\n \"C\",\n \"H\",\n \"H\",\n \"H\",\n \"C\",\n \"H\",\n \"H\",\n \"H\",\n \"C\",\n \"O\",\n \"C\",\n \"H\",\n \"H\",\n \"H\",\n \"C\",\n \"H\",\n \"H\",\n \"H\",\n \"H\",\n \"H\",\n \"H\",\n \"H\",\n ]\n\n atoms = []\n for i, _ in enumerate(coords):\n atoms.append(Atom(symbols[i], position=coords[i]))\n return Molecule(symbols=atoms)",
"def find_endpoints(batch_trajectories):\n # empty lists to fill\n site_lats = []\n site_lons = []\n last_lats = []\n last_lons = []\n lats_150 = []\n lons_150 = [] \n last_times = []\n times_150 = []\n last_sst = []\n sst_150 = []\n \n # temporary lists as placeholders\n temp_site_lats = []\n temp_site_lons = []\n temp_lats = []\n temp_lons = []\n temp_lats150 = []\n temp_lons150 = []\n temp_times = []\n temp_times150 = []\n temp_sst = []\n temp_sst150 = []\n\n for speed in range(len(batch_trajectories)):\n # working with one speed at a time means working with one nc file at\n # a time\n \n # reset temporary lists\n temp_site_lats = []\n temp_site_lons = []\n temp_lats = []\n temp_lons = []\n temp_lats150 = []\n temp_lons150 = []\n temp_times = []\n temp_times150 = []\n temp_sst = []\n temp_sst150 = []\n\n # extract variables into lists\n lats = batch_trajectories[speed].variables['lat'][:]\n lons = batch_trajectories[speed].variables['lon'][:]\n lats150 = batch_trajectories[speed].variables['lat150'][:]\n lons150 = batch_trajectories[speed].variables['lon150'][:]\n times = batch_trajectories[speed].variables['time'][:]\n ssts = batch_trajectories[speed].variables['temp'][:]\n ssts_150 = batch_trajectories[speed].variables['temp150'][:]\n\n # if a particle is deleted before time is up, values are masked. \n # We'd like to get the last valid number.\n for trajectory in range(len(lats)):\n i = -1 # index for the last value\n while np.ma.is_masked(lats[trajectory][i]) is True:\n i -= 1 # if the value is masked, go to one value sooner\n \n j = i # use j for the 150m values\n while lats150[trajectory][j] > 0:\n # we want the first index where the latitude is recorded.\n # j is actually the last one where it's not recorded, so we\n # extract the information at index j+1\n j -= 1\n\n # once i and j are determined for a trajectory, we can extract the\n # variables and append them to temporary lists.\n temp_site_lats.append(lats[trajectory][0])\n temp_site_lons.append(lons[trajectory][0])\n temp_lats.append(lats[trajectory][i])\n temp_lons.append(lons[trajectory][i])\n temp_lats150.append(lats150[trajectory][j+1])\n temp_lons150.append(lons150[trajectory][j+1])\n temp_times.append(times[trajectory][i])\n temp_sst.append(ssts[trajectory][i])\n temp_sst150.append(ssts_150[trajectory][j+1])\n temp_times150.append(times[trajectory][j+1])\n \n # after the temporary lists are appended by sinking speed, they\n # are appended to the big lists that are returned by the function.\n # this keeps the structure of being separated by sinking speed.\n site_lats.append(temp_site_lats)\n site_lons.append(temp_site_lons)\n last_lats.append(temp_lats)\n last_lons.append(temp_lons)\n lats_150.append(temp_lats150)\n lons_150.append(temp_lons150)\n last_times.append(temp_times)\n times_150.append(temp_times150)\n last_sst.append(temp_sst)\n sst_150.append(temp_sst150)\n \n return site_lats, site_lons, last_lats, last_lons, lats_150, lons_150,\\\n last_times, times_150, last_sst, sst_150",
"def _pick_triplets(len_data, n_triplets):\n\n n_samples = n_triplets * N_CAMS\n\n indices = np.zeros((n_samples), dtype = np.int)\n\n _indices = range(0, len_data, N_CAMS)\n\n for i in range(0, n_samples, N_CAMS):\n k = np.random.choice(_indices)\n indices[i] = k + 1 # Left\n indices[i + 1] = k # Center\n indices[i + 2] = k + 2 # Right\n\n return indices",
"def locate_neighbors(grouped, row, column, width, height, reach):\n neighbors = []\n for row_val in range(2*int(reach) + 1):\n for col_val in range(2*int(reach) + 1):\n row_final = row - int(reach) + row_val\n col_final = column - int(reach) + col_val\n if col_final == column and row_final == row:\n continue\n if col_final >= width or col_final < 0:\n continue\n if row_final >= height or row_final < 0:\n continue\n row_num = (row_final * width) + col_final\n final_int = grouped[row_num][0]\n neighbors.append(final_int)\n return neighbors",
"def create_grid(data, drone_altitude, safety_distance):\n\n # minimum and maximum north coordinates\n north_min = np.floor(np.min(data[:, 0] - data[:, 3]))\n north_max = np.ceil(np.max(data[:, 0] + data[:, 3]))\n #print(north_min, north_max)\n\n # minimum and maximum east coordinates\n east_min = np.floor(np.min(data[:, 1] - data[:, 4]))\n east_max = np.ceil(np.max(data[:, 1] + data[:, 4]))\n #print(east_min, east_max)\n # given the minimum and maximum coordinates we can\n # calculate the size of the grid.\n north_size = int(np.ceil((north_max - north_min)))\n east_size = int(np.ceil((east_max - east_min)))\n #print(north_size, east_size)\n # Initialize an empty grid\n grid = np.zeros((north_size, east_size))\n # Center offset for grid\n north_min_center = np.min(data[:, 0])\n east_min_center = np.min(data[:, 1])\n \n # Populate the grid with obstacles\n for i in range(data.shape[0]):\n north, east, alt, d_north, d_east, d_alt = data[i, :]\n\n if alt + d_alt + safety_distance > drone_altitude:\n obstacle = [\n int(north - d_north - safety_distance - north_min_center),\n int(north + d_north + safety_distance - north_min_center),\n int(east - d_east - safety_distance - east_min_center),\n int(east + d_east + safety_distance - east_min_center),\n ]\n grid[obstacle[0]:obstacle[1], obstacle[2]:obstacle[3]] = 1\n\n return grid",
"def village_doors_coordinates(x1, villages, y, z1, halfDoorsInVillage, emptySpaces, axis):\n\n k = []\n assert axis in ('X', 'Z')\n\n if axis == \"Z\":\n for x in xrange(x1, x1 + villages):\n j = [[x, y, z] for z in srange(z1, halfDoorsInVillage, emptySpaces)]\n k.append(j)\n elif axis == \"X\":\n for z in xrange(z1, z1 + villages):\n j = [[x, y, z] for x in srange(x1, halfDoorsInVillage, emptySpaces)]\n k.append(j)\n return k",
"def bounds(self, resids: NDArray) -> List[Tuple[float, float]]:",
"def get_nc_BGrid_GFDL(grdfile, name='GFDL_CM2.1_North_Pacific', area='regional', \\\n xrange=(60,175), yrange=(120, 190), ystart=235):\n\n nc = pyroms.io.Dataset(grdfile)\n\n lon_t = nc.variables['geolon_t'][:]\n lat_t = nc.variables['geolat_t'][:]\n lon_uv = nc.variables['geolon_c'][:]\n lat_uv = nc.variables['geolat_c'][:]\n\n h = nc.variables['ht'][:]\n\n f = nc.variables['coriolis_param'][:]\n\n kmt = nc.variables['kmt'][:]\n z_t = nc.variables['st_ocean'][:]\n z_t_edges = nc.variables['st_edges_ocean'][:]\n\n kmu = nc.variables['kmu'][:]\n z_uv = nc.variables['sw_ocean'][:]\n z_uv_edges = nc.variables['sw_edges_ocean'][:]\n\n # compute mask at t-point\n M_t, L_t = kmt.shape\n N_t = z_t.shape[0]\n mask_t = np.zeros((N_t, M_t, L_t))\n for j in range(M_t):\n for i in range(L_t):\n try:\n mask_t[0:int(kmt[j,i]), j,i] = 1\n except:\n mask_t[:, j,i] = 0\n\n # compute mask at uv-point\n M_uv, L_uv = kmu.shape\n N_uv = z_uv.shape[0]\n mask_uv = np.zeros((N_uv, M_uv, L_uv))\n for j in range(M_uv):\n for i in range(L_uv):\n try:\n mask_uv[0:int(kmu[j,i]), j,i] = 1\n except:\n mask_uv[:, j,i] = 0\n\n if area == 'npolar':\n #add two rows in the north and the south\n lon_t = lon_t[np.r_[0,0,:np.size(lon_t,0),-1,-1]]\n lon_t = lon_t[:,np.r_[0,:np.size(lon_t,1),-1]]\n lon_t[:,0] = lon_t[:,1] - (lon_t[:,2]-lon_t[:,1])\n lon_t[:,-1] = lon_t[:,-2] + (lon_t[:,-2]-lon_t[:,-3])\n lat_t = lat_t[np.r_[0,0,:np.size(lat_t,0),-1,-1]]\n lat_t = lat_t[:,np.r_[0,:np.size(lat_t,1),-1]]\n lat_t[0,:] = -85\n lat_t[1,:] = -80\n lat_t[-2,:] = 90\n lat_t[-1,:] = 91\n lon_uv = lon_uv[np.r_[0,0,:np.size(lon_uv,0),-1,-1]]\n lon_uv = lon_uv[:,np.r_[0,:np.size(lon_uv,1),-1]]\n lon_uv[:,0] = lon_uv[:,1] - (lon_uv[:,2]-lon_t[:,1])\n lon_uv[:,-1] = lon_uv[:,-2] + (lon_uv[:,-2]-lon_uv[:,-3])\n lat_uv = lat_uv[np.r_[0,0,:np.size(lat_uv,0),-1,-1]]\n lat_uv = lat_uv[:,np.r_[0,:np.size(lat_uv,1),-1]]\n lat_uv[0,:] = -85\n lat_uv[1,:] = -80\n lat_uv[-2,:] = 90\n lat_uv[-1,:] = 91\n mask_t = mask_t[:,np.r_[0,0,:np.size(mask_t,1),-1,-1],:]\n mask_t = mask_t[:,:,np.r_[0,:np.size(mask_t,2),-1]]\n mask_t[:,:,0] = mask_t[:,:,-2]\n mask_t[:,:,-1] = mask_t[:,:,1]\n mask_uv = mask_uv[:,np.r_[0,0,:np.size(mask_uv,1),-1,-1],:]\n mask_uv = mask_uv[:,:,np.r_[0,:np.size(mask_uv,2),-1]]\n mask_uv[:,:,0] = mask_uv[:,:,-2]\n mask_uv[:,:,-1] = mask_uv[:,:,1]\n h = h[np.r_[0,0,:np.size(h,0),-1,-1]]\n h = h[:,np.r_[0,:np.size(h,1),-1]]\n h[:,0] = h[:,-2]\n h[:,-1] = h[:,1]\n f = f[np.r_[0,0,:np.size(f,0),-1,-1]]\n f = f[:,np.r_[0,:np.size(f,1),-1]]\n f[:,0] = f[:,-2]\n f[:,-1] = f[:,1]\n m,l = h.shape\n xrange=(1,l-2)\n yrange=(ystart+2,m-2)\n\n if area == 'tripole':\n #add two rows in the north and the south\n fold1 = L_t//2\n lon_t = lon_t[np.r_[0,0,:np.size(lon_t,0),-1,-1]]\n lon_t[-2,:fold1] = lon_t[-3,L_t:fold1-1:-1]\n lon_t[-2,L_t:fold1-1:-1] = lon_t[-3,:fold1]\n lon_t[-1,:fold1] = lon_t[-4,L_t:fold1-1:-1]\n lon_t[-1,L_t:fold1-1:-1] = lon_t[-4,:fold1]\n\n lon_t = lon_t[:,np.r_[0,:np.size(lon_t,1),-1]]\n lon_t[:,0] = lon_t[:,1] - (lon_t[:,2]-lon_t[:,1])\n lon_t[:,-1] = lon_t[:,-2] + (lon_t[:,-2]-lon_t[:,-3])\n lat_t = lat_t[np.r_[0,0,:np.size(lat_t,0),-1,-1]]\n lat_t = lat_t[:,np.r_[0,:np.size(lat_t,1),-1]]\n lat_t[0,:] = -85\n lat_t[1,:] = -80\n lat_t[-2,:] = lat_t[-3,:]\n lat_t[-1,:] = lat_t[-4,:]\n lon_uv = lon_uv[np.r_[0,0,:np.size(lon_uv,0),-1,-1]]\n\n lon_uv[-2,:fold1] = lon_uv[-4,L_t:fold1-1:-1]\n lon_uv[-2,L_t:fold1-1:-1] = lon_uv[-4,:fold1]\n lon_uv[-1,:fold1] = lon_uv[-5,L_t:fold1-1:-1]\n lon_uv[-1,L_t:fold1-1:-1] = 
lon_uv[-5,:fold1]\n\n lon_uv = lon_uv[:,np.r_[0,:np.size(lon_uv,1),-1]]\n lon_uv[:,0] = lon_uv[:,1] - (lon_uv[:,2]-lon_t[:,1])\n lon_uv[:,-1] = lon_uv[:,-2] + (lon_uv[:,-2]-lon_uv[:,-3])\n lat_uv = lat_uv[np.r_[0,0,:np.size(lat_uv,0),-1,-1]]\n lat_uv = lat_uv[:,np.r_[0,:np.size(lat_uv,1),-1]]\n lat_uv[0,:] = -85\n lat_uv[1,:] = -80\n lat_uv[-2,:] = lat_uv[-3,:]\n lat_uv[-1,:] = lat_uv[-4,:]\n mask_t = mask_t[:,np.r_[0,0,:np.size(mask_t,1),-1,-1],:]\n mask_t = mask_t[:,:,np.r_[0,:np.size(mask_t,2),-1]]\n mask_t[:,:,0] = mask_t[:,:,-2]\n mask_t[:,:,-1] = mask_t[:,:,1]\n mask_uv = mask_uv[:,np.r_[0,0,:np.size(mask_uv,1),-1,-1],:]\n mask_uv = mask_uv[:,:,np.r_[0,:np.size(mask_uv,2),-1]]\n mask_uv[:,:,0] = mask_uv[:,:,-2]\n mask_uv[:,:,-1] = mask_uv[:,:,1]\n h = h[np.r_[0,0,:np.size(h,0),-1,-1]]\n h = h[:,np.r_[0,:np.size(h,1),-1]]\n h[:,0] = h[:,-2]\n h[:,-1] = h[:,1]\n f = f[np.r_[0,0,:np.size(f,0),-1,-1]]\n f = f[:,np.r_[0,:np.size(f,1),-1]]\n f[:,0] = f[:,-2]\n f[:,-1] = f[:,1]\n m,l = h.shape\n xrange=(1,l-2)\n yrange=(ystart+2,m-2)\n\n return BGrid_GFDL(lon_t, lat_t, lon_uv, lat_uv, \\\n mask_t, mask_uv, h, z_t, z_t_edges, \\\n z_uv, z_uv_edges, f, \\\n name, xrange=xrange, yrange=yrange)",
"def find_loners(radec, radec_all, radius):\n \n loners = np.ones(len(radec))\n for i,(ra,dec) in enumerate(radec):\n dra = abs(radec_all[:,0] - ra)\n ddec = abs(radec_all[:,1] - dec)\n keep = np.logical_and(dra < radius, ddec < radius)\n r = np.sqrt((dra[keep]**2 + ddec[keep]**2))\n r = r[r != 0]\n if any(r < radius):\n loners[i] = False\n \n return loners",
"def clumping_selection(points, nb_clumps, radius_mu, radius_sigma):\n n = len(points)\n centers = sample(list(range(n)), nb_clumps)\n\n for index in centers:\n radius = normalvariate(radius_mu, radius_sigma)\n r2 = radius**2\n # filter to select all the poit in a ball of radius r2\n # Add these points to the result",
"def locate_droplets(\n phase_field: ScalarField,\n threshold: Union[float, str] = 0.5,\n modes: int = 0,\n minimal_radius: float = 0,\n refine: bool = False,\n interface_width: Optional[float] = None,\n) -> Emulsion:\n assert isinstance(phase_field, ScalarField)\n dim = phase_field.grid.dim # dimensionality of the space\n\n if modes > 0 and dim not in [2, 3]:\n raise ValueError(\"Perturbed droplets only supported for 2d and 3d\")\n\n # determine actual threshold\n if threshold == \"auto\":\n threshold = float(phase_field.data.min() + phase_field.data.max()) / 2\n else:\n threshold = float(threshold)\n\n # locate droplets in thresholded image\n img_binary = phase_field.data > threshold\n candidates = locate_droplets_in_mask(phase_field.grid, img_binary)\n\n if minimal_radius > -np.inf:\n candidates.remove_small(minimal_radius)\n\n droplets = []\n for droplet in candidates:\n # check whether we need to add the interface width\n droplet_class = droplet.__class__\n args: Dict[str, NumberOrArray] = {}\n\n # change droplet class when interface width is given\n if interface_width is not None:\n droplet_class = DiffuseDroplet\n args[\"interface_width\"] = interface_width\n\n # change droplet class when perturbed droplets are requested\n if modes > 0:\n if dim == 2:\n droplet_class = PerturbedDroplet2D\n elif dim == 3:\n droplet_class = PerturbedDroplet3D\n else:\n raise NotImplementedError(f\"Dimension {dim} is not supported\")\n args[\"amplitudes\"] = np.zeros(modes)\n\n # recreate a droplet of the correct class\n if droplet_class != droplet.__class__:\n droplet = droplet_class.from_droplet(droplet, **args)\n\n # refine droplets if necessary\n if refine:\n try:\n droplet = refine_droplet(phase_field, droplet)\n except ValueError:\n continue # do not add the droplet to the list\n droplets.append(droplet)\n\n # return droplets as an emulsion\n emulsion = Emulsion(droplets, grid=phase_field.grid)\n if minimal_radius > -np.inf:\n emulsion.remove_small(minimal_radius)\n return emulsion",
"def layer_coords(label_lst): #full path\n \n #if a fundus then do this block\n gyrus_check = all(i.__contains__(\"fundus\") for i in label_lst)\n if gyrus_check:\n for layer in label_lst:\n #read data\n df_layer = pd.read_csv(layer)\n df_layer = df_layer.iloc[1:,0]\n df_layer = pd.DataFrame( [list(map(float, i)) for i in [list(i.split()) for i in \\\n df_layer.values]], columns=['idk1', 'X', 'Y', 'Z', 'idk2'])[['X', 'Y', 'Z']]\n\n #compute slope\n yvals = [(y2 - y1) for y1, y2 in zip(df_layer['Y'], df_layer['Y'][1:])]\n xvals = [(x2 - x1) for x1, x2 in zip(df_layer['X'], df_layer['X'][1:])]\n layer_slope = [round(i,2) for i in np.divide(yvals, xvals)]\n\n #split lam label into three\n split = math.floor(len(df_layer['X'].values)/3)\n df_layer_right = df_layer[0:split]\n df_layer_left = df_layer[-split:]\n df_layer_middle = df_layer[split:-split]\n\n plt.plot(df_layer['X'], df_layer['Y'], lw=3) #color='#000000'\n # plt.plot(df_layer['X'], df_layer['Y'], linewidth=1, marker='o', markersize=5)\n plt.axis('off')\n plt.savefig('layer_contour.png')\n # plt.show()\n plt.close()\n\n #read, convert to grayscale, find edges\n layer_img = cv2.imread('layer_contour.png')\n layer_img_grey = cv2.cvtColor(layer_img, cv2.COLOR_BGR2GRAY)\n layer_edges = cv2.Canny(layer_img_grey, 30, 200)\n\n #find contours\n contours, hierachy = cv2.findContours(layer_edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n # cv2.imshow('contour', layer_edges)\n # cv2.waitKey(0)\n # cv2.destroyAllWindows()\n \n #order contours\n contours = [np.squeeze(i) for i in contours]\n df_contours = pd.DataFrame(contours)\n contours_ord = df_contours.loc[6].values, df_contours.loc[4].values, \\\n df_contours.loc[3].values, df_contours.loc[2].values, \\\n df_contours.loc[1].values, df_contours.loc[0].values, \\\n df_contours.loc[5].values\n contours_ord = np.squeeze(contours_ord)\n \n\n #plot all layers and add coordinate data to dict \n lay_coords_dict = {}\n for laycon, i in zip(contours_ord, list(range(len(contours)))): \n #split coordinates into top and bottom edge\n if i == 0: #0 == pial\n c_idx = int(np.floor(len(laycon)/2))\n coords_top = np.array(list(reversed(laycon[:c_idx])))\n lay_coords_dict[i] = coords_top[10:]\n # print(coords_top)\n\n else:\n c_idx = int(np.floor(len(laycon)/2))\n coords_top = np.array(list(reversed(laycon[c_idx:])))\n lay_coords_dict[i] = coords_top[5:-7]\n\n \n #plot coords\n # for key, val in lay_coords_dict.items():\n # plt.plot([i[0] for i in val], [i[1] for i in val], lw=1.75)\n # plt.gca().invert_yaxis()\n # plt.show()\n # plt.close()\n # sys.exit()\n\n #delete edge detect image and return dict\n rm_img_cmd = \"rm layer_contour.png\"\n os.system(rm_img_cmd)\n return(lay_coords_dict)\n \n\n #for crown data do this block\n else:\n for layer in label_lst:\n #read data\n df_layer = pd.read_csv(layer)\n df_layer = df_layer.iloc[1:,0]\n df_layer = pd.DataFrame( [list(map(float, i)) for i in [list(i.split()) for i in \\\n df_layer.values]], columns=['idk1', 'X', 'Y', 'Z', 'idk2'])[['X', 'Y', 'Z']]\n\n #compute slope\n yvals = [(y2 - y1) for y1, y2 in zip(df_layer['Y'], df_layer['Y'][1:])]\n xvals = [(x2 - x1) for x1, x2 in zip(df_layer['X'], df_layer['X'][1:])]\n layer_slope = [round(i,2) for i in np.divide(yvals, xvals)]\n\n #split lam label into three\n split = math.floor(len(df_layer['X'].values)/3)\n df_layer_right = df_layer[0:split]\n df_layer_left = df_layer[-split:]\n df_layer_middle = df_layer[split:-split]\n\n plt.plot(df_layer['X'], df_layer['Y'], lw=3) #color='#000000', lw=5\n # 
plt.plot(df_layer['X'], df_layer['Y'], linewidth=1, marker='o', markersize=5)\n plt.axis('off')\n plt.savefig('layer_contour.png')\n # plt.show()\n plt.close()\n\n #read, convert to grayscale, find edges\n layer_img = cv2.imread('layer_contour.png')\n layer_img_grey = cv2.cvtColor(layer_img, cv2.COLOR_BGR2GRAY)\n layer_edges = cv2.Canny(layer_img_grey, 30, 200)\n\n #find contours\n contours, hierachy = cv2.findContours(layer_edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n # cv2.imshow('contour', layer_edges)\n # cv2.waitKey(0)\n # cv2.destroyAllWindows()\n \n\n #plot all layers and add coordinate data to dict\n lay_coords_dict = {}\n for laycon, i in zip(contours, list(range( len(contours) ) )[::-1] ):#7\n #split coordinates into top and bottom edge\n # print(laycon)\n coords_lst = [list(ii) for i in laycon for ii in i] # 0 == GWB\n # print(coords_lst)\n\n c_split = math.floor(len(coords_lst)/4)\n coords_top = coords_lst[:c_split][::-1] + coords_lst[-c_split:][::-1]\n lay_coords_dict[i] = coords_top\n df_coords = pd.DataFrame(coords_top, columns=['X', 'Y'])\n # print(df_coords)\n\n #plot using all coordinates\n plt.plot(df_coords['X'].values, df_coords['Y'].values, lw=3)\n plt.gca().invert_yaxis()\n # plt.show()\n plt.close()\n\n\n # use k means to get rid of extra coords on short lines\n for i in list(range(1,6)):\n # kMEANS clustering, separate short line bottom half\n df_short = pd.DataFrame(lay_coords_dict[i], columns=['X', 'Y']) #1=L1,\n # plt.scatter( df_short['X'].values, df_short['Y'].values, s=5 )\n # plt.gca().invert_yaxis()\n # plt.show()\n\n #scale data\n scaler = StandardScaler()\n scaler.fit( df_short[['X', 'Y']].values )\n short_scale = scaler.transform( df_short[['X', 'Y']].values )\n\n init = np.array([[0.514, -0.629], [-1.101, 1.344]])\n\n #predict\n # kmeans_classifier = KMeans(n_clusters=2, init=init) #fixed centroids\n kmeans_classifier = KMeans(n_clusters=2) \n\n y_kmeans = kmeans_classifier.fit_predict(short_scale)\n centroids = kmeans_classifier.cluster_centers_\n inertia = kmeans_classifier.inertia_\n\n\n #update df\n df_short.insert(2, column='kClass', value=y_kmeans)\n\n #df scaled\n df_short_scale = pd.DataFrame(short_scale, columns=['X', 'Y'])\n df_short_scale.insert(2, column='kClass', value=y_kmeans)\n \n\n \"\"\"\n #plot data points for k means, clusters\n colmap = {0: '#029386', 1: '#D2691E', 2: '#A52A2A'}\n for i in range(2):\n new_df = df_short_scale[df_short_scale['kClass']==i]\n plt.scatter(new_df['X'].values, new_df['Y'].values, s=20, \\\n label='cluster' + str(i+1), color=colmap[i])\n\n #plot centroids\n for i in range (2):\n plt.scatter(centroids[i][0], centroids[i][1], marker='x', s=500, \\\n label='centroid' + str(i+1), color=colmap[i])\n \n plt.legend()\n plt.gca().invert_yaxis()\n plt.show()\n \"\"\"\n\n\n #new df for clean data, take centroid with more data points\n num_class0 = len(df_short[df_short['kClass']==0])\n num_class1 = len(df_short[df_short['kClass']==1])\n\n if num_class0 > num_class1:\n \n df_short_clean = df_short[df_short['kClass']==0]\n lay_coords_dict[i] = [[i,j] for i,j in zip(df_short_clean['X'].values,\\\n df_short_clean['Y'].values)]\n else:\n df_short_clean = df_short[df_short['kClass']==1]\n lay_coords_dict[i] = [[i,j] for i,j in zip(df_short_clean['X'].values,\\\n df_short_clean['Y'].values)]\n\n #plot clean short line\n # plt.scatter(df_short_clean['X'].values, df_short_clean['Y'].values, s=20)\n # plt.gca().invert_yaxis()\n # plt.show()\n\n #delete edge detect image and return dict\n rm_img_cmd = \"rm 
layer_contour.png\"\n os.system(rm_img_cmd)\n return(lay_coords_dict)",
"def create_grid(data, drone_altitude, safety_distance):\n\n # minimum and maximum north coordinates\n north_min = np.floor(np.amin(data[:, 0] - data[:, 3]))\n north_max = np.ceil(np.amax(data[:, 0] + data[:, 3]))\n print(0, north_max - north_min)\n\n # minimum and maximum east coordinates\n east_min = np.floor(np.amin(data[:, 1] - data[:, 4]))\n east_max = np.ceil(np.amax(data[:, 1] + data[:, 4]))\n print(0, east_max - east_min)\n\n # given the minimum and maximum coordinates we can\n # calculate the size of the grid.\n north_size = int(np.ceil(north_max - north_min))\n east_size = int(np.ceil(east_max - east_min))\n\n # Initialize an empty grid\n grid = np.zeros((north_size, east_size))\n\n # Populate the grid with obstacles\n print(data.shape[0])\n for i in range(data.shape[0]):\n north, east, alt, d_north, d_east, d_alt = data[i, :]\n # Determine which cells contain obstacles\n nc = int(north - north_min)\n ec = int(east - east_min)\n dn = int(d_north)\n de = int(d_east)\n sd = int(safety_distance)\n x0 = int(ec - (de + sd))\n y0 = int(nc - (dn + sd))\n xm = int(ec + (de + sd))\n ym = int(nc + (dn + sd))\n nm = north_max - north_min\n em = east_max - east_min\n for e in range(x0, xm):\n for n in range(y0, ym):\n # skip out of range conditions\n if e < 0:\n continue\n if e >= em:\n continue\n if n < 0:\n continue\n if n >= nm:\n continue\n if (alt + d_alt + safety_distance) <= drone_altitude:\n continue\n # plot it\n grid[n][e] = 1\n\n return grid",
"def _get_grid_cell_indexes(proj, xs, ys, bounding_box):\n # Unpack values from the projection\n eq_rad = proj.semi_major_axis\n polar_rad = proj.semi_minor_axis\n h = proj.perspective_point_height + eq_rad\n lon0 = proj.longitude_of_projection_origin\n \n # Unpack values from the area we want to grab the data\n min_lat, min_lon = bounding_box.sw_corner()\n max_lat, max_lon = bounding_box.ne_corner()\n \n with np.errstate(invalid='ignore'):\n # Calculate the lat and lon grids\n xs, ys = np.meshgrid(xs, ys)\n a_vals = np.power(np.sin(xs), 2.0) + \\\n np.power(np.cos(xs), 2.0) * (np.power(np.cos(ys), 2.0) + \\\n eq_rad * eq_rad / polar_rad / polar_rad * np.power(np.sin(ys), 2.0))\n b_vals = -2 * h * np.cos(xs) * np.cos(ys)\n c_val = h * h - eq_rad * eq_rad\n \n rs = (-b_vals - np.sqrt(np.power(b_vals, 2.0) - 4 * a_vals * c_val)) / (2 * a_vals)\n \n sx = rs * np.cos(xs) * np.cos(ys)\n sy = -rs * np.sin(xs)\n sz = rs * np.cos(xs) * np.sin(ys)\n \n lats = np.arctan((eq_rad *eq_rad * sz) \\\n / (polar_rad * polar_rad * np.sqrt(np.power(h - sx, 2.0) + np.power(sy, 2.0))))\n lats = np.degrees(lats)\n \n lons = np.radians(lon0) - np.arctan(sy / (h - sx))\n lons = np.degrees(lons)\n \n # Flatten the arrays so we get a 1D list of indexes\n lats = lats.flatten()\n lons = lons.flatten()\n \n # Filter out values not in our bounding box\n lats = np.where(np.logical_and(lats >= min_lat, lats <= max_lat))[0]\n lons = np.where(np.logical_and(lons >= min_lon, lons <= max_lon))[0]\n idxs = list(set(lons).intersection(set(lats)))\n \n return idxs",
"def bin_data(data, lat, lon, binsize=1, uv_data=False, pressure=None):\n\n # Create lats and lons based on binsize\n lonlen = 360\n latlen = 180\n\n lon_lowerlim = 0\n lon_upperlim = 360\n\n lat_lowerlim = -90\n lat_upperlim = 90\n\n if latlen % binsize == 0 and lonlen % binsize == 0:\n latbin = int(latlen/binsize)\n lonbin = int(lonlen/binsize)\n n_deg = binsize/2\n\n ll_lats = np.linspace(lat_lowerlim+(n_deg),\n lat_upperlim-(n_deg),\n latbin)\n\n ll_lons = np.linspace(lon_lowerlim+(n_deg),\n lon_upperlim-(n_deg),\n lonbin)\n\n else:\n print('ERROR: Binsize does not work for grid shape (180,360). Please use different binsize.')\n return\n\n paramlist = list(itertools.product(ll_lats, ll_lons))\n\n # Bin Data\n if uv_data == True:\n binned_u_data = np.full((latbin, lonbin), np.nan, dtype=object)\n binned_v_data = np.full((latbin, lonbin), np.nan, dtype=object)\n\n if pressure is not None:\n binned_pressure = np.full((latbin, lonbin), np.nan, dtype=object)\n\n for val in paramlist:\n # Get index of 1x1 grid lat and lon\n latidx = np.where(ll_lats == val[0])\n lonidx = np.where(ll_lons == val[1])\n # values of the 1x1 grid lat and lon\n binnedlons = val[1]\n binnedlats = val[0]\n\n # find instances where data is within 1x1 grid point of orginal data\n data_idx = np.where((lon >= binnedlons - n_deg) & (lon <= binnedlons + n_deg) &\n (lat >= binnedlats - n_deg) & (lat <= binnedlats + n_deg))\n\n latlon_idx = [latidx[0][0], lonidx[0][0]]\n\n # calculate stats if there is data at this grid point, else append np.nan\n if len(data_idx[0]) > 0:\n u = data['u'][data_idx]\n v = data['v'][data_idx]\n\n binned_u_data[latlon_idx[0], latlon_idx[1]] = u\n binned_v_data[latlon_idx[0], latlon_idx[1]] = v\n\n if pressure is not None:\n p = pressure[data_idx]\n binned_pressure[latlon_idx[0], latlon_idx[1]] = p\n\n if pressure is not None:\n return binned_u_data, binned_v_data, binned_pressure\n\n else:\n return binned_u_data, binned_v_data\n\n else:\n binned_data = np.full((latbin, lonbin), np.nan, dtype=object)\n if pressure is not None:\n binned_pressure = np.full((latbin, lonbin), np.nan, dtype=object)\n\n for val in paramlist:\n # Get index of grid lat and lon\n latidx = np.where(ll_lats == val[0])\n lonidx = np.where(ll_lons == val[1])\n # values of the 1x1 grid lat and lon\n binnedlons = val[1]\n binnedlats = val[0]\n\n # find instances where data is within 1x1 grid point of orginal data\n data_idx = np.where((lon >= binnedlons - n_deg) & (lon <= binnedlons + n_deg) &\n (lat >= binnedlats - n_deg) & (lat <= binnedlats + n_deg))\n\n latlon_idx = [latidx[0][0], lonidx[0][0]]\n\n # calculate stats if there is data at this grid point\n if len(data_idx[0]) > 0:\n d = data[data_idx]\n binned_data[latlon_idx[0], latlon_idx[1]] = d\n\n if pressure is not None:\n p = pressure[data_idx]\n binned_pressure[latlon_idx[0], latlon_idx[1]] = p\n\n if pressure is not None:\n return binned_data, binned_pressure\n\n else:\n return binned_data",
"def get_inside_point_ids(gui, ugrid, ugrid_flipped, model_name,\n representation='points'):\n nids = None\n points = ugrid.GetPointData()\n if points is None:\n return ugrid, nids\n\n ids = points.GetArray('Ids')\n if ids is None:\n return ugrid, nids\n\n # all points associated with the correctly selected cells are returned\n # but we get extra points for the cells that are inside and out\n point_ids = vtk_to_numpy(ids)\n nids = gui.get_node_ids(model_name, point_ids)\n\n # these are the points outside the box/frustum (and also include the bad point)\n points_flipped = ugrid_flipped.GetPointData()\n ids_flipped = points_flipped.GetArray('Ids')\n point_ids_flipped = vtk_to_numpy(ids_flipped)\n nids_flipped = gui.get_node_ids(model_name, point_ids_flipped)\n #nids = gui.gui.get_reverse_node_ids(model_name, point_ids_flipped)\n\n # setA - setB\n nids2 = np.setdiff1d(nids, nids_flipped, assume_unique=True)\n\n #narrays = points.GetNumberOfArrays()\n #for iarray in range(narrays):\n #name = points.GetArrayName(iarray)\n #print('iarray=%s name=%r' % (iarray, name))\n\n #------------------\n if representation == 'points':\n # we need to filter the nodes that were filtered by the\n # numpy setdiff1d, so we don't show extra points\n ugrid = create_filtered_point_ugrid(ugrid, nids, nids2)\n\n nids = nids2\n return ugrid, nids"
] | [
"0.69664097",
"0.6491768",
"0.5973075",
"0.58128065",
"0.5637239",
"0.5464887",
"0.5392235",
"0.53456414",
"0.52834636",
"0.5262984",
"0.5244792",
"0.5209711",
"0.51998144",
"0.5157793",
"0.51553285",
"0.5138001",
"0.5137105",
"0.50955933",
"0.50665283",
"0.50569886",
"0.5023663",
"0.5023194",
"0.49990186",
"0.49954703",
"0.49937353",
"0.49887308",
"0.49827206",
"0.49825683",
"0.49541405",
"0.4944831"
] | 0.6496282 | 1 |
locate droplets in a data set on a (periodic) cylindrical grid This function locates droplets respecting periodic boundary conditions. | def _locate_droplets_in_mask_cylindrical(
    grid: CylindricalSymGrid, mask: np.ndarray
) -> Emulsion:
    assert np.all(mask.shape == grid.shape)
    if grid.periodic[1]:
        # locate droplets respecting periodic boundary conditions in z-direction
        # pad the array to simulate periodic boundary conditions
        dim_r, dim_z = grid.shape
        mask_padded = np.pad(mask, [[0, 0], [dim_z, dim_z]], mode="wrap")
        assert mask_padded.shape == (dim_r, 3 * dim_z)
        # locate droplets in the extended image
        candidates = _locate_droplets_in_mask_cylindrical_single(grid, mask_padded)
        grid._logger.info(f"Found {len(candidates)} droplet candidates.")
        # keep droplets that are inside the central area
        droplets = Emulsion(grid=grid)
        for droplet in candidates:
            # correct for the additional padding of the array
            droplet.position[2] -= grid.length
            # check whether the droplet lies in the original box
            if grid.contains_point(droplet.position):
                droplets.append(droplet)
        grid._logger.info(f"Kept {len(droplets)} central droplets.")
        # filter overlapping droplets (e.g. due to duplicates)
        droplets.remove_overlapping()
    else:
        # simply locate droplets in the mask
        droplets = _locate_droplets_in_mask_cylindrical_single(grid, mask)
    return droplets | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _locate_droplets_in_mask_cartesian(\n grid: CartesianGridBase, mask: np.ndarray\n) -> Emulsion:\n if mask.shape != grid.shape:\n raise ValueError(\n f\"The shape {mask.shape} of the data is not compatible with the grid \"\n f\"shape {grid.shape}\"\n )\n\n # pad the array to simulate periodic boundary conditions\n offset = np.array([dim if p else 0 for p, dim in zip(grid.periodic, grid.shape)])\n pad = np.c_[offset, offset].astype(np.intc)\n mask_padded = np.pad(mask, pad, mode=\"wrap\")\n assert np.all(mask_padded.shape == np.array(grid.shape) + 2 * offset)\n\n # locate individual clusters in the padded image\n labels, num_labels = ndimage.label(mask_padded)\n if num_labels == 0:\n return Emulsion([], grid=grid)\n indices = range(1, num_labels + 1)\n\n # create and emulsion from this of droplets\n grid._logger.info(f\"Found {num_labels} droplet candidate(s)\")\n\n # determine position from binary image and scale it to real space\n positions = ndimage.measurements.center_of_mass(mask_padded, labels, index=indices)\n # correct for the additional padding of the array\n positions = grid.cell_to_point(positions - offset)\n\n # determine volume from binary image and scale it to real space\n volumes = ndimage.measurements.sum(mask_padded, labels, index=indices)\n volumes = np.asanyarray(volumes) * np.prod(grid.discretization)\n\n # only retain droplets that are inside the central area\n droplets = (\n SphericalDroplet.from_volume(position, volume)\n for position, volume in zip(positions, volumes)\n if grid.cuboid.contains_point(position)\n )\n\n # filter overlapping droplets (e.g. due to duplicates)\n emulsion = Emulsion(droplets, grid=grid)\n num_candidates = len(emulsion)\n if num_candidates < num_labels:\n grid._logger.info(f\"Only {num_candidates} candidate(s) inside bounds\")\n\n emulsion.remove_overlapping()\n if len(emulsion) < num_candidates:\n grid._logger.info(f\"Only {num_candidates} candidate(s) not overlapping\")\n\n return emulsion",
"def locate_droplets_in_mask(grid: GridBase, mask: np.ndarray) -> Emulsion:\n if isinstance(grid, CartesianGridBase):\n return _locate_droplets_in_mask_cartesian(grid, mask)\n elif isinstance(grid, SphericalSymGridBase):\n return _locate_droplets_in_mask_spherical(grid, mask)\n elif isinstance(grid, CylindricalSymGrid):\n return _locate_droplets_in_mask_cylindrical(grid, mask)\n elif isinstance(grid, GridBase):\n raise NotImplementedError(f\"Locating droplets is not possible for grid {grid}\")\n else:\n raise ValueError(f\"Invalid grid {grid}\")",
"def _locate_droplets_in_mask_cylindrical_single(\n grid: CylindricalSymGrid, mask: np.ndarray\n) -> Emulsion:\n # locate the individual clusters\n labels, num_features = ndimage.label(mask)\n if num_features == 0:\n return Emulsion([], grid=grid)\n\n # locate clusters on the symmetry axis\n object_slices = ndimage.measurements.find_objects(labels)\n indices = []\n for index, slices in enumerate(object_slices, 1):\n if slices[0].start == 0: # contains point on symmetry axis\n indices.append(index)\n else:\n logger = logging.getLogger(grid.__class__.__module__)\n logger.warning(\"Found object not located on symmetry axis\")\n\n # determine position from binary image and scale it to real space\n pos = ndimage.measurements.center_of_mass(mask, labels, index=indices)\n pos = grid.cell_to_point(pos)\n\n # determine volume from binary image and scale it to real space\n vol_r, dz = grid.cell_volume_data\n cell_volumes = vol_r * dz\n vol = ndimage.measurements.sum(cell_volumes, labels, index=indices)\n\n # return an emulsion of droplets\n droplets = (\n SphericalDroplet.from_volume(np.array([0, 0, p[2]]), v)\n for p, v in zip(pos, vol)\n )\n return Emulsion(droplets, grid=grid)",
"def cut_bonds_z_random(xy, NL, KL, BL, target_z, min_coord=2, bulk_determination='Triangulation', check=False):\n print ' Cutting bonds z...'\n NP = len(xy)\n NN = np.shape(NL)[1]\n\n # Identify boundary pts, bulk pts\n print ' cut_bonds_z : extract boundary...'\n boundary = extract_boundary(xy, NL, KL, BL)\n # print 'boundary = ', boundary\n bulk = np.setdiff1d(np.arange(NP), boundary)\n NP_bulk = len(bulk)\n NP_bound = len(np.unique(boundary))\n print 'NP_bound = ', NP_bound\n print 'NP_bulk = ', NP_bulk\n\n if bulk_determination == 'Triangulation':\n # Form indices of BL in bulk. Bulk bonds appear in two simplices.\n # CHANGE THIS TO TEST IF BOND TWO SIMPLICES\n TRI = BL2TRI(BL, xy)\n Binds_list = []\n for ii in range(len(BL)):\n row = BL[ii]\n # get rows of TRI where each elem of row lives\n is_a = np.where(TRI == row[0])[0]\n is_b = np.where(TRI == row[1])[0]\n # The intersection of those rows gives where both live\n simplices = np.intersect1d(is_a, is_b)\n # print 'simplices = ', simplices\n # print 'np.size(simplices) = ', np.size(simplices)\n # If more than one simplex, bulk bond\n if np.size(simplices) < 2:\n # add to boundary list\n Binds_list.append(ii)\n # print ' --> Binds = ', Binds_list\n\n Binds = np.array(Binds_list).ravel()\n # Get the BL indices of bulk bonds --> (binds)\n binds = np.setdiff1d(np.arange(len(BL)), Binds)\n\n elif bulk_determination == 'Endpts':\n # Define bulk bonds as connecting at least one bulk particle\n is_a = np.in1d(BL[:, 0], bulk)\n is_b = np.in1d(BL[:, 1], bulk)\n binds = np.where(np.logical_or(is_a, is_b))[0]\n Binds = np.setdiff1d(np.arange(len(BL)), binds)\n else:\n raise RuntimeError('ERROR: argument <bulk_determination> did not match known method!')\n\n # print 'binds = ', binds\n # print 'Binds = ', Binds\n print 'len(binds) = ', len(binds)\n print 'len(Binds) = ', len(Binds)\n\n # Check\n if check:\n # plt.triplot(xy[:,0], xy[:,1], TRI, 'bo-')\n for bii in binds:\n XX = xy[BL[bii], 0]\n YY = xy[BL[bii], 1]\n plt.plot(XX, YY, 'b-')\n for Bii in Binds:\n XX = xy[BL[Bii], 0]\n YY = xy[BL[Bii], 1]\n plt.plot(XX, YY, 'r-')\n # for i in range(len(xy)):\n # plt.text(xy[i,0]+0.2,xy[i,1],str(i))\n plt.gca().set_aspect('equal')\n plt.show()\n\n # Compute the starting z in the bulk\n countKL = [KL[jj] for jj in bulk]\n # print 'found = ', np.count_nonzero(countKL), ' connections for ', NP_bulk, ' bulk particles...'\n z_start = float(np.count_nonzero(countKL)) / float(NP_bulk)\n print 'z_start = ', z_start\n print 'target_z = ', target_z\n\n # number of bonds to cut in the bulk\n # Be sure to divide the number of bonds by 2, since each bond double counts\n nbulk2cut = int(max([0, round((z_start - target_z) * 0.5 * float(NP_bulk))]))\n print 'nbulk2cut = ', nbulk2cut\n # number of bonds to cut in the boundary = nbulk2cut * (# boundary bonds)/(#bulk bonds)\n nB2cut = int(round(nbulk2cut * float(len(Binds)) / float(len(binds))))\n print 'nB2cut = ', nB2cut\n\n # CUT RANDOM BONDS\n\n ############################################\n ## DO BOUNDARY FIRST --> to avoid dangling particles\n # Choose nB2cut randomly from bulk\n # Shuffle bulk in-place\n np.random.shuffle(Binds)\n # Now work slowly towards selecting nbulk2cut: of the bonds,\n # but ensure that never leave a particle dangling without bonds\n done_cutting = False\n dmyi = 0\n # Set up mask for BL\n mask = np.ones(len(BL), dtype=bool)\n\n #################################\n # # Check :\n # plt.figure()\n # plt.gca().set_aspect('equal')\n # for ii in range(len(BL)):\n # XX = xy[BL[ii],0]\n # YY = 
xy[BL[ii],1]\n # plt.plot(XX, YY, 'b-')\n # plt.text(np.mean(XX), np.mean(YY), str(ii))\n # plt.show()\n #################################\n\n while not done_cutting:\n if len(np.where(mask == False)[0]) == nB2cut:\n done_cutting = True\n else:\n if np.mod(dmyi, 200) == 1:\n print 'cutting boundary bond: pass ', dmyi, ' (need to cut', nB2cut, ')'\n # consider adding dmyi element of bind to cut (make a test list)\n test = copy.deepcopy(mask)\n test[Binds[dmyi]] = False\n BLtmp = BL[test]\n # Check that BL leads to no dangling particles\n KLtmp = BL2KL(BLtmp, NL)\n # if all the rows in KLtmp have at least one nonzero bond, add dmyi to cut\n # print 'KLtmp.any(axis=1) = ', KLtmp.any(axis=1)\n if (np.where(~KLtmp.any(axis=1))[0]).size > 0:\n dmyi += 1\n else:\n mask[Binds[dmyi]] = False\n dmyi += 1\n\n ############################################\n # Choose nbulk2cut randomly from bulk\n # Shuffle bulk in-place\n np.random.shuffle(binds)\n # print 'binds = ', binds\n # Now work slowly towards selecting nbulk2cut: of the bonds,\n # but ensure that never leave a particle dangling without bonds\n done_cutting = False\n dmyi = 0\n while not done_cutting:\n if len(np.where(mask == False)[0]) == nB2cut + nbulk2cut:\n done_cutting = True\n else:\n if np.mod(dmyi, 200) == 1:\n print 'cutting bulk bond: pass ', dmyi, ' (need to cut', nbulk2cut, ')'\n # consider adding dmyi element of bind to cut (make a test list)\n test = copy.deepcopy(mask)\n test[binds[dmyi]] = False\n BLtmp = BL[test]\n # Check that BL leads to no dangling particles\n KLtmp = BL2KL(BLtmp, NL)\n # print 'KL = ', KLtmp\n # print 'np.where(~KLtmp.any(axis=1))[0] = ', np.where(~KLtmp.any(axis=1))[0]\n # if all the rows in KLtmp have at least one nonzero bond, add dmyi to cut\n if (np.where(~KLtmp.any(axis=1))[0]).size > min_coord - 1:\n dmyi += 1\n else:\n mask[binds[dmyi]] = False\n dmyi += 1\n\n # drop the nbulk2cut + nB2cut rows from total Bond List\n BL = BL[mask]\n # print 'BLout = ', BLout\n NL, KL = BL2NLandKL(BL, NN=NN)\n if check:\n display_lattice_2D(xy, BL)\n\n print '\\nReturning lattice with ', len(BL), ' bonds for ', NP, ' particles...'\n print 'KL[bulk] = ', KL[bulk]\n\n return NL, KL, BL",
"def locate_droplets(\n phase_field: ScalarField,\n threshold: Union[float, str] = 0.5,\n modes: int = 0,\n minimal_radius: float = 0,\n refine: bool = False,\n interface_width: Optional[float] = None,\n) -> Emulsion:\n assert isinstance(phase_field, ScalarField)\n dim = phase_field.grid.dim # dimensionality of the space\n\n if modes > 0 and dim not in [2, 3]:\n raise ValueError(\"Perturbed droplets only supported for 2d and 3d\")\n\n # determine actual threshold\n if threshold == \"auto\":\n threshold = float(phase_field.data.min() + phase_field.data.max()) / 2\n else:\n threshold = float(threshold)\n\n # locate droplets in thresholded image\n img_binary = phase_field.data > threshold\n candidates = locate_droplets_in_mask(phase_field.grid, img_binary)\n\n if minimal_radius > -np.inf:\n candidates.remove_small(minimal_radius)\n\n droplets = []\n for droplet in candidates:\n # check whether we need to add the interface width\n droplet_class = droplet.__class__\n args: Dict[str, NumberOrArray] = {}\n\n # change droplet class when interface width is given\n if interface_width is not None:\n droplet_class = DiffuseDroplet\n args[\"interface_width\"] = interface_width\n\n # change droplet class when perturbed droplets are requested\n if modes > 0:\n if dim == 2:\n droplet_class = PerturbedDroplet2D\n elif dim == 3:\n droplet_class = PerturbedDroplet3D\n else:\n raise NotImplementedError(f\"Dimension {dim} is not supported\")\n args[\"amplitudes\"] = np.zeros(modes)\n\n # recreate a droplet of the correct class\n if droplet_class != droplet.__class__:\n droplet = droplet_class.from_droplet(droplet, **args)\n\n # refine droplets if necessary\n if refine:\n try:\n droplet = refine_droplet(phase_field, droplet)\n except ValueError:\n continue # do not add the droplet to the list\n droplets.append(droplet)\n\n # return droplets as an emulsion\n emulsion = Emulsion(droplets, grid=phase_field.grid)\n if minimal_radius > -np.inf:\n emulsion.remove_small(minimal_radius)\n return emulsion",
"def _locate_droplets_in_mask_spherical(\n grid: SphericalSymGridBase, mask: np.ndarray\n) -> Emulsion:\n assert np.all(mask.shape == grid.shape)\n\n # locate clusters in the binary image\n labels, num_labels = ndimage.label(mask)\n if num_labels == 0:\n return Emulsion([], grid=grid)\n\n # locate clusters around origin\n object_slices = ndimage.measurements.find_objects(labels)\n droplet = None\n for slices in object_slices:\n if slices[0].start == 0: # contains point around origin\n radius = grid.cell_to_point(slices[0].stop).flat[-1]\n droplet = SphericalDroplet(np.zeros(grid.dim), radius=radius)\n else:\n logger = logging.getLogger(grid.__class__.__module__)\n logger.warning(\"Found object not located at origin\")\n\n # return an emulsion of droplets\n if droplet:\n return Emulsion([droplet], grid=grid)\n else:\n return Emulsion([], grid=grid)",
"def extract_polygons_lattice(xy, BL, NL=None, KL=None, PVx=None, PVy=None, PVxydict=None, viewmethod=False,\n check=False, eps=1e-10):\n viewmethod = True\n NP = len(xy)\n\n if KL is None or NL is None:\n NL, KL = BL2NLandKL(BL, NP=NP, NN='min')\n if (BL < 0).any():\n if len(PVxydict) > 0:\n PVx, PVy = PVxydict2PVxPVy(PVxydict, NL, KL)\n else:\n raise RuntimeError('Must specify either PVxydict or KL and NL in extract_polygons_lattice()' +\n ' when periodic bonds exist!')\n elif (BL < 0).any():\n if PVx is None or PVy is None:\n if PVxydict is None:\n raise RuntimeError('Must specify either PVxydict or PVx and PVy in extract_polygons_lattice()' +\n ' when periodic bonds exist!')\n else:\n PVx, PVy = PVxydict2PVxPVy(PVxydict, NL, KL)\n\n NN = np.shape(KL)[1]\n # Remove dangling bonds\n # dangling bonds have one particle with only one neighbor\n finished_dangles = False\n while not finished_dangles:\n dangles = np.where([np.count_nonzero(row) == 1 for row in KL])[0]\n if len(dangles) > 0:\n # Check if need to build PVxy dictionary from PVx and PVy before changing NL and KL\n if (BL < 0).any() and len(PVxydict) == 0:\n PVxydict = PVxy2PVxydict(PVx, PVy, NL, KL=KL)\n\n # Make sorted bond list of dangling bonds\n dpair = np.sort(np.array([[d0, NL[d0, np.where(KL[d0] != 0)[0]]] for d0 in dangles]), axis=1)\n # Remove those bonds from BL\n BL = dh.setdiff2d(BL, dpair.astype(BL.dtype))\n # print 'dpair = ', dpair\n # print 'ending BL = ', BL\n NL, KL = BL2NLandKL(BL, NP=NP, NN=NN)\n\n # Now that NL and KL rebuilt (changed), (re)build PVx and PVy if periodic bcs\n if (BL < 0).any():\n if len(PVxydict) > 0:\n PVx, PVy = PVxydict2PVxPVy(PVxydict, NL, KL)\n else:\n finished_dangles = True\n\n if viewmethod or check:\n print 'Plotting result after chopped dangles, if applicable...'\n display_lattice_2D(xy, BL, NL=NL, KL=KL, PVx=PVx, PVy=PVy, PVxydict=PVxydict,\n title='Result after chopping dangling bonds', close=False)\n for i in range(len(xy)):\n plt.text(xy[i, 0] + 0.2, xy[i, 1], str(i))\n plt.show()\n\n # bond markers for counterclockwise, clockwise\n used = np.zeros((len(BL), 2), dtype=bool)\n polygons = []\n finished = False\n if viewmethod:\n f, (ax1, ax2) = plt.subplots(1, 2)\n\n # For periodicity, remember which bonds span periodic boundary\n periB = np.array([(row < 0).any() for row in BL])\n\n if periB.any() and PVxydict is None and (PVx is None or PVy is None):\n raise RuntimeError('Periodic boundaries have been detected, but no periodic vectors supplied to ' +\n 'extract_polygons_lattice()')\n\n if not periB.any():\n print 'no PBCs, calculating polygons...'\n while not finished:\n # Check if all bond markers are used in order A-->B\n # print 'Checking AB (A-->B): '\n todoAB = np.where(~used[:, 0])[0]\n # print 'len(todoAB) = ', len(todoAB)\n # print 'used = ', used\n # print 'todoAB = ', todoAB\n # print polygons\n if len(todoAB) > 0:\n bond = BL[todoAB[0]]\n # if (bond == [21, 22]).all():\n # for todoab in todoAB:\n # ax1.plot([xy[BL[todoab, 0], 0], xy[BL[todoab, 1], 0]],\n # [xy[BL[todoab, 0], 1], xy[BL[todoab, 1], 1]], 'b-', lw=3)\n # todoBA = np.where(~used[:, 1])[0]\n # for todoba in todoBA:\n # ax1.plot([xy[BL[todoba, 0], 0], xy[BL[todoba, 1], 0]],\n # [xy[BL[todoba, 0], 1], xy[BL[todoba, 1], 1]], 'g--')\n # print 'bond = ', bond\n # plt.pause(40)\n # sys.exit()\n\n # bb will be list of polygon indices\n # Start with orientation going from bond[0] to bond[1]\n nxt = bond[1]\n bb = [bond[0], nxt]\n dmyi = 1\n\n # Now mark the new bond that has now been added to bb as used\n # Get 
index of used matching thisbond\n mark_used = np.where((np.logical_or(BL == bb[0], BL == bb[1])).all(axis=1))\n # print 'marking bond [', thisbond, '] as used'\n used[mark_used, 0] = True\n\n ###############\n # check\n if viewmethod:\n ax1.plot(xy[:, 0], xy[:, 1], 'k.')\n ax1.annotate(\"\", xy=(xy[bb[dmyi], 0], xy[bb[dmyi], 1]), xycoords='data',\n xytext=(xy[nxt, 0], xy[nxt, 1]), textcoords='data',\n arrowprops=dict(arrowstyle=\"->\",\n color=\"r\",\n shrinkA=5, shrinkB=5,\n patchA=None,\n patchB=None,\n connectionstyle=\"arc3,rad=0.2\", ), )\n for i in range(len(xy)):\n ax1.text(xy[i, 0] + 0.2, xy[i, 1], str(i))\n ax2.imshow(used)\n ax1.set_aspect('equal')\n ###############\n\n # as long as we haven't completed the full outer polygon, add next index\n while nxt != bond[0]:\n n_tmp = NL[nxt, np.argwhere(KL[nxt]).ravel()]\n # Exclude previous boundary particle from the neighbors array, unless its the only one\n # (It cannot be the only one, if we removed dangling bonds)\n if len(n_tmp) == 1:\n '''The bond is a lone bond, not part of a triangle.'''\n neighbors = n_tmp\n else:\n neighbors = np.delete(n_tmp, np.where(n_tmp == bb[dmyi - 1])[0])\n\n angles = np.mod(np.arctan2(xy[neighbors, 1] - xy[nxt, 1], xy[neighbors, 0] - xy[nxt, 0]).ravel() \\\n - np.arctan2(xy[bb[dmyi - 1], 1] - xy[nxt, 1],\n xy[bb[dmyi - 1], 0] - xy[nxt, 0]).ravel(), 2 * np.pi)\n nxt = neighbors[angles == max(angles)][0]\n bb.append(nxt)\n\n ###############\n # # Check\n # if viewmethod:\n # plt.annotate(\"\", xy=(xy[bb[dmyi],0],xy[bb[dmyi],1] ), xycoords='data',\n # xytext=(xy[nxt,0], xy[nxt,1]), textcoords='data',\n # arrowprops=dict(arrowstyle=\"->\",\n # color=\"r\",\n # shrinkA=5, shrinkB=5,\n # patchA=None,\n # patchB=None,\n # connectionstyle=\"arc3,rad=0.2\",), )\n #\n ###############\n\n # Now mark the new bond that has now been extended (added) as used\n thisbond = [bb[dmyi], bb[dmyi + 1]]\n # Get index of used matching thisbond\n mark_used = np.where((np.logical_or(BL == bb[dmyi], BL == bb[dmyi + 1])).all(axis=1))\n\n # mark_used = np.where((BL == thisbond).all(axis=1))\n if not used[mark_used, 0]:\n # print 'marking bond [', thisbond, '] as used'\n used[mark_used, 0] = True\n else:\n # Get index of used matching reversed thisbond (this list boolean is directional)\n # mark_used = np.where((BL == thisbond[::-1]).all(axis=1))\n # Used this bond in reverse order\n used[mark_used, 1] = True\n # print 'used = ', used\n dmyi += 1\n\n polygons.append(bb)\n ###############\n # Check new polygon\n if viewmethod:\n ax1.plot(xy[:, 0], xy[:, 1], 'k.')\n for i in range(len(xy)):\n ax1.text(xy[i, 0] + 0.2, xy[i, 1], str(i))\n for dmyi in range(len(bb)):\n nxt = bb[np.mod(dmyi + 1, len(bb))]\n ax1.annotate(\"\", xy=(xy[bb[dmyi], 0], xy[bb[dmyi], 1]), xycoords='data',\n xytext=(xy[nxt, 0], xy[nxt, 1]), textcoords='data',\n arrowprops=dict(arrowstyle=\"->\",\n color=\"r\",\n shrinkA=5, shrinkB=5,\n patchA=None,\n patchB=None,\n connectionstyle=\"arc3,rad=0.2\", ), )\n ax2.cla()\n ax2.imshow(used)\n plt.pause(0.00001)\n ###############\n\n else:\n # Check for remaining bonds unused in reverse order (B-->A)\n # print 'CHECKING REVERSE (B-->A): '\n todoBA = np.where(~used[:, 1])[0]\n if len(todoBA) > 0:\n bond = BL[todoBA[0]]\n\n ###############\n # # check\n # if viewmethod:\n # plt.annotate(\"\", xy=(xy[bb[dmyi],0],xy[bb[dmyi],1] ), xycoords='data',\n # xytext=(xy[nxt,0], xy[nxt,1]), textcoords='data',\n # arrowprops=dict(arrowstyle=\"->\",\n # color=\"b\",\n # shrinkA=5, shrinkB=5,\n # patchA=None,\n # patchB=None,\n # 
connectionstyle=\"arc3,rad=0.6\",), )\n # ###############\n\n # bb will be list of polygon indices\n # Start with orientation going from bond[0] to bond[1]\n nxt = bond[0]\n bb = [bond[1], nxt]\n dmyi = 1\n\n # Now mark the new bond that has now been added to bb as used\n # Get index of used matching thisbond\n thisbond = [bb[dmyi], bb[dmyi - 1]]\n mark_used = np.where((BL == thisbond).all(axis=1))\n # print 'marking bond [', thisbond, '] as used'\n used[mark_used, 1] = True\n\n # as long as we haven't completed the full outer polygon, add nextIND\n while nxt != bond[1]:\n n_tmp = NL[nxt, np.argwhere(KL[nxt]).ravel()]\n # Exclude previous boundary particle from the neighbors array, unless its the only one\n # (It cannot be the only one, if we removed dangling bonds)\n if len(n_tmp) == 1:\n '''The bond is a lone bond, not part of a triangle.'''\n neighbors = n_tmp\n else:\n neighbors = np.delete(n_tmp, np.where(n_tmp == bb[dmyi - 1])[0])\n\n angles = np.mod(np.arctan2(xy[neighbors, 1] - xy[nxt, 1], xy[neighbors, 0] - xy[nxt, 0]).ravel() \\\n - np.arctan2(xy[bb[dmyi - 1], 1] - xy[nxt, 1],\n xy[bb[dmyi - 1], 0] - xy[nxt, 0]).ravel(), 2 * np.pi)\n nxt = neighbors[angles == max(angles)][0]\n bb.append(nxt)\n\n ###############\n # Check\n # if viewmethod:\n # plt.annotate(\"\", xy=(xy[bb[dmyi],0],xy[bb[dmyi],1] ), xycoords='data',\n # xytext=(xy[nxt,0], xy[nxt,1]), textcoords='data',\n # arrowprops=dict(arrowstyle=\"->\",\n # color=\"b\",\n # shrinkA=5, shrinkB=5,\n # patchA=None,\n # patchB=None,\n # connectionstyle=\"arc3,rad=0.6\", #connectionstyle,\n # ), )\n ###############\n\n # Now mark the current bond as used --> note the inversion of the bond order to match BL\n thisbond = [bb[dmyi + 1], bb[dmyi]]\n # Get index of used matching [bb[dmyi-1],nxt]\n mark_used = np.where((BL == thisbond).all(axis=1))\n if len(mark_used) > 0:\n used[mark_used, 1] = True\n else:\n raise RuntimeError('Cannot mark polygon bond as used: this bond was already used '\n 'in its attempted orientation. (All bonds in first column '\n 'should already be marked as used.)')\n\n dmyi += 1\n\n polygons.append(bb)\n\n # Check new polygon\n if viewmethod:\n ax1.plot(xy[:, 0], xy[:, 1], 'k.')\n for i in range(len(xy)):\n ax1.text(xy[i, 0] + 0.2, xy[i, 1], str(i))\n for dmyi in range(len(bb)):\n nxt = bb[np.mod(dmyi + 1, len(bb))]\n ax1.annotate(\"\", xy=(xy[bb[dmyi], 0], xy[bb[dmyi], 1]), xycoords='data',\n xytext=(xy[nxt, 0], xy[nxt, 1]), textcoords='data',\n arrowprops=dict(arrowstyle=\"->\",\n color=\"b\",\n shrinkA=5, shrinkB=5,\n patchA=None,\n patchB=None,\n connectionstyle=\"arc3,rad=0.6\", ), )\n ax2.cla()\n ax2.imshow(used)\n plt.pause(0.00001)\n ###############\n\n else:\n # All bonds have been accounted for\n finished = True\n else:\n print 'detected periodicity...'\n # get particles on the finite (non-periodic) system's boundary. 
This allows massive speedup.\n KLfin = np.zeros_like(KL)\n KLfin[KL > 0] = 1\n # Create BLfin to pass to extract_boundary()\n prows = np.where(BL < 0)[0]\n nprows = np.setdiff1d(np.arange(len(BL)), prows)\n if check:\n print 'rows of BL that are periodic: ', prows\n print 'BL[prows] = ', BL[prows]\n BLfin = BL[nprows]\n finbd = extract_boundary(xy, NL, KLfin, BLfin, check=check)\n\n # If there were dangling points in the non-periodic representation, then we need to add those to finbd because\n # they will have periodic bonds attached to them.\n dangles = np.where(~KLfin.any(axis=1))[0]\n print 'dangles = ', dangles\n if len(dangles) > 0:\n print 'Found dangling points in the finite/non-periodic representation. Adding to finbd...'\n finbd = np.hstack((finbd, np.array(dangles)))\n\n if check:\n print 'finite boundary: finbd = ', finbd\n plt.clf()\n display_lattice_2D(xy, BL, NL=NL, KL=KLfin, PVx=PVx, PVy=PVy, PVxydict=PVxydict,\n title='Identified finite boundary', close=False)\n for i in range(len(xy)):\n plt.text(xy[i, 0] + 0.2, xy[i, 1], str(i))\n plt.plot(xy[finbd, 0], xy[finbd, 1], 'ro')\n plt.show()\n first_check = True\n\n # Then erase periodicity in BL\n BL = np.abs(BL)\n\n while not finished:\n if len(polygons) % 20 == 0:\n print 'constructed ', len(polygons), ' polygons...'\n # Check if all bond markers are used in order A-->B\n # print 'Checking AB (A-->B): '\n todoAB = np.where(~used[:, 0])[0]\n # print 'len(todoAB) = ', len(todoAB)\n # print 'used = ', used\n # print 'todoAB = ', todoAB\n if len(todoAB) > 0:\n bond = BL[todoAB[0]]\n\n # bb will be list of polygon indices\n # Start with orientation going from bond[0] to bond[1]\n nxt = bond[1]\n bb = [bond[0], nxt]\n dmyi = 1\n\n # define 'previous angle' as backwards of current angle -- ie angle(prev-current_pos)\n # Must include effect of PV on this angle -- do in ref frame of nxt particle\n PVind = np.argwhere(NL[nxt] == bond[0])[0][0]\n addx = PVx[nxt, PVind]\n addy = PVy[nxt, PVind]\n xyb0 = xy[bond[0], :] + np.array([addx, addy])\n prev_angle = np.arctan2(xyb0[1] - xy[nxt, 1], xyb0[0] - xy[nxt, 0]).ravel()\n\n ###############\n # check\n if viewmethod:\n if first_check:\n ax1.plot(xy[:, 0], xy[:, 1], 'k.')\n for i in range(len(xy)):\n ax1.text(xy[i, 0] + 0.2, xy[i, 1], str(i))\n first_check = False\n\n ax1.annotate(\"\", xy=(xy[bb[dmyi - 1], 0], xy[bb[dmyi - 1], 1]), xycoords='data',\n xytext=(xy[nxt, 0], xy[nxt, 1]), textcoords='data',\n arrowprops=dict(arrowstyle=\"->\",\n color=\"r\",\n shrinkA=5, shrinkB=5,\n patchA=None,\n patchB=None,\n connectionstyle=\"arc3,rad=0.2\", ), )\n ax2.imshow(used, aspect=1. 
/ len(used), interpolation='none')\n ax1.set_aspect('equal')\n ###############\n # define the displacment from the starting point that we have moved so far\n displ = xy[nxt] - xyb0\n\n # as long as we haven't completed the full outer polygon, add next index\n while nxt != bond[0] or abs(displ[0]**2 + displ[1]**2) > eps:\n # print nxt\n # o o neighbors\n # \\ /\n # \\ /\n # o nxt\n # /\n # /\n # o bb[dmyi-1]\n #\n n_tmp = NL[nxt, np.argwhere(KL[nxt]).ravel()]\n # Exclude previous boundary particle from the neighbors array, unless its the only one\n # (It cannot be the only one, if we removed dangling bonds)\n if len(n_tmp) == 1:\n '''The bond is a lone bond, not part of a triangle/polygon.'''\n neighbors = n_tmp\n else:\n # Remove the current particle from the list of its next nearest neighbors\n # Note that we may add this particle back later if bb[dmyi - 1] is its own NNN\n neighbors = np.delete(n_tmp, np.where(n_tmp == bb[dmyi - 1])[0])\n # Here, handle the case where a periodic bond links the neighbor back to the original particle,\n # as in the bond linkage of 0-1-0.\n if len(neighbors) == 0:\n neighbors = n_tmp\n\n # check if neighbors CAN be connected across periodic bc--\n # ie if particle on finite boundary (finbd)\n if nxt in finbd:\n # Since on finite system boundary, particle could have periodic bonds\n # Find x values to add to neighbors, by first getting indices of row of\n # PV (same as of NL) matching neighbors\n # PVinds = [np.argwhere(NL[nxt] == nnn)[0][0] for nnn in neighbors] <--- this assumed no 0-1-0\n PVinds = []\n for nnn in dh.unique_nosort(neighbors):\n okinds = np.ravel(np.argwhere(np.logical_and(NL[nxt] == nnn, np.abs(KL[nxt]) > eps)))\n # print 'neighbors = ', neighbors\n # print 'okinds = ', okinds\n # print 'NL = ', NL\n # print 'KL = ', KL\n # print NL[nxt] == nnn, np.abs(KL[nxt]) > eps\n # print np.argwhere(np.logical_and(NL[nxt] == nnn, np.abs(KL[nxt]) > eps))\n for okind in okinds:\n PVinds.append(okind)\n\n addx = PVx[nxt, PVinds]\n addy = PVy[nxt, PVinds]\n\n # print 'nxt = ', nxt\n # print 'PVinds', PVinds\n # print 'xy[neighbors, :] = ', xy[neighbors, :]\n # print 'np.dstack([addx, addy])[0] = ', np.dstack([addx, addy])[0]\n\n xynb = xy[neighbors, :] + np.dstack([addx, addy])[0]\n xynxt = xy[nxt, :]\n current_angles = np.arctan2(xynb[:, 1] - xynxt[1], xynb[:, 0] - xynxt[0]).ravel()\n angles = np.mod(current_angles - prev_angle, 2 * np.pi)\n\n if check:\n print '\\n'\n print 'particle ', nxt, ' is on finbd'\n print 'nxt = ', nxt\n print 'neighbors = ', neighbors\n print 'xy[neighbors,:] =', xy[neighbors, :]\n print 'addxy = ', np.dstack([addx, addy])[0]\n print 'xynb = ', xynb\n print 'xynxt = ', xynxt\n print 'current_angles = ', current_angles\n print 'prev_angle = ', prev_angle\n print 'angles = ', angles\n print 'redefining nxt = ', neighbors[angles == max(angles)][0]\n\n # redefine previous angle as backwards of current angle -- ie angle(prev-current_pos)\n prev_angletmp = np.arctan2(xynxt[1] - xynb[:, 1], xynxt[0] - xynb[:, 0]).ravel()\n prev_angle = prev_angletmp[angles == max(angles)][0]\n\n # CHECK\n # ax1 = plt.gca()\n # ax1.plot(xy[:,0],xy[:,1],'k.')\n # for i in range(len(xy)):\n # ax1.text(xy[i,0]+0.2,xy[i,1],str(i))\n # plt.show()\n\n else:\n current_angles = np.arctan2(xy[neighbors, 1] - xy[nxt, 1],\n xy[neighbors, 0] - xy[nxt, 0]).ravel()\n angles = np.mod(current_angles - prev_angle, 2 * np.pi)\n # redefine previous angle as backwards of current angle -- ie angle(prev-current_pos)\n # prev_angle = np.arctan2(xy[bb[dmyi-1],1] - xynxt[1], 
xy[bb[dmyi-1],0] - xynxt[0] ).ravel()\n xynxt = xy[nxt, :]\n xynb = xy[neighbors, :]\n prev_angletmp = np.arctan2(xynxt[1] - xy[neighbors, 1], xynxt[0] - xy[neighbors, 0]).ravel()\n prev_angle = prev_angletmp[angles == max(angles)][0]\n\n nxt = neighbors[angles == max(angles)][0]\n bb.append(nxt)\n # update displacement\n displ += xynb[angles == max(angles)][0] - xynxt\n\n ###############\n # Check bond\n if viewmethod:\n # Check individually\n # ax1 = plt.gca()\n # ax1.plot(xy[:,0],xy[:,1],'k.')\n if first_check:\n for i in range(len(xy)):\n ax1.text(xy[i, 0] + 0.2, xy[i, 1], str(i))\n\n plt.annotate(\"\", xy=(xy[bb[dmyi], 0], xy[bb[dmyi], 1]), xycoords='data',\n xytext=(xy[nxt, 0], xy[nxt, 1]), textcoords='data',\n arrowprops=dict(arrowstyle=\"->\",\n color=\"r\",\n shrinkA=5, shrinkB=5,\n patchA=None,\n patchB=None,\n connectionstyle=\"arc3,rad=0.2\", ), )\n\n ###############\n\n # Now mark the current bond as used\n # thisbond = [bb[dmyi-1], bb[dmyi]]\n # Get index of used matching thisbond\n mark_used = np.where((np.logical_or(BL == bb[dmyi - 1], BL == bb[dmyi])).all(axis=1))[0]\n # mark_used = np.where((BL == thisbond).all(axis=1))\n # print 'mark_used = ', mark_used\n # I adjusted the line below to allow multiple entries in mark_used (2018-04-26)'\n if not (used[mark_used, 0]).all():\n # print 'marking bond [', thisbond, '] as used'\n marking, kk = True, 0\n while marking:\n if not used[mark_used[kk], 0]:\n used[mark_used[kk], 0] = True\n marking = False\n kk += 1\n else:\n # Get index of used matching reversed thisbond (this list boolean is directional)\n # mark_used = np.where((BL == thisbond[::-1]).all(axis=1))\n # Used this bond in reverse order\n marking, kk = True, 0\n while marking:\n print 'mark_used = ', mark_used\n print 'mark_used[kk] = ', mark_used[kk]\n print 'used[mark_used[kk]] = ', used[mark_used[kk]]\n print '--------------------------'\n if not used[mark_used[kk], 1]:\n used[mark_used[kk], 1] = True\n marking = False\n # except IndexError:\n # print 'mark_used = ', mark_used\n # print 'used[mark_used] = ', used[mark_used[kk]]\n # print 'marking bond ', BL[mark_used[kk]]\n # print 'kk = ', kk\n # print 'bb = ', bb\n # print 'Encountered index error in marking bond used'\n # plt.show()\n # sys.exit()\n kk += 1\n if kk == len(mark_used):\n marking = False\n\n # print 'used = ', used\n dmyi += 1\n if check:\n print 'bb = ', bb\n\n polygons.append(bb)\n ###############\n # Check new polygon\n if viewmethod:\n if first_check:\n ax1.plot(xy[:, 0], xy[:, 1], 'k.')\n for i in range(len(xy)):\n ax1.text(xy[i, 0] + 0.2, xy[i, 1], str(i))\n\n for dmyi in range(len(bb)):\n nxt = bb[np.mod(dmyi + 1, len(bb))]\n ax1.annotate(\"\", xy=(xy[bb[dmyi], 0], xy[bb[dmyi], 1]), xycoords='data',\n xytext=(xy[nxt, 0], xy[nxt, 1]), textcoords='data',\n arrowprops=dict(arrowstyle=\"->\",\n color=\"r\",\n shrinkA=5, shrinkB=5,\n patchA=None,\n patchB=None,\n connectionstyle=\"arc3,rad=0.2\", ), )\n ax2.cla()\n ax2.imshow(used, aspect=1. 
/ len(used), interpolation='none')\n print 'polygons = ', polygons\n # plt.show()\n plt.pause(0.00001)\n ###############\n\n else:\n # Check for remaining bonds unused in reverse order (B-->A)\n # print 'CHECKING REVERSE (B-->A): '\n todoBA = np.where(~used[:, 1])[0]\n # print 'len(todoBA) = ', len(todoBA)\n if len(todoBA) > 0:\n bond = BL[todoBA[0]]\n\n ###############\n # # check\n if viewmethod:\n plt.annotate(\"\", xy=(xy[bb[dmyi], 0], xy[bb[dmyi], 1]), xycoords='data',\n xytext=(xy[nxt, 0], xy[nxt, 1]), textcoords='data',\n arrowprops=dict(arrowstyle=\"->\",\n color=\"b\",\n shrinkA=5, shrinkB=5,\n patchA=None,\n patchB=None,\n connectionstyle=\"arc3,rad=0.6\", ), )\n # ###############\n\n # bb will be list of polygon indices\n # Start with orientation going from bond[0] to bond[1]\n nxt = bond[0]\n bb = [bond[1], nxt]\n dmyi = 1\n\n # define 'previous angle' as backwards of current angle -- ie angle(prev-current_pos)\n # Must include effect of PV on this angle -- do in ref frame of nxt particle\n PVind = np.argwhere(NL[nxt] == bond[1])[0][0]\n addx = PVx[nxt, PVind]\n addy = PVy[nxt, PVind]\n xyb0 = xy[bond[1], :] + np.array([addx, addy])\n prev_angle = np.arctan2(xyb0[1] - xy[nxt, 1], xyb0[0] - xy[nxt, 0]) # .ravel()\n\n # as long as we haven't completed the full outer polygon, add nextIND\n # define the displacment from the starting point that we have moved so far\n displ = xy[nxt] - xyb0\n\n # as long as we haven't completed the full outer polygon, add next index\n while nxt != bond[1] or abs(displ[0] ** 2 + displ[1] ** 2) > eps:\n n_tmp = NL[nxt, np.argwhere(KL[nxt]).ravel()]\n # Exclude previous boundary particle from the neighbors array, unless its the only one\n # (It cannot be the only one, if we removed dangling bonds)\n if len(n_tmp) == 1:\n '''The bond is a lone bond, not part of a triangle.'''\n neighbors = n_tmp\n else:\n neighbors = np.delete(n_tmp, np.where(n_tmp == bb[dmyi - 1])[0])\n # Add neighbors back in if this bond is not dangling but we have a NNN structure of 0-1-0\n if len(neighbors) == 0:\n neighbors = n_tmp\n\n ########\n # check if neighbors CAN be connected across periodic bc-- ie if particle is\n # on the finite boundary (finbd)\n if nxt in finbd:\n # Since on finite system boundary, particle could have periodic bonds\n # Find x values to add to neighbors, by first getting indices of row of PV\n # (same as of NL) matching neighbors\n # ALL CALCS in frame of reference of NXT particle\n # PVinds = [np.argwhere(NL[nxt] == nnn)[0][0] for nnn in neighbors]\n PVinds = []\n for nnn in dh.unique_nosort(neighbors):\n okinds = np.ravel(np.argwhere(np.logical_and(NL[nxt] == nnn, np.abs(KL[nxt]) > eps)))\n for okind in okinds:\n PVinds.append(okind)\n\n addx = PVx[nxt, PVinds]\n addy = PVy[nxt, PVinds]\n\n xynb = xy[neighbors, :] + np.dstack([addx, addy])[0]\n xynxt = xy[nxt, :]\n # print '\\n'\n # print 'nxt = ', nxt\n # print 'neighbors = ', neighbors\n # print 'xy[neighbors,:] =', xy[neighbors,:]\n # print 'addxy = ', np.dstack([addx, addy])[0]\n # print 'xynb = ', xynb\n # print 'xynxt = ', xynxt\n current_angles = np.arctan2(xynb[:, 1] - xynxt[1], xynb[:, 0] - xynxt[0]).ravel()\n angles = np.mod(current_angles - prev_angle, 2 * np.pi)\n selectIND = np.where(angles == max(angles))[0][0]\n # print 'selectIND = ', selectIND\n # print 'current_angles = ', current_angles/np.pi\n # print 'prev_angle = ', prev_angle/np.pi\n # print 'angles = ', angles/np.pi\n\n # redefine previous angle as backwards of current angle -- ie angle(nxt - neighbor )\n prev_angletmp = 
np.arctan2(xynxt[1] - xynb[:, 1], xynxt[0] - xynb[:, 0]).ravel()\n prev_angle = prev_angletmp[selectIND]\n\n # print 'new prev_angle = ', prev_angle/np.pi\n # print 'NL[nxt] = ', NL[nxt]\n # print 'bb = ', bb\n # # CHECK\n # ax1 = plt.gca()\n # ax1.plot(xy[:,0],xy[:,1],'k.')\n # for i in range(len(xy)):\n # ax1.text(xy[i,0]+0.2,xy[i,1],str(i))\n # plt.arrow(xynxt[0], xynxt[1], np.cos(angles[selectIND]),\n # np.sin(angles[selectIND]),fc='r', ec='r')\n # plt.arrow(xynb[selectIND,0], xynb[selectIND,1],\n # np.cos(prev_angle), np.sin(prev_angle),fc='b', ec='b')\n # plt.show()\n\n else:\n current_angles = np.arctan2(xy[neighbors, 1] - xy[nxt, 1],\n xy[neighbors, 0] - xy[nxt, 0]).ravel()\n angles = np.mod(current_angles - prev_angle, 2 * np.pi)\n # redefine previous angle as backwards of current angle -- ie angle(prev-current_pos)\n xynxt = xy[nxt, :]\n xynb = xy[neighbors, :]\n prev_angletmp = np.arctan2(xynxt[1] - xynb[:, 1], xynxt[0] - xynb[:, 0]).ravel()\n selectIND = np.where(angles == max(angles))[0][0]\n # print '\\n'\n # print 'nxt = ', nxt\n # print 'bb = ', bb\n # print 'neighbors = ', neighbors\n # print 'current_angles = ', current_angles/np.pi\n # print 'prev_angle = ', prev_angle/np.pi\n # print 'angles = ', angles/np.pi\n # print 'selectIND = ', selectIND\n # print('xynxt[1] - xynb[:,1], xynxt[0] - xynb[:,0] = ', xynxt[1] - xynb[:,1],\n # xynxt[0] - xynb[:,0])\n # print('np.arctan2(xynxt[1] - xynb[:,1], xynxt[0] - xynb[:,0]) = ',\n # np.arctan2(xynxt[1] - xynb[:,1], xynxt[0] - xynb[:,0]))\n # print 'prev_angletmp = ', prev_angletmp/np.pi\n\n prev_angle = prev_angletmp[selectIND]\n # print 'new prev_angle = ', prev_angle/np.pi\n\n ###############\n nxt = neighbors[angles == max(angles)][0]\n bb.append(nxt)\n # update displacement of particle at nxt from first site (keeping track of periodic bonds)\n displ += xynb[angles == max(angles)][0] - xynxt\n\n ###############\n # Check\n if viewmethod:\n # If checking individual bonds\n # ax1 = plt.gca()\n # ax1.plot(xy[:,0],xy[:,1],'k.')\n # for i in range(len(xy)):\n # ax1.text(xy[i,0]+0.2,xy[i,1],str(i))\n\n plt.annotate(\"\", xy=(xy[bb[dmyi], 0], xy[bb[dmyi], 1]), xycoords='data',\n xytext=(xy[nxt, 0], xy[nxt, 1]), textcoords='data',\n arrowprops=dict(arrowstyle=\"->\",\n color=\"b\",\n shrinkA=5, shrinkB=5,\n patchA=None,\n patchB=None,\n connectionstyle=\"arc3,rad=0.6\",\n ), )\n # plt.show()\n ###############\n\n # Now mark the current bond as used --> note the inversion of the bond order to match BL\n thisbond = [bb[dmyi], bb[dmyi - 1]]\n # Get index of used matching [bb[dmyi-1],nxt]\n mark_used = np.where((BL == thisbond).all(axis=1))\n if len(mark_used) > 0:\n used[mark_used, 1] = True\n else:\n messg = 'Cannot mark polygon bond as used: this bond was already used in its attempted' + \\\n ' orientation. 
(All bonds in first column should already be marked as used.)'\n raise RuntimeError(messg)\n\n dmyi += 1\n\n polygons.append(bb)\n # print 'added polygon = ', bb\n\n # Check new polygon\n if viewmethod:\n if first_check:\n ax1.plot(xy[:, 0], xy[:, 1], 'k.')\n for i in range(len(xy)):\n ax1.text(xy[i, 0] + 0.2, xy[i, 1], str(i))\n\n for dmyi in range(len(bb)):\n nxt = bb[np.mod(dmyi + 1, len(bb))]\n ax1.annotate(\"\", xy=(xy[bb[dmyi], 0], xy[bb[dmyi], 1]), xycoords='data',\n xytext=(xy[nxt, 0], xy[nxt, 1]), textcoords='data',\n arrowprops=dict(arrowstyle=\"->\",\n color=\"b\",\n shrinkA=5, shrinkB=5,\n patchA=None,\n patchB=None,\n connectionstyle=\"arc3,rad=0.6\", ), )\n ax2.cla()\n ax2.imshow(used)\n # plt.show()\n plt.pause(0.0001)\n ###############\n\n else:\n # All bonds have been accounted for\n print 'all finished with finding polygons...'\n finished = True\n # check\n if viewmethod:\n plt.show()\n\n # Check for duplicates (up to cyclic permutations and inversions) in polygons\n # Note that we need to ignore the last element of each polygon (which is also starting pt)\n keep = np.ones(len(polygons), dtype=bool)\n for ii in range(len(polygons)):\n print 'ii = ', ii\n polyg = polygons[ii]\n for p2 in polygons[ii + 1:]:\n if is_cyclic_permutation(polyg[:-1], p2[:-1]):\n keep[ii] = False\n\n polygons = [polygons[i] for i in np.where(keep)[0]]\n\n # Remove duplicates via inversion (maybe not necessary?)\n\n # Remove the polygon which is the entire lattice boundary, except dangling bonds\n if not periB.any():\n print 'le.extract_polygons_lattice: Removing entire lattice boundary from list of polygons...'\n boundary = extract_boundary(xy, NL, KL, BL)\n # print 'boundary = ', boundary\n keep = np.ones(len(polygons), dtype=bool)\n for ii in range(len(polygons)):\n polyg = polygons[ii]\n if is_cyclic_permutation(polyg[:-1], boundary.tolist()):\n keep[ii] = False\n elif is_cyclic_permutation(polyg[:-1], boundary[::-1].tolist()):\n keep[ii] = False\n\n polygons = [polygons[i] for i in np.where(keep)[0]]\n\n # Check order of each polygon so that it is oriented counterclockwise\n # for polys in polygons:\n # angle_poly = 0\n # # Make sure that oriented counterclockwise\n # print 'polys = ', polys\n # for i in range(len(polys)):\n # p0 = polys[ np.mod(i-1, len(polys)-1)]\n # p1 = polys[i]\n # p2 = polys[ np.mod(i+1,len(polys)-1) ]\n # print 'p0,p1,p2 = ', p0, p1, p2\n # angle_tmp = np.mod(np.arctan2(xy[p2,1]-xy[p1,1], xy[p2,0]-xy[p1,0]) - np.arctan2( xy[p1,1]-xy[p0,1],\n # xy[p1,0]-xy[p0,0] ), 2*np.pi)\n # print 'angle_tmp = ', angle_tmp\n # angle_poly += angle_tmp\n #\n # print 'angle = ', angle_poly/6.\n print 'le: polygons = ', polygons\n if check:\n polygons2PPC(xy, polygons, BL=BL, PVxydict=PVxydict, check=True)\n\n return polygons",
"def extract_boundary(xy, NL, KL, BL, check=False):\n # Clear periodic bonds from KL\n pbonds = np.where(KL.ravel() < 0)[0]\n if len(pbonds) > 0:\n print 'le: Found periodic bonds in le.extract_boundary(), clearing...'\n KLr = KL.ravel()\n KLr[pbonds] = 0\n KL = KLr.reshape(np.shape(KL))\n print 'le: pbonds = ', pbonds\n\n # If there are dangling points, remove them for now and adjust indices later\n dangles = np.where(~KL.any(axis=1))[0]\n if len(dangles) > 0:\n print 'le: extract_boundary: Removing dangling points: dangles = ', dangles\n if check:\n plt.plot(xy[:, 0], xy[:, 1], 'b.')\n for ii in range(len(xy)):\n plt.text(xy[ii, 0] + 0.1, xy[ii, 1], str(ii))\n plt.plot(xy[dangles, 0], xy[dangles, 1], 'ro')\n plt.title('Original point indices, before removing dangles. Dangles circled in red.')\n plt.show()\n\n translate_at_end = True\n\n NP = len(xy)\n\n nondangles = np.setdiff1d(np.arange(NP), dangles)\n # Note that remove_pts can handle periodic BL\n\n if len(nondangles) == 0:\n print 'There are no particles that are not part of dangling bonds. All particles are part of the boundary.'\n return np.arange(len(xy))\n\n xy, NL, KL, BL, PVxydict = remove_pts(nondangles, xy, BL)\n\n # Remove bonds which were periodic.\n pbonds = np.where(KL.ravel() < 0)[0]\n print 'le: pbonds = ', pbonds\n if pbonds:\n print 'le: Found periodic bonds in extract_boundary(), clearing...'\n KLr = KL.ravel()\n KLr[pbonds] = 0\n KL = KLr.reshape(np.shape(KL))\n print 'le: pbonds = ', pbonds\n\n if check:\n print 'le: NL = ', NL\n display_lattice_2D(xy, BL, NL=NL, KL=KL, title='Removed points in extract_boundary()')\n\n # xy = xy[nondangles]\n # NL = NL[nondangles]\n # KL = KL[nondangles]\n\n # translation converts indices of long old xy to small new xy\n # backtrans converts indices of small, new xy to indices of long, old xy\n # .1 .0\n # .0 trans ----->\n # . 
2 <----- backtrans .1\n # .3 .2\n translation = np.arange(NP, dtype=int)\n for IND in dangles:\n translation[IND:] -= 1\n # mark the removed point by -5\n translation[IND] = -5\n\n backtrans = np.where(translation > -1)[0]\n if check:\n print 'le: backtrans = ', backtrans\n print 'le: translation = ', translation\n\n # translation = np.where()\n\n else:\n translate_at_end = False\n\n # Initialize the list of boundary indices to be larger than necessary\n bb = np.zeros(2 * len(xy), dtype=int)\n\n # Start with the rightmost point, which is guaranteed to be\n # at the convex hull and thus also at the outer edge.\n # Then take the first step to be along the minimum angle bond\n rightIND = np.where(xy[:, 0] == max(xy[:, 0]))[0]\n # If there are more than one rightmost point, choose one\n if rightIND.size > 1:\n rightIND = rightIND[0]\n\n if check:\n print 'le.extract_boundary(): Found rightmost pt: ', rightIND\n print 'le.extract_boundary(): with neighbors: ', NL[rightIND]\n print 'le.extract_boundary(): with connectns: ', KL[rightIND]\n plt.plot(xy[:, 0], xy[:, 1], 'k.')\n plt.plot(xy[rightIND, 0], xy[rightIND, 1], 'bo')\n for ii in range(len(xy)):\n plt.text(xy[ii, 0] + 0.1, xy[ii, 1], str(ii))\n plt.plot(xy[rightIND, 0], xy[rightIND, 1], 'ro')\n plt.pause(0.01)\n\n # Grab the true neighbors of this starting point\n print 'le.extract_boundary(): NL[rightIND, :] = ', NL[rightIND, :]\n neighbors = NL[rightIND, np.argwhere(KL[rightIND].ravel()).ravel()]\n print 'le.extract_boundary(): neighbors = ', neighbors\n print 'le.extract_boundary(): rightIND = ', rightIND\n\n # Compute the angles of the neighbor bonds\n angles = np.mod(np.arctan2(xy[neighbors, 1] - xy[rightIND, 1], xy[neighbors, 0] - xy[rightIND, 0]).ravel(),\n 2 * np.pi)\n if check:\n print 'KL[rightIND] = ', KL[rightIND]\n print 'KL[rightIND,0] = ', KL[rightIND, 0]\n print 'KL[rightIND,0] ==0 ', KL[rightIND, 0] == 0\n print 'np.argwhere(KL[rightIND]) = ', np.argwhere(KL[rightIND])\n print 'np.argwhere(KL[rightIND].ravel())= ', np.argwhere(KL[rightIND].ravel())\n print 'neighbors = ', neighbors\n print 'angles = ', angles\n\n # Take the second particle to be the one with the lowest bond angle (will be >= pi/2)\n # print ' angles==min--> ', angles==min(angles)\n nextIND = neighbors[angles == min(angles)][0]\n bb[0] = rightIND\n\n dmyi = 1\n # as long as we haven't completed the full outer edge/boundary, add nextIND\n while nextIND != rightIND:\n # print '\\n nextIND = ', nextIND\n # print 'np.argwhere(KL[nextIND]) = ', np.argwhere(KL[nextIND]).ravel()\n bb[dmyi] = nextIND\n angles, neighbors = bond_angles_wrt_bond(bb[dmyi - 1], nextIND, xy, NL, KL)\n nextIND = neighbors[angles == min(angles)][0]\n # print 'nextIND = ', nextIND\n\n if check:\n # plt.plot(xy[:,0],xy[:,1],'k.')\n XY = np.vstack([xy[bb[dmyi], :], xy[nextIND, :]])\n plt.plot(XY[:, 0], XY[:, 1], 'r-')\n # for i in range(len(xy)):\n # plt.text(xy[i,0]+0.2,xy[i,1],str(i))\n plt.gca().set_aspect('equal')\n plt.pause(0.01)\n\n dmyi += 1\n\n # Truncate the list of boundary indices\n boundary = bb[0:dmyi]\n\n # Since some points were removed from the boundary identification, translate\n # indices back to indices of original xy\n if translate_at_end:\n print 'le.extract_boundary(): Translating boundary points back into original indices...'\n # print 'boundary = ', boundary\n # print 'translation = ', translation\n # print 'backtrans = ', backtrans\n boundary = backtrans[boundary]\n\n return boundary",
"def find_cut_bonds(BL, keep):\n # ensure that keep is int array of indices, not bool\n if keep.dtype == 'bool':\n print 'converting bool keep to int array...'\n keep = np.where(keep)[0]\n\n # Make output BLcut and the indices of BL that are cut (cutIND)\n # Find rows of BL for which both elems are in keep\n inBL0 = np.in1d(np.abs(BL[:, 0]), keep)\n inBL1 = np.in1d(np.abs(BL[:, 1]), keep)\n cutIND = np.logical_xor(inBL0, inBL1)\n BLcut = BL[cutIND, :]\n\n return BLcut, cutIND",
"def buffered_pts_to_periodicstrip(xy, BL, LL, BBox='auto', check=False):\n if BBox == 'auto':\n # Assuming that BBox is centered and has width, height of LL[0], LL[1]\n BBox = 0.5 * np.array([[-LL[0], -LL[1]], [LL[0], -LL[1]], [LL[0], LL[1]], [-LL[0], LL[1]]])\n keep = np.where(np.logical_and(abs(xy[:, 0]) < LL[0] * 0.5, abs(xy[:, 1]) < LL[1] * 0.5))[0]\n else:\n bpath = mplpath.Path(BBox)\n keep = np.where(bpath.contains_points(xy))[0]\n if check:\n print 'checking that keep is not a logical ==> '\n print ' this would be bool keep = ', bpath.contains_points(xy)\n print ' and this is keep = ', keep\n\n minX = np.min(BBox[:, 0])\n maxX = np.max(BBox[:, 0])\n minY = np.min(BBox[:, 1])\n maxY = np.max(BBox[:, 1])\n PVdict = {'e': np.array([LL[0], 0.0]),\n 'n': np.array([0.0, LL[1]]),\n 'w': np.array([-LL[0], 0.0]),\n 's': np.array([0.0, -LL[1]]),\n 'ne': np.array([LL[0], LL[1]]),\n 'nw': np.array([-LL[0], LL[1]]),\n 'sw': np.array([-LL[0], -LL[1]]),\n 'se': np.array([LL[0], -LL[1]])}\n\n # Create a kd tree of the points\n tree = scipy.spatial.KDTree(xy)\n\n # Find bonds that will be cut. For each bond, match to other particle and add pair to BL and PVxydict\n BLcut, cutIND = find_cut_bonds(BL, keep)\n\n if check:\n plt.scatter(xy[:, 0], xy[:, 1], c='g', marker='x')\n plt.scatter(xy[keep, 0], xy[keep, 1], c='b', marker='o')\n highlight_bonds(xy, BL, ax=plt.gca(), color='b', show=False)\n highlight_bonds(xy, BLcut, ax=plt.gca(), color='r', lw=5, alpha=0.4, show=False)\n xxtmp = np.hstack((BBox[:, 0], np.array(BBox[:, 0])))\n print 'xxtmp = ', xxtmp\n yytmp = np.hstack((BBox[:, 1], np.array(BBox[:, 1])))\n print 'yytmp = ', yytmp\n plt.plot(xxtmp, yytmp, 'k-', lw=1)\n plt.title('Showing bonds that are cut, btwn original and mirrored network')\n plt.show()\n\n # preallocate BL2add and PVs\n BL2add = np.zeros((len(BLcut), 2), dtype=int)\n PVd = {} # = np.zeros((len(BLcut),2), dtype=float)\n kk = 0\n for bond in BLcut:\n # which endpt is outside?\n ptA = bond[0]\n ptB = bond[1]\n # mpt is short for 'mirror point', the point outside the bounding box\n if ptA not in keep:\n mpt, kpt = ptA, ptB\n else:\n mpt, kpt = ptB, ptA\n\n # Assume that the bond should remain broken unless the PV is 'e' or 'w' (east or west)\n ok_stripbc = False\n if xy[mpt, 0] < minX:\n if xy[mpt, 1] < minY:\n # Mirror particle is SW\n PV = PVdict['sw']\n elif xy[mpt, 1] > maxY:\n # Mirror particle is NW\n PV = PVdict['nw']\n else:\n # Mirror particle is West\n PV = PVdict['w']\n ok_stripbc = True\n elif xy[mpt, 0] > maxX:\n if xy[mpt, 1] < minY:\n # Mirror particle is SE\n PV = PVdict['se']\n elif xy[mpt, 1] > maxY:\n # Mirror particle is NE\n PV = PVdict['ne']\n else:\n # Mirror particle is East\n PV = PVdict['e']\n ok_stripbc = True\n elif xy[mpt, 1] < minY:\n # Mirror particle is South\n PV = PVdict['s']\n else:\n # Mirror particle is North\n PV = PVdict['n']\n\n if ok_stripbc:\n # Get index of the particle that resides a vector -PV away from mirror particle\n dist, ind = tree.query(xy[mpt] - PV)\n if (kpt, ind) not in PVd and (ind, kpt) not in PVd:\n BL2add[kk] = np.array([-kpt, -ind])\n PVd[(kpt, ind)] = PV\n print 'adding (kpt, ind) = ', (kpt, ind)\n kk += 1\n\n BL2add = BL2add[0:kk]\n\n if check:\n print 'PVd = ', PVd\n display_lattice_2D(xy, np.abs(BL), title=\"showing extended lattice (w/o strip PBCs)\")\n\n # Crop network, and add back cut bonds as periodic ones\n BL = np.vstack((BL, BL2add))\n xytrim, NL, KL, BLtrim, PVxydict = remove_pts(keep, xy, BL)\n # Adjusting BL2add to account for smaller #npts 
(post-cropping) is already done in remove_pts\n # Adjust PVs to account for smaller #npts (post-cropping)\n remove = np.setdiff1d(np.arange(len(xy)), keep)\n PVxydict = {}\n for key in PVd:\n # adjust key to lower indices\n # count how many pts in remove are lower than key[0] and key[1], respectively\n lower0 = np.sum(remove < key[0])\n lower1 = np.sum(remove < key[1])\n newkey = (key[0] - lower0, key[1] - lower1)\n PVxydict[newkey] = PVd[key]\n\n if check:\n # Plot lattice without PBCs\n display_lattice_2D(xytrim, np.abs(BLtrim), title=\"showing lattice connectivity w/o strip PBCs\")\n display_lattice_2D(xytrim, BLtrim, PVxydict=PVxydict, title=\"showing lattice connectivity with strip PBCs\")\n\n return xytrim, NL, KL, BLtrim, PVxydict",
"def find_dirac_nodes():\n\n vasprun = Vasprun('vasprun.xml')\n dirac = False\n if vasprun.get_band_structure().get_band_gap()['energy'] < 0.1:\n efermi = vasprun.efermi\n bsp = BSPlotter(vasprun.get_band_structure('KPOINTS', line_mode=True,\n efermi=efermi))\n bands = []\n data = bsp.bs_plot_data(zero_to_efermi=True)\n for d in range(len(data['distances'])):\n for i in range(bsp._nb_bands):\n x = data['distances'][d],\n y = [data['energy'][d][str(Spin.up)][i][j]\n for j in range(len(data['distances'][d]))]\n band = [x, y]\n bands.append(band)\n\n considered = []\n for i in range(len(bands)):\n for j in range(len(bands)):\n if i != j and (j, i) not in considered:\n considered.append((j, i))\n for k in range(len(bands[i][0])):\n if ((-0.1 < bands[i][1][k] < 0.1) and\n (-0.1 < bands[i][1][k] - bands[j][1][k] < 0.1)):\n dirac = True\n return dirac",
"def cut_bonds(BL, xy, thres):\n i2cut = (xy[BL[:, 0], 0] - xy[BL[:, 1], 0]) ** 2 + (xy[BL[:, 0], 1] - xy[BL[:, 1], 1]) ** 2 < thres ** 2\n BLtrim = BL[i2cut]\n return BLtrim",
"def droplet(r_drop=0.02): # [dm]\n alpha_pom = float(76.8)\n r_real = r_drop / np.sin(alpha_pom) # [dm]\n height = r_real * (1 - np.cos(alpha_pom)) # [dm]\n s_drop = np.pi * (4 * r_real * height - height ** 2) # [dm2]\n v_drop = np.pi * height ** 2 * (r_real - height / 3) # [dm3]\n s0 = np.pi * r_drop ** 2 # [dm2]\n return s_drop, v_drop, s0 # , h_max, s_max, v_max, s1",
"def detect_dirac_spikes(spikes):\n # If a single slice is considered, insure we have a two-dimention spikes\n # array\n if spikes.ndim == 1:\n spikes.shape += (1, )\n\n # Deal with the first column\n first_row_diracs = np.logical_and((spikes[0, :] == 1), (spikes[1, :] == 0))\n\n # Deal with the last column\n last_row_diracs = np.logical_and((spikes[-1, :] == 1), (spikes[-2, :] == 0))\n\n # Deal now with the rest\n nb_of_timepoints = spikes.shape[0]\n others = np.logical_and((spikes[1: nb_of_timepoints - 1, :] == 1),\n (spikes[2: nb_of_timepoints, :] == 0))\n others = np.logical_and((spikes[0: nb_of_timepoints - 2, :] == 0), others)\n\n # Concatenate the result\n diracs = np.vstack((first_row_diracs, others, last_row_diracs))\n\n return diracs",
"def cut_bonds_z_highest(xy, NL, KL, BL, target_z, check=False):\n print ' Cutting bonds z...'\n NP = len(xy)\n NN = np.shape(NL)[1]\n\n # Identify boundary pts, bulk pts\n print ' cut_bonds_z : extract boundary...'\n boundary = extract_boundary(xy, NL, KL, BL)\n # print 'boundary = ', boundary\n bulk = np.setdiff1d(np.arange(NP), boundary)\n NP_bulk = len(bulk)\n NP_bound = len(np.unique(boundary))\n print 'NP_bound = ', NP_bound\n print 'NP_bulk = ', NP_bulk\n\n # Define bulk bonds as connecting at least one bulk particle\n is_a = np.in1d(BL[:, 0], bulk)\n is_b = np.in1d(BL[:, 1], bulk)\n binds = np.where(np.logical_or(is_a, is_b))[0]\n Binds = np.setdiff1d(np.arange(len(BL)), binds)\n BLbulk = BL[binds]\n BLboun = BL[Binds]\n\n # bBinds bonds connect bulk to boundary\n # Treat these as is connecting bulk(z) to bulk(z)\n bBinds = np.where(np.logical_xor(is_a, is_b))[0]\n BLbB = BL[bBinds]\n\n print 'len(binds) = ', len(binds)\n print 'len(Binds) = ', len(Binds)\n\n # Check\n if check:\n # plt.triplot(xy[:,0], xy[:,1], TRI, 'bo-')\n for bii in binds:\n XX = xy[BL[bii], 0]\n YY = xy[BL[bii], 1]\n plt.plot(XX, YY, 'b-')\n\n for Bii in Binds:\n XX = xy[BL[Bii], 0]\n YY = xy[BL[Bii], 1]\n plt.plot(XX, YY, 'r-')\n\n # for i in range(len(xy)):\n # plt.text(xy[i,0]+0.2,xy[i,1],str(i))\n plt.gca().set_aspect('equal')\n plt.show()\n\n # number of bonds to cut in the bulk\n # Be sure to divide the number of bonds by 2, since each bond double counts\n # Can write in terms of bonds? 2have = zt\n # nbulk2cut = int(max([0,round((z_start - target_z)*0.5*float(NP_bulk))]))\n # nbulk2have = len(binds) - nbulk2cut\n # print 'nboun2have = ', nboun2have\n # print 'nbulk2have = ', nbulk2have\n\n # CUT BONDS FROM HIGHEST Z NODES (sum of endpts)\n # Unfortunately, this has to be done iteratively.\n # Algorithm: find zvals of all bonds. 
For all bonds with zval = max(zval),\n # cut all the bonds that don't share endpts with any of the other bonds.\n # Find these by going through in-place-randomized B2cut and cross off if later bonds share indices.\n # Let boundary bonds be cut, or not, and pay no attention to them, since lattice will be cropped.\n\n # First cut most coordinated, whether on bulk or boundary, but keep track of which.\n # Get bonds with highest z pairs of nodes\n NN = np.shape(KL)[1]\n zz = np.sum(KL, axis=1)\n # print 'zz = ', zz\n zbulk = float(np.sum(zz[bulk])) / float(len(bulk))\n print 'zbulk so far = ', zbulk\n\n # As long as we haven't cut enough bonds, cut some more\n while zbulk > target_z:\n print 'zbulk = ', zbulk\n zb = zz[BL[:, 0]] + zz[BL[:, 1]]\n zcut = np.where(zb == max(zb))[0]\n np.random.shuffle(zcut)\n B2cut = BL[zcut]\n # print 'B2cut = ', B2cut\n\n # Check --> show bond numbers and bond to cut\n if check:\n display_lattice_2D(xy, BL, close=False)\n # for ii in range(len(BL)):\n # plt.text((xy[BL[ii,0],0]+xy[BL[ii,1],0])*0.5,(xy[BL[ii,0],1]+xy[BL[ii,1],1])*0.5,str(ii))\n # plt.text((xy[BL[ii,0],0]+xy[BL[ii,1],0])*0.5,(xy[BL[ii,0],1]+xy[BL[ii,1],1])*0.5,str(zb[ii]))\n for row in B2cut:\n plt.plot([xy[row[0], 0], xy[row[1], 0]], [xy[row[0], 1], xy[row[1], 1]], 'r-')\n plt.title('Initial counting marks these')\n plt.pause(0.01)\n plt.clf()\n\n # print 'B2cut = ', B2cut\n # Cross off if later bonds share indices\n keep = np.ones(len(B2cut), dtype=bool)\n for ii in range(len(B2cut)):\n row = B2cut[ii]\n if row[0] in B2cut[ii + 1:, :].ravel():\n # print 'found ', row[0], 'in rest of array '\n # print ' --> len BL[ii+1:,:] = ', len(B2cut[ii+1:,:] )\n keep[ii] = False\n elif row[1] in B2cut[ii + 1:, :].ravel():\n keep[ii] = False\n\n # print 'keep = ', keep\n # print 'keep.any() = ', keep.any()\n if keep.any():\n B2cut = B2cut[keep]\n else:\n print 'The highest nodes are all connected to at least one other. Killing one bond...'\n B2cut = B2cut[0:1]\n\n # Only interested in the bulk bonds for measurement, but cutting boundary\n # bonds will get us out of a situation where bulk is less coordinated than\n # boundary so don't do --> B2cut = intersect2d(B2cut,BLbulk)\n\n N2cut = len(B2cut)\n\n # See what would happen if we cut all of these\n BLt = dh.setdiff2d(BL, B2cut)\n NLt, KLt = BL2NLandKL(BLt, NP=NP, NN=NN)\n zzt = np.sum(KLt, axis=1)\n zbulk = np.float(np.sum(zzt[bulk])) / float(len(bulk))\n\n # If we can cut all of these, do that. Otherwise, cut only as many as needed after shuffling.\n if len(np.where(zzt == 0)[0]) > 0:\n print 'There are dangling points. 
Removing bonds2cut that would make these...'\n # There are dangling points.\n # Remove the bonds that make zzt elems zero from the bonds to cut list\n # and recalculate.\n dangle_pts = np.where(zzt == 0)[0]\n # protect dangle points --> there is only one bond to find since we have run a \"keep\" search on B2cut\n inb0 = np.where(np.in1d(B2cut[:, 0], dangle_pts))[0]\n inb1 = np.where(np.in1d(B2cut[:, 1], dangle_pts))[0]\n keep = np.setdiff1d(np.arange(len(B2cut)), inb0)\n keep = np.setdiff1d(keep, inb1)\n print 'Protecting dangling bond: keep for dangle =', keep\n\n # Check --> show bond numbers and bond to cut and protect (dangles)\n if check:\n display_lattice_2D(xy, BL, close=False)\n for ii in range(len(BL)):\n # plt.text((xy[BL[ii,0],0]+xy[BL[ii,1],0])*0.5,(xy[BL[ii,0],1]+xy[BL[ii,1],1])*0.5,str(ii))\n plt.text((xy[BL[ii, 0], 0] + xy[BL[ii, 1], 0]) * 0.5, (xy[BL[ii, 0], 1] + xy[BL[ii, 1], 1]) * 0.5,\n str(zb[ii]))\n for row in B2cut:\n plt.plot([xy[row[0], 0], xy[row[1], 0]], [xy[row[0], 1], xy[row[1], 1]], 'r-')\n plt.plot([xy[B2cut[keep, 0], 0], xy[B2cut[keep, 1], 0]], [xy[B2cut[keep, 0], 1], xy[B2cut[keep, 1], 1]],\n 'b-', lw=5)\n plt.show()\n plt.clf()\n\n B2cut = B2cut[keep]\n N2cut = len(B2cut)\n\n BLt = dh.setdiff2d(BL, B2cut)\n NLt, KLt = BL2NLandKL(BLt, NP=NP, NN=NN)\n zzt = np.sum(KLt, axis=1)\n zbulk = np.float(np.sum(zzt[bulk])) / float(len(bulk))\n\n # If we end up in a place where these are the only bonds to cut, raise exception\n # --> means target_z is just too low for our given lattice.\n if np.size(B2cut) == 0:\n raise RuntimeError('target_z is too low for the given lattice! Cutting bonds led to dangling points.')\n\n if zbulk > target_z:\n print 'Still above: zbulk = ', zbulk\n\n # Check --> show bond numbers and bond to cut\n if check:\n display_lattice_2D(xy, BL, close=False)\n # for ii in range(len(BL)):\n # plt.text((xy[BL[ii,0],0]+xy[BL[ii,1],0])*0.5,(xy[BL[ii,0],1]+xy[BL[ii,1],1])*0.5,str(ii))\n # plt.text((xy[BL[ii,0],0]+xy[BL[ii,1],0])*0.5,(xy[BL[ii,0],1]+xy[BL[ii,1],1])*0.5,str(zb[ii]))\n for row in B2cut:\n plt.plot([xy[row[0], 0], xy[row[1], 0]], [xy[row[0], 1], xy[row[1], 1]], 'r-')\n\n plt.pause(0.01)\n plt.clf()\n\n # move pointers\n BL, BLt = BLt, BL\n NL, NLt = NLt, NL\n KL, KLt = KLt, KL\n zz, zzt = zzt, zz\n else:\n print 'Approaching z = ', target_z, ' tuning one bond at a time...'\n # Cut a bond unless there is only one to cut\n # (in which case we are within threshold)\n if N2cut == 1:\n zbulk = 0.\n # move pointers\n BL, BLt = BLt, BL\n NL, NLt = NLt, NL\n KL, KLt = KLt, KL\n zz, zzt = zzt, zz\n else:\n # Check --> show bond numbers and bond to cut\n if check:\n display_lattice_2D(xy, BL, close=False)\n for ii in range(len(BL)):\n # plt.text((xy[BL[ii,0],0]+xy[BL[ii,1],0])*0.5,(xy[BL[ii,0],1]+xy[BL[ii,1],1])*0.5,str(ii))\n plt.text((xy[BL[ii, 0], 0] + xy[BL[ii, 1], 0]) * 0.5,\n (xy[BL[ii, 0], 1] + xy[BL[ii, 1], 1]) * 0.5, str(zb[ii]))\n for row in B2cut:\n plt.plot([xy[row[0], 0], xy[row[1], 0]], [xy[row[0], 1], xy[row[1], 1]], 'r-')\n plt.pause(0.01)\n plt.clf()\n\n BL = dh.setdiff2d(BL, B2cut[0:1])\n NL, KL = BL2NLandKL(BL, NP=NP, NN=NN)\n zz = np.sum(KLt, axis=1)\n print 'zz = ', zz\n zbulk = np.float(np.sum(zz[bulk])) / float(len(bulk))\n\n # IGNORE BOUNDARY: MUST CUT OUT DESIRED REGION. 
OTHERWISE, IT'S JUST TOO HARD TO MAKE IT RIGHT.\n # Only interested in the boundary bonds now\n # number of bonds to cut in the boundary = nbulkcut * (# boundary bonds)/(#bulk bonds)\n # nB2cut = int(round(nbulk2cut * float(len(Binds))/float(len(binds))))\n # nboun2have = len(Binds) - nB2cut\n #\n # while nboun > nboun2have:\n # zz = np.sum(KL, axis=1)\n # zb = zz[BL[:,0]] + zz[BL[:,1]]\n # zcut = np.where(zb== max(zb))[0]\n # np.random.shuffle(zcut)\n # B2cut = BL[zcut]\n # # Only interested in the boundary bonds now\n # B2cut = intersect2d(B2cut,BLboun)\n # # Cross off if later bonds share indices\n # keep = np.ones(len(B2cut),dtype = bool)\n # for ii in range(len(B2cut)):\n # row = B2cut[ii]\n # if row[0] in BL[ii+1,:].ravel():\n # keep[ii] = False\n # B2cut = B2cut[keep]\n # # Cut only as many as needed\n # nboun2cut = min([nboun - nboun2have, len(B2cut)])\n # BL = dh.setdiff2d(BL,B2cut[0:nboun2cut])\n # nboun = len(intersect2d(BL,BLboun))\n # print 'nbound so far =', nboun\n # NL, KL = BL2NLandKL(BL,NP=NP,NN=NN)\n\n zz = np.sum(KL, axis=1)\n zbulk = np.float(np.sum(zz[bulk])) / float(len(bulk))\n print 'Tuned to zbulk = ', zbulk\n\n if check:\n display_lattice_2D(xy, BL, close=False)\n plt.show()\n\n print '\\nReturning lattice with ', len(BL), ' bonds for ', NP, ' particles...'\n\n return NL, KL, BL",
"def dropoffLocator(*args, **kwargs)->List[AnyStr]:\n pass",
"def identify_bonds(chosen_atom, atom_list):\n list_of_hydrogens = ['H15', 'H14', 'H13', 'H12', 'H11', 'H10', 'H9', 'H8', 'H7', 'H6', 'H5', 'H4', 'H3', 'H2', 'H1'] \n if ((chosen_atom.atom_name not in list_of_hydrogens) and (chosen_atom.residue_name != \"P1A\")):\n nearby_atoms_crude = [atom for atom in atom_list if ((abs(chosen_atom.x - atom.x) <= 2) and (abs(chosen_atom.y - atom.y) <= 2) and (abs(chosen_atom.z - atom.z) <= 2))]\n nearby_atoms = [atom for atom in nearby_atoms_crude if (0 < calculate_3D_distance_2_atoms(chosen_atom,atom) <= 2)]\n identified_bonds = [[atom, calculate_3D_distance_2_atoms(chosen_atom, atom)] for atom in nearby_atoms if (check_bond(chosen_atom, atom) == True)] \n elif ((chosen_atom.atom_name not in list_of_hydrogens) and (chosen_atom.residue_name == \"P1A\")):\n nearby_atoms_crude = [atom for atom in atom_list if ((abs(chosen_atom.x - atom.x) <= 2) and (abs(chosen_atom.y - atom.y) <= 2) and (abs(chosen_atom.z - atom.z) <= 2))]\n nearby_atoms = [atom for atom in nearby_atoms_crude if (0 < calculate_3D_distance_2_atoms(chosen_atom,atom) <= 1.8)]\n identified_bonds = [[atom, calculate_3D_distance_2_atoms(chosen_atom, atom)] for atom in nearby_atoms if (check_bond(chosen_atom, atom) == True)] \n else:\n nearby_atoms_crude = [atom for atom in atom_list if ((abs(chosen_atom.x - atom.x) <= 1.6) and (abs(chosen_atom.y - atom.y) <= 1.6) and (abs(chosen_atom.z - atom.z) <= 1.6))]\n nearby_atoms = [atom for atom in nearby_atoms_crude if (0 < calculate_3D_distance_2_atoms(chosen_atom,atom) <= 1.6)]\n identified_bonds = [[atom, calculate_3D_distance_2_atoms(chosen_atom, atom)] for atom in nearby_atoms if (check_bond(chosen_atom, atom) == True)] \n for elements in nearby_atoms:\n if (check_if_no_bond(chosen_atom, elements, bond_list, bond_list_3) == True):\n nearby_atoms.remove(elements)\n if (len(nearby_atoms) == len(identified_bonds)):\n return identified_bonds\n else:\n return []",
"def _bonds_peturbation(\n nbo: Dict[str, Any],\n index: int,\n poss_coord: Dict[Optional[int], List[Optional[int]]],\n energy_cutoff: float,\n metal_indices: List[int],\n):\n\n bonds = set() # type: ignore\n\n # No metals, so don't need to use perturbation analysis to get bonds\n if len(metal_indices) == 0:\n return bonds\n\n if len(nbo[\"perturbation_energy\"]) > index:\n for inter_ind in nbo[\"perturbation_energy\"][index].get(\"donor type\", list()):\n coord = False\n m_ind: Optional[int] = None\n x_ind: Optional[int] = None\n if (\n int(\n nbo[\"perturbation_energy\"][index][\"acceptor atom 1 number\"][\n inter_ind\n ]\n )\n - 1\n in metal_indices\n ):\n if (\n nbo[\"perturbation_energy\"][index][\"donor type\"][inter_ind] == \"LP\"\n and nbo[\"perturbation_energy\"][index][\"acceptor type\"][inter_ind]\n == \"LV\"\n ):\n coord = True\n m_ind = (\n int(\n nbo[\"perturbation_energy\"][index][\"acceptor atom 1 number\"][\n inter_ind\n ]\n )\n - 1\n )\n x_ind = (\n int(\n nbo[\"perturbation_energy\"][index][\"donor atom 1 number\"][\n inter_ind\n ]\n )\n - 1\n )\n elif (\n nbo[\"perturbation_energy\"][index][\"donor type\"][inter_ind] == \"LP\"\n and nbo[\"perturbation_energy\"][index][\"acceptor type\"][inter_ind]\n == \"RY*\"\n ):\n coord = True\n m_ind = (\n int(\n nbo[\"perturbation_energy\"][index][\"acceptor atom 1 number\"][\n inter_ind\n ]\n )\n - 1\n )\n x_ind = (\n int(\n nbo[\"perturbation_energy\"][index][\"donor atom 1 number\"][\n inter_ind\n ]\n )\n - 1\n )\n elif (\n nbo[\"perturbation_energy\"][index][\"donor atom 1 number\"][inter_ind] - 1\n in metal_indices\n ):\n if (\n nbo[\"perturbation_energy\"][index][\"donor type\"][inter_ind] == \"LP\"\n and nbo[\"perturbation_energy\"][index][\"acceptor type\"][inter_ind]\n == \"LV\"\n ):\n coord = True\n m_ind = (\n int(\n nbo[\"perturbation_energy\"][index][\"donor atom 1 number\"][\n inter_ind\n ]\n )\n - 1\n )\n x_ind = (\n int(\n nbo[\"perturbation_energy\"][index][\"acceptor atom 1 number\"][\n inter_ind\n ]\n )\n - 1\n )\n\n if not coord:\n continue\n elif x_ind not in poss_coord[m_ind]:\n continue\n\n energy = float(\n nbo[\"perturbation_energy\"][index][\"perturbation energy\"][inter_ind]\n )\n if energy >= energy_cutoff:\n bonds.add((x_ind, m_ind, \"electrostatic\"))\n return bonds",
"def buffered_pts_to_periodic_network(xy, BL, LL, BBox=None, check=False):\n if BBox is None or isinstance(BBox, str):\n # Assuming that BBox is centered and has width, height of LL[0], LL[1]\n BBox = 0.5 * np.array([[-LL[0], -LL[1]], [LL[0], -LL[1]], [LL[0], LL[1]], [-LL[0], LL[1]]])\n keep = np.where(np.logical_and(abs(xy[:, 0]) < LL[0] * 0.5, abs(xy[:, 1]) < LL[1] * 0.5))[0]\n else:\n bpath = mplpath.Path(BBox)\n keep = np.where(bpath.contains_points(xy))[0]\n if check:\n print 'checking that keep is not a logical ==> '\n print ' this would be bool keep = ', bpath.contains_points(xy)\n print ' and this is keep = ', keep\n\n minX = np.min(BBox[:, 0])\n maxX = np.max(BBox[:, 0])\n minY = np.min(BBox[:, 1])\n maxY = np.max(BBox[:, 1])\n PVdict = {'e': np.array([LL[0], 0.0]),\n 'n': np.array([0.0, LL[1]]),\n 'w': np.array([-LL[0], 0.0]),\n 's': np.array([0.0, -LL[1]]),\n 'ne': np.array([LL[0], LL[1]]),\n 'nw': np.array([-LL[0], LL[1]]),\n 'sw': np.array([-LL[0], -LL[1]]),\n 'se': np.array([LL[0], -LL[1]])}\n\n # Create a kd tree of the points\n tree = scipy.spatial.KDTree(xy)\n\n # Find bonds that will be cut. For each bond, match to other particle and add pair to BL and PVxydict\n BLcut, cutIND = find_cut_bonds(BL, keep)\n\n if check:\n plt.scatter(xy[:, 0], xy[:, 1], c='g', marker='x')\n plt.scatter(xy[keep, 0], xy[keep, 1], c='b', marker='o')\n highlight_bonds(xy, BL, ax=plt.gca(), color='b', show=False)\n highlight_bonds(xy, BLcut, ax=plt.gca(), color='r', lw=1, show=False)\n xxtmp = np.hstack((BBox[:, 0], np.array(BBox[:, 0])))\n print 'xxtmp = ', xxtmp\n yytmp = np.hstack((BBox[:, 1], np.array(BBox[:, 1])))\n print 'yytmp = ', yytmp\n plt.plot(xxtmp, yytmp, 'k-', lw=2)\n plt.title('Showing bonds that are cut, btwn original and mirrored network')\n plt.show()\n\n # preallocate BL2add and PVs\n BL2add = np.zeros((len(BLcut), 2), dtype=int)\n PVd = {} # = np.zeros((len(BLcut),2), dtype=float)\n kk = 0\n for bond in BLcut:\n # which endpt is outside?\n ptA = bond[0]\n ptB = bond[1]\n # mpt is short for 'mirror point', the point outside the bounding box\n if ptA not in keep:\n mpt, kpt = ptA, ptB\n else:\n mpt, kpt = ptB, ptA\n if xy[mpt, 0] < minX:\n if xy[mpt, 1] < minY:\n # Mirror particle is SW\n PV = PVdict['sw']\n elif xy[mpt, 1] > maxY:\n # Mirror particle is NW\n PV = PVdict['nw']\n else:\n # Mirror particle is West\n PV = PVdict['w']\n elif xy[mpt, 0] > maxX:\n if xy[mpt, 1] < minY:\n # Mirror particle is SE\n PV = PVdict['se']\n elif xy[mpt, 1] > maxY:\n # Mirror particle is NE\n PV = PVdict['ne']\n else:\n # Mirror particle is East\n PV = PVdict['e']\n elif xy[mpt, 1] < minY:\n # Mirror particle is South\n PV = PVdict['s']\n else:\n # Mirror particle is North\n PV = PVdict['n']\n\n # Get index of the particle that resides a vector -PV away from mirror particle\n dist, ind = tree.query(xy[mpt] - PV)\n BL2add[kk] = np.array([-kpt, -ind])\n PVd[(kpt, ind)] = PV\n kk += 1\n\n if check:\n print 'PVd = ', PVd\n display_lattice_2D(xy, np.abs(BL), title=\"showing extended lattice (w/o PBCs)\")\n\n # Crop network, and add back cut bonds as periodic ones\n BL = np.vstack((BL, BL2add))\n xytrim, NL, KL, BLtrim, PVxydict = remove_pts(keep, xy, BL)\n # Adjusting BL2add to account for smaller #npts (post-cropping) is already done in remove_pts\n # Adjust PVs to account for smaller #npts (post-cropping)\n remove = np.setdiff1d(np.arange(len(xy)), keep)\n\n # PVxydict should be correct as is, from output of remove_pts...\n PVxydict_check = {}\n for key in PVd:\n # adjust key to lower indices\n 
# count how many pts in remove are lower than key[0] and key[1], respectively\n lower0 = np.sum(remove < key[0])\n lower1 = np.sum(remove < key[1])\n newkey = (key[0] - lower0, key[1] - lower1)\n PVxydict_check[newkey] = PVd[key]\n print 'PVxydict = ', PVxydict\n print 'PVxydict_check = ', PVxydict_check\n if PVxydict is None:\n PVxydict = PVxydict_check\n else:\n raise RuntimeError('Are these PVxydicts the same?')\n\n if check:\n # Plot lattice without PBCs\n display_lattice_2D(xytrim, np.abs(BLtrim), title=\"showing lattice connectivity w/o PBCs\")\n display_lattice_2D(xytrim, BLtrim, PVxydict=PVxydict, title=\"showing lattice connectivity with PBCs\")\n\n return xytrim, NL, KL, BLtrim, PVxydict",
"def extract_1d_boundaries(xy, NL, KL, BL, PVx, PVy, check=False):\n if PVx is None and PVy is None:\n raise RuntimeError('Not designed to allow openBC networks.')\n # PVx = np.zeros_like(KL, dtype=float)\n # PVy = np.zeros_like(KL, dtype=float)\n\n # If there are dangling points, remove them for now and adjust indices later\n dangles, xy, NL, KL, BL, backtrans = remove_dangling_points(xy, NL, KL, BL, check=check)\n # If no dangling bonds, no need to translate indices at the end\n translate_at_end = len(dangles) > 0\n\n # Initialize the list of boundary indices to be larger than necessary\n boundaries = []\n for boundaryloc in ['top', 'bottom']:\n # Initialize the boundary list to be as long as possible (will truncate later)\n bb = np.zeros(2 * len(xy), dtype=int)\n if boundaryloc == 'top':\n # Start with the topmost point, which is guaranteed to be\n # at the convex hull and thus also at the top outer edge.\n # Then take the first step to be along the minimum angle bond\n rightIND = np.where(xy[:, 1] == np.max(xy[:, 1]))[0]\n # If there are more than one rightmost point, choose one\n if rightIND.size > 1:\n rightIND = rightIND[0]\n else:\n # Start with the bottom most point, which is guaranteed to be\n # at the convex hull and thus also at the bottom outer edge.\n # Then take the first step to be along the minimum angle bond\n rightIND = np.where(xy[:, 1] == np.min(xy[:, 1]))[0]\n # If there are more than one rightmost point, choose one\n if rightIND.size > 1:\n rightIND = rightIND[0]\n\n if check:\n print 'le.extract_1d_boundaries(): Found extremal pt: ', rightIND\n print 'le.extract_1d_boundaries(): with neighbors: ', NL[rightIND]\n print 'le.extract_1d_boundaries(): with connectns: ', KL[rightIND]\n plt.plot(xy[:, 0], xy[:, 1], 'k.')\n plt.plot(xy[rightIND, 0], xy[rightIND, 1], 'bo')\n for ii in range(len(xy)):\n plt.text(xy[ii, 0] + 0.1, xy[ii, 1], str(ii))\n plt.plot(xy[rightIND, 0], xy[rightIND, 1], 'ro')\n plt.pause(0.01)\n\n # Grab the true neighbors of this starting point\n # print 'le.extract_boundary(): NL[rightIND, :] = ', NL[rightIND, :]\n connect = np.argwhere(np.abs(KL[rightIND]).ravel()).ravel()\n neighbors = NL[rightIND, connect]\n if check:\n print 'le.extract_1d_boundaries(): neighbors = ', neighbors\n print 'le.extract_1d_boundaries(): rightIND = ', rightIND\n\n # Compute the angles of the neighbor bonds\n angles = np.mod(np.arctan2(xy[neighbors, 1] - xy[rightIND, 1] + PVy[rightIND, connect],\n xy[neighbors, 0] - xy[rightIND, 0] + PVx[rightIND, connect]).ravel(),\n 2 * np.pi)\n if check:\n print 'le.extract_1d_boundaries(): KL[rightIND] = ', KL[rightIND]\n print 'le.extract_1d_boundaries(): KL[rightIND,0] = ', KL[rightIND, 0]\n print 'le.extract_1d_boundaries(): KL[rightIND,0] ==0 ', KL[rightIND, 0] == 0\n print 'le.extract_1d_boundaries(): np.argwhere(KL[rightIND]) = ', np.argwhere(KL[rightIND])\n print 'le.extract_1d_boundaries(): np.argwhere(KL[rightIND].ravel())= ', np.argwhere(KL[rightIND].ravel())\n print 'le.extract_1d_boundaries(): neighbors = ', neighbors\n print 'le.extract_1d_boundaries(): angles = ', angles\n\n # Assign this pvx and pvy as pvx_prev and pvy_prev for next time around.\n # Note that this must preceed the redefinition of nextIND\n pvx_prev = PVx[rightIND, connect[angles == min(angles)][0]]\n pvy_prev = PVy[rightIND, connect[angles == min(angles)][0]]\n\n # Take the second particle to be the one with the lowest bond angle (will be >= pi/2)\n nextIND = neighbors[angles == min(angles)][0]\n bb[0] = rightIND\n\n dmyi = 1\n # as long as we haven't 
completed the full outer edge/boundary, add nextIND\n while nextIND != rightIND:\n # print '\\n nextIND = ', nextIND\n # print 'np.argwhere(KL[nextIND]) = ', np.argwhere(KL[nextIND]).ravel()\n bb[dmyi] = nextIND\n connect = np.argwhere(np.abs(KL[nextIND]).ravel())\n n_tmp = NL[nextIND, connect]\n\n # Get position in row of NL where NL == bb[dmyi - 1] (the previous boundary particle/site)\n # and where the PVx and PVy are opposite of the last used PVx and PVy values (to make sure we\n # are looking backwards along the boundary). We will use this to get the 'backward angle' -- the\n # angle of the previous bond in the boundary\n # Note that bb[dmyi - 1] may have been index 0, so there could be multiple matches\n nlpos = np.where(np.logical_and(NL[nextIND] == bb[dmyi - 1],\n np.abs(KL[nextIND]).ravel().astype(bool)))[0]\n if len(nlpos) > 1:\n # There is more than one connection to the previous particle. Check for where PVx and PVy\n # values are opposite the previously used values.\n ind_nlpos = np.where(np.logical_and(PVx[nextIND, nlpos] == -pvx_prev,\n PVy[nextIND, nlpos] == -pvy_prev))[0]\n print 'ind_nlpos = ', ind_nlpos\n nlpos = nlpos[ind_nlpos]\n\n # Exclude previous boundary particle (the copy of that particle in the nlpos position)\n # from the neighbors array, UNLESS IT IS THE ONLY ONE,\n # since its angle with itself is zero!\n\n # Used to remove previous particle, but this assumes that boundary is more than 2\n # particles long, which might not be true for periodic_strip bcs\n if len(n_tmp) == 1:\n print 'le: The bond is a lone bond, not part of a triangle.'\n neighbors = n_tmp\n else:\n print 'n_tmp = ', n_tmp\n neighbors = np.delete(n_tmp, nlpos)\n connect = np.delete(connect, nlpos)\n print 'n_tmp = ', n_tmp\n print 'neighbors = ', neighbors\n\n # print 'le: nlpos = ', nlpos\n forward_angles = np.arctan2(xy[neighbors, 1] - xy[nextIND, 1] + PVy[nextIND, connect],\n xy[neighbors, 0] - xy[nextIND, 0] + PVx[nextIND, connect]).ravel()\n backward_angle = np.arctan2(xy[bb[dmyi - 1], 1] - xy[nextIND, 1] + PVy[nextIND, nlpos],\n xy[bb[dmyi - 1], 0] - xy[nextIND, 0] + PVx[nextIND, nlpos]).ravel()\n if check:\n print 'le: connect = ', connect\n print 'le: forward_angles = ', forward_angles\n print 'le: backward_angle = ', backward_angle\n\n angles = np.mod(forward_angles - backward_angle, 2 * np.pi)\n if check:\n print 'le: angles = ', angles\n print 'le: angles==min--> ', angles == min(angles)\n print 'le: neighbors = ', neighbors\n print 'le.extract_1d_boundaries(): angles==min--> ', angles == min(angles)\n print 'le.extract_1d_boundaries(): neighbors[angles == min(angles)] --> ', neighbors[angles == min(angles)]\n\n # Assign this pvx and pvy as pvx_prev and pvy_prev for next time around.\n # Note that this must preceed the redefinition of nextIND\n pvx_prev = PVx[nextIND, connect[angles == min(angles)][0]]\n pvy_prev = PVy[nextIND, connect[angles == min(angles)][0]]\n # Redefine nextIND to be the new boundary index\n nextIND = neighbors[angles == min(angles)][0]\n # print 'nextIND = ', nextIND\n\n if check:\n # plt.plot(xy[:,0],xy[:,1],'k.')\n XY = np.vstack([xy[bb[dmyi], :], xy[nextIND, :]])\n plt.plot(XY[:, 0], XY[:, 1], 'r-')\n # for i in range(len(xy)):\n # plt.text(xy[i,0]+0.2,xy[i,1],str(i))\n plt.gca().set_aspect('equal')\n plt.pause(0.01)\n\n dmyi += 1\n\n # Truncate the list of boundary indices\n boundary = bb[0:dmyi]\n\n # Since some points were removed from the boundary identification, translate\n # indices back to indices of original xy\n if translate_at_end:\n print 
'le.extract_boundary(): Translating boundary points back into original indices...'\n # print 'boundary = ', boundary\n # print 'translation = ', translation\n # print 'backtrans = ', backtrans\n boundary = backtrans[boundary]\n\n boundaries.append(boundary)\n\n return tuple(boundaries)",
"def cut_bonds_strain_BL(BL, xy, bL0, bstrain):\n i2cut = (np.sqrt(\n (xy[BL[:, 0], 0] - xy[BL[:, 1], 0]) ** 2 + (xy[BL[:, 0], 1] - xy[BL[:, 1], 1]) ** 2) - bL0) < bstrain * bL0\n bL0trim = bL0[i2cut]\n BLtrim = BL[i2cut]\n return BLtrim, bL0trim",
"def cut_bonds_strain(xy, NL, KL, BM0, bstrain):\n NP, NN = np.shape(NL)\n BL = NL2BL(NL, KL)\n bL0 = BM2bL(NL, BM0, BL)\n BLtrim, bL0trim = cut_bonds_strain_BL(BL, xy, bL0, bstrain)\n KL = BL2KL(BLtrim, NL)\n # i2cut = (np.sqrt((xy[BL[:,0],0]-xy[BL[:,1],0])**2+(xy[BL[:,0],1]-xy[BL[:,1],1])**2) - bL0) < bstrain*bL0\n return KL, BLtrim, bL0trim",
"def extract_inner_boundary(xy, NL, KL, BL, inner_pt=None, check=False):\n # Center the points around some point that is inside the inner region to be extracted\n if inner_pt is not None:\n xy -= inner_pt\n else:\n xy -= np.mean(xy, axis=0)\n\n # Clear periodic bonds from KL\n pbonds = np.where(KL.ravel() < 0)[0]\n if len(pbonds) > 0:\n print 'le: Found periodic bonds in le.extract_inner_boundary(), clearing...'\n KLr = KL.ravel()\n KLr[pbonds] = 0\n KL = KLr.reshape(np.shape(KL))\n print 'le: pbonds = ', pbonds\n\n # If there are dangling points, remove them for now and adjust indices later\n dangles, xy, NL, KL, BL, backtrans = remove_dangling_points(xy, NL, KL, BL, check=check)\n translate_at_end = len(dangles) > 0\n\n # Initialize the list of boundary indices to be larger than necessary\n bb = np.zeros(2 * len(xy), dtype=int)\n\n # Start with the centermost point that is on the right side of the y axis, which is guaranteed to be\n # at the convex hull for an annular sample and thus also at the inner edge.\n # Then take the first step to be along the minimum angle bond\n # Compute radial distance of each particle\n distr2 = xy[:, 0] ** 2 + xy[:, 1] ** 2\n xpositive = np.where(xy[:, 0] > 0)[0]\n if translate_at_end:\n # avoid choosing a dangling particle with no bonds\n selection = np.intersect1d(xpositive, nodangles)\n rightIND = np.where(distr2 == np.min(distr2[selection]))[0]\n else:\n rightIND = np.where(distr2 == np.min(distr2[xpositive]))[0]\n # print 'rightIND = ', rightIND\n # plt.plot(xy[:, 0], xy[:, ])\n # for ii in range(len(xy)):\n # plt.text(xy[ii, 0] + 0.1, xy[ii, 1], str(ii))\n # plt.show()\n # sys.exit()\n # If there are more than one rightmost point, choose one\n if rightIND.size > 1:\n rightIND = rightIND[0]\n\n if check:\n print 'le.extract_inner_boundary(): Found innermost pt: ', rightIND\n print 'le.extract_inner_boundary(): with neighbors: ', NL[rightIND]\n print 'le.extract_inner_boundary(): with connectns: ', KL[rightIND]\n plt.plot(xy[:, 0], xy[:, 1], 'k.')\n plt.plot(xy[rightIND, 0], xy[rightIND, 1], 'bo')\n for ii in range(len(xy)):\n plt.text(xy[ii, 0] + 0.1, xy[ii, 1], str(ii))\n plt.plot(xy[rightIND, 0], xy[rightIND, 1], 'ro')\n plt.pause(0.1)\n\n # Grab the true neighbors of this starting point\n print 'le.extract_inner_boundary(): NL[rightIND, :] = ', NL[rightIND, :]\n neighbors = NL[rightIND, np.argwhere(KL[rightIND].ravel()).ravel()]\n print 'le.extract_inner_boundary(): neighbors = ', neighbors\n print 'le.extract_inner_boundary(): rightIND = ', rightIND\n\n # Take the second particle to be the one with the smallest bond angle above pi (might be <= 3pi/2, but not\n # necessarily).\n # Compute the angles of the neighbor bonds and add pi\n angles = np.mod(np.arctan2(xy[neighbors, 1] - xy[rightIND, 1], xy[neighbors, 0] - xy[rightIND, 0]).ravel() + np.pi,\n 2 * np.pi)\n nextIND = neighbors[angles == min(angles)][0]\n bb[0] = rightIND\n dmyi = 1\n\n if check:\n print 'KL[rightIND] = ', KL[rightIND]\n print 'KL[rightIND,0] = ', KL[rightIND, 0]\n print 'KL[rightIND,0] ==0 ', KL[rightIND, 0] == 0\n print 'np.argwhere(KL[rightIND]) = ', np.argwhere(KL[rightIND])\n print 'np.argwhere(KL[rightIND].ravel())= ', np.argwhere(KL[rightIND].ravel())\n print 'neighbors = ', neighbors\n print 'angles = ', angles\n\n # This part, commented out, was a red herring\n # It is possible for the first particle to be attached to only one other site. 
If this is the case, then we need to\n # add its neighbor to the bb array and take the next max angle with respect to that bond instead of the min angle.\n # while len(angles) == 1:\n # print 'le.extract_inner_boundary(): there is only one neighbor for the first identified boundary particle'\n # bb[dmyi] = nextIND\n # angles, neighbors = bond_angles_wrt_bond(bb[dmyi - 1], nextIND, xy, BL, KL)\n # nextIND = neighbors[angles == max(angles)][0]\n # # print 'nextIND = ', nextIND\n\n if check:\n print 'bb = ', bb\n # sys.exit()\n # as long as we haven't completed the full outer edge/boundary, add nextIND\n while nextIND != rightIND:\n # print '\\n nextIND = ', nextIND\n # print 'np.argwhere(KL[nextIND]) = ', np.argwhere(KL[nextIND]).ravel()\n bb[dmyi] = nextIND\n angles, neighbors = bond_angles_wrt_bond(bb[dmyi - 1], nextIND, xy, NL, KL)\n nextIND = neighbors[angles == min(angles)][0]\n # print 'nextIND = ', nextIND\n\n if check:\n plt.plot(xy[:,0],xy[:,1],'k.')\n XY = np.vstack([xy[bb[dmyi], :], xy[nextIND, :]])\n plt.plot(XY[:, 0], XY[:, 1], 'r-')\n for i in range(len(xy)):\n plt.text(xy[i,0] + 0.2, xy[i, 1], str(i))\n plt.gca().set_aspect('equal')\n plt.show()\n\n dmyi += 1\n\n # Truncate the list of boundary indices\n inner_boundary = bb[0:dmyi]\n\n # Since some points were removed from the boundary identification, translate\n # indices back to indices of original xy\n if translate_at_end:\n print 'le.extract_boundary(): Translating boundary points back into original indices...'\n inner_boundary = backtrans[inner_boundary]\n\n return inner_boundary",
"def detect_doublets(adata,marker_genes=[\"GCG\",\"INS\",\"SST\",\"PPY\",\"COL3A1\",\"CFTR\",\"PRSS2\",\"GHRL\"],inplace=True):\n counts=np.zeros((1,adata.shape[0]))\n for gene in marker_genes:\n gm = mixture.GaussianMixture(n_components=2, covariance_type='full',reg_covar=0.3)\n expressions = (adata[:,gene].X).reshape(-1,1)\n gm.fit(expressions)\n predictions = gm.predict(expressions)\n if gm.predict([[0]]):\n predictions = 1 - predictions\n counts= counts + predictions\n if inplace:\n adata._inplace_subset_obs((counts <=1)[0])\n else: \n #In that case, the doublets won't be removed, but the \"doublet score\" will be added to the anndata. This is useful for testing that this filter correctly identifies the doublets.\n adata.obs[\"doublets\"] = counts[0]",
"def delaunay_periodic_network_from_pts(xy, PV, BBox='auto', check=False, target_z=-1, max_bond_length=-1,\n zmethod='random', minimum_bonds=-1, ensure_periodic=False):\n # Algorithm for handling boundaries:\n # - Copy parts of lattice to buffer up the edges\n # - Cut the bonds with the bounding box of the loaded configuration\n # - For each cut bond, match the outside endpt with its corresponding mirror particle\n xytmp = buffer_points_for_periodicBC(xy, PV)\n if check:\n plt.show()\n plt.plot(xytmp[:, 0], xytmp[:, 1], 'b.')\n plt.title('Buffered points')\n plt.show()\n xy, NL, KL, BL, BM = delaunay_lattice_from_pts(xytmp, trimbound=False, target_z=target_z,\n max_bond_length=max_bond_length,\n zmethod=zmethod, minimum_bonds=minimum_bonds,\n check=check)\n if ensure_periodic:\n BL = ensure_periodic_connectivity(xy, NL, KL, BL)\n NL, KL = BL2NLandKL(BL)\n\n # todo: allow for other shapes of periodic boundaries other than parallelogram\n xytrim, NL, KL, BLtrim, PVxydict = \\\n buffered_pts_to_periodic_network_parallelogram(xy, BL, PV, BBox=BBox, check=check)\n return xytrim, NL, KL, BLtrim, PVxydict",
"def delaunay_centroid_periodicstrip_from_pts(xy, LL, BBox='auto', check=False):\n # Algorithm for handling boundaries:\n # - Copy parts of lattice to buffer up the edges\n # - Cut the bonds with the bounding box of the loaded configuration\n # - For each cut bond, match the outside endpt with its corresponding mirror particle\n xytmp = buffer_points_for_rectangular_periodicBC(xy, LL)\n xy, NL, KL, BL = delaunay_centroid_lattice_from_pts(xytmp, polygon=None, trimbound=False, check=check)\n xytrim, NL, KL, BLtrim, PVxydict = buffered_pts_to_periodicstrip(xy, BL, LL, BBox=BBox, check=check)\n return xytrim, NL, KL, BLtrim, PVxydict",
"def find_endpoints(batch_trajectories):\n # empty lists to fill\n site_lats = []\n site_lons = []\n last_lats = []\n last_lons = []\n lats_150 = []\n lons_150 = [] \n last_times = []\n times_150 = []\n last_sst = []\n sst_150 = []\n \n # temporary lists as placeholders\n temp_site_lats = []\n temp_site_lons = []\n temp_lats = []\n temp_lons = []\n temp_lats150 = []\n temp_lons150 = []\n temp_times = []\n temp_times150 = []\n temp_sst = []\n temp_sst150 = []\n\n for speed in range(len(batch_trajectories)):\n # working with one speed at a time means working with one nc file at\n # a time\n \n # reset temporary lists\n temp_site_lats = []\n temp_site_lons = []\n temp_lats = []\n temp_lons = []\n temp_lats150 = []\n temp_lons150 = []\n temp_times = []\n temp_times150 = []\n temp_sst = []\n temp_sst150 = []\n\n # extract variables into lists\n lats = batch_trajectories[speed].variables['lat'][:]\n lons = batch_trajectories[speed].variables['lon'][:]\n lats150 = batch_trajectories[speed].variables['lat150'][:]\n lons150 = batch_trajectories[speed].variables['lon150'][:]\n times = batch_trajectories[speed].variables['time'][:]\n ssts = batch_trajectories[speed].variables['temp'][:]\n ssts_150 = batch_trajectories[speed].variables['temp150'][:]\n\n # if a particle is deleted before time is up, values are masked. \n # We'd like to get the last valid number.\n for trajectory in range(len(lats)):\n i = -1 # index for the last value\n while np.ma.is_masked(lats[trajectory][i]) is True:\n i -= 1 # if the value is masked, go to one value sooner\n \n j = i # use j for the 150m values\n while lats150[trajectory][j] > 0:\n # we want the first index where the latitude is recorded.\n # j is actually the last one where it's not recorded, so we\n # extract the information at index j+1\n j -= 1\n\n # once i and j are determined for a trajectory, we can extract the\n # variables and append them to temporary lists.\n temp_site_lats.append(lats[trajectory][0])\n temp_site_lons.append(lons[trajectory][0])\n temp_lats.append(lats[trajectory][i])\n temp_lons.append(lons[trajectory][i])\n temp_lats150.append(lats150[trajectory][j+1])\n temp_lons150.append(lons150[trajectory][j+1])\n temp_times.append(times[trajectory][i])\n temp_sst.append(ssts[trajectory][i])\n temp_sst150.append(ssts_150[trajectory][j+1])\n temp_times150.append(times[trajectory][j+1])\n \n # after the temporary lists are appended by sinking speed, they\n # are appended to the big lists that are returned by the function.\n # this keeps the structure of being separated by sinking speed.\n site_lats.append(temp_site_lats)\n site_lons.append(temp_site_lons)\n last_lats.append(temp_lats)\n last_lons.append(temp_lons)\n lats_150.append(temp_lats150)\n lons_150.append(temp_lons150)\n last_times.append(temp_times)\n times_150.append(temp_times150)\n last_sst.append(temp_sst)\n sst_150.append(temp_sst150)\n \n return site_lats, site_lons, last_lats, last_lons, lats_150, lons_150,\\\n last_times, times_150, last_sst, sst_150",
"def _call_doublets(self):\n\n # look through the nearest_neighbors_dict to find cell barcodes\n # which are regularly marked as similar to artificial doublets\n for _, v in self.nearest_neighbors_dict.items():\n for _, cell_idx in v:\n self.num_times_knn[cell_idx][1] += 1\n\n self.doublet_barcodes = sorted(self.num_times_knn, key=lambda x: x[1])[\n -(self.num_doublets) : # pylint: disable=invalid-unary-operand-type\n ]\n # print(sorted(self.num_times_knn, key=lambda x: x[1])[-40:])",
"def BL2PVxydict(BL, xy, PV):\n # The ijth element of PVx is the xcomponent of the vector taking NL[i,j] to its image as seen by particle i.\n PVxydict = {}\n # check both directions along each periodic vector\n PVtmp = np.vstack((PV, -PV))\n\n # For each bond that is a periodic bond, determine its periodic boundary vector (a row of the array PV)\n pBs = np.unique(np.where(BL < 0)[0])\n print 'le: BL[pBs] = ', BL[pBs]\n print 'le: pBs = ', pBs\n for ind in pBs:\n # Find the PV (periodic vector) that brings the second particle (j) closest to the first (i).\n # This will be PVxydict[(i,j)], since particle i sees j at xy[j]+PVxydict[(i,j)]\n a1 = xy[np.abs(BL[ind, 0])]\n a2 = xy[np.abs(BL[ind, 1])]\n try:\n distxy = a2 + PVtmp - a1\n except ValueError:\n print 'a1 = ', a1\n print 'a2 = ', a2\n print 'PVtmp = ', PVtmp\n raise RuntimeError('dimensions do not match')\n dist = distxy[:, 0] ** 2 + distxy[:, 1] ** 2\n # print 'a1, a2 = ', a1, a2\n # print 'distxy = ', distxy\n # print 'PV = ', PV\n # print 'dist = ', dist\n if np.argmin(dist) > len(PV) - 1:\n PVxydict[(np.abs(BL[ind, 0]), np.abs(BL[ind, 1]))] = -PV[np.argmin(dist) % len(PV)]\n else:\n PVxydict[(np.abs(BL[ind, 0]), np.abs(BL[ind, 1]))] = PV[np.argmin(dist) % len(PV)]\n\n print 'le: PVxydict = ', PVxydict\n return PVxydict",
"def find_doublets(self, k=15, save_pca_path=None, save_mtx_path=None, save_barcodes_path=None):\n\n if save_mtx_path:\n self._save_matrix(save_mtx_path)\n self._create_artificial_doublets()\n self._reduce_matrix_dimensions()\n if save_pca_path:\n self._save_pca_matrix(save_pca_path)\n self._find_nearest_neighbors(k)\n self._call_doublets()\n if save_barcodes_path:\n self._save_barcodes(save_barcodes_path)"
] | [
"0.65388036",
"0.6282355",
"0.60239565",
"0.56848073",
"0.5602002",
"0.5525652",
"0.5487211",
"0.5334124",
"0.52467597",
"0.52423966",
"0.5184897",
"0.5184084",
"0.5181235",
"0.5123486",
"0.50454843",
"0.50172555",
"0.5012011",
"0.5001637",
"0.4987533",
"0.49609968",
"0.4915463",
"0.49007034",
"0.48996055",
"0.4881176",
"0.4878629",
"0.48668778",
"0.48269024",
"0.47905812",
"0.47884095",
"0.47567177"
] | 0.7693398 | 0 |
Locates droplets in the phase field. This uses a binarized image to locate clusters of large concentration in the phase field, which are interpreted as droplets. Basic quantities, like position and size, are determined for these clusters. | def locate_droplets(
phase_field: ScalarField,
threshold: Union[float, str] = 0.5,
modes: int = 0,
minimal_radius: float = 0,
refine: bool = False,
interface_width: Optional[float] = None,
) -> Emulsion:
assert isinstance(phase_field, ScalarField)
dim = phase_field.grid.dim # dimensionality of the space
if modes > 0 and dim not in [2, 3]:
raise ValueError("Perturbed droplets only supported for 2d and 3d")
# determine actual threshold
if threshold == "auto":
threshold = float(phase_field.data.min() + phase_field.data.max()) / 2
else:
threshold = float(threshold)
# locate droplets in thresholded image
img_binary = phase_field.data > threshold
candidates = locate_droplets_in_mask(phase_field.grid, img_binary)
if minimal_radius > -np.inf:
candidates.remove_small(minimal_radius)
droplets = []
for droplet in candidates:
# check whether we need to add the interface width
droplet_class = droplet.__class__
args: Dict[str, NumberOrArray] = {}
# change droplet class when interface width is given
if interface_width is not None:
droplet_class = DiffuseDroplet
args["interface_width"] = interface_width
# change droplet class when perturbed droplets are requested
if modes > 0:
if dim == 2:
droplet_class = PerturbedDroplet2D
elif dim == 3:
droplet_class = PerturbedDroplet3D
else:
raise NotImplementedError(f"Dimension {dim} is not supported")
args["amplitudes"] = np.zeros(modes)
# recreate a droplet of the correct class
if droplet_class != droplet.__class__:
droplet = droplet_class.from_droplet(droplet, **args)
# refine droplets if necessary
if refine:
try:
droplet = refine_droplet(phase_field, droplet)
except ValueError:
continue # do not add the droplet to the list
droplets.append(droplet)
# return droplets as an emulsion
emulsion = Emulsion(droplets, grid=phase_field.grid)
if minimal_radius > -np.inf:
emulsion.remove_small(minimal_radius)
return emulsion | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _locate_droplets_in_mask_cylindrical(\n grid: CylindricalSymGrid, mask: np.ndarray\n) -> Emulsion:\n assert np.all(mask.shape == grid.shape)\n\n if grid.periodic[1]:\n # locate droplets respecting periodic boundary conditions in z-direction\n\n # pad the array to simulate periodic boundary conditions\n dim_r, dim_z = grid.shape\n mask_padded = np.pad(mask, [[0, 0], [dim_z, dim_z]], mode=\"wrap\")\n assert mask_padded.shape == (dim_r, 3 * dim_z)\n\n # locate droplets in the extended image\n candidates = _locate_droplets_in_mask_cylindrical_single(grid, mask_padded)\n grid._logger.info(f\"Found {len(candidates)} droplet candidates.\")\n\n # keep droplets that are inside the central area\n droplets = Emulsion(grid=grid)\n for droplet in candidates:\n # correct for the additional padding of the array\n droplet.position[2] -= grid.length\n # check whether the droplet lies in the original box\n if grid.contains_point(droplet.position):\n droplets.append(droplet)\n\n grid._logger.info(f\"Kept {len(droplets)} central droplets.\")\n\n # filter overlapping droplets (e.g. due to duplicates)\n droplets.remove_overlapping()\n\n else:\n # simply locate droplets in the mask\n droplets = _locate_droplets_in_mask_cylindrical_single(grid, mask)\n\n return droplets",
"def _locate_droplets_in_mask_cartesian(\n grid: CartesianGridBase, mask: np.ndarray\n) -> Emulsion:\n if mask.shape != grid.shape:\n raise ValueError(\n f\"The shape {mask.shape} of the data is not compatible with the grid \"\n f\"shape {grid.shape}\"\n )\n\n # pad the array to simulate periodic boundary conditions\n offset = np.array([dim if p else 0 for p, dim in zip(grid.periodic, grid.shape)])\n pad = np.c_[offset, offset].astype(np.intc)\n mask_padded = np.pad(mask, pad, mode=\"wrap\")\n assert np.all(mask_padded.shape == np.array(grid.shape) + 2 * offset)\n\n # locate individual clusters in the padded image\n labels, num_labels = ndimage.label(mask_padded)\n if num_labels == 0:\n return Emulsion([], grid=grid)\n indices = range(1, num_labels + 1)\n\n # create and emulsion from this of droplets\n grid._logger.info(f\"Found {num_labels} droplet candidate(s)\")\n\n # determine position from binary image and scale it to real space\n positions = ndimage.measurements.center_of_mass(mask_padded, labels, index=indices)\n # correct for the additional padding of the array\n positions = grid.cell_to_point(positions - offset)\n\n # determine volume from binary image and scale it to real space\n volumes = ndimage.measurements.sum(mask_padded, labels, index=indices)\n volumes = np.asanyarray(volumes) * np.prod(grid.discretization)\n\n # only retain droplets that are inside the central area\n droplets = (\n SphericalDroplet.from_volume(position, volume)\n for position, volume in zip(positions, volumes)\n if grid.cuboid.contains_point(position)\n )\n\n # filter overlapping droplets (e.g. due to duplicates)\n emulsion = Emulsion(droplets, grid=grid)\n num_candidates = len(emulsion)\n if num_candidates < num_labels:\n grid._logger.info(f\"Only {num_candidates} candidate(s) inside bounds\")\n\n emulsion.remove_overlapping()\n if len(emulsion) < num_candidates:\n grid._logger.info(f\"Only {num_candidates} candidate(s) not overlapping\")\n\n return emulsion",
"def _locate_droplets_in_mask_spherical(\n grid: SphericalSymGridBase, mask: np.ndarray\n) -> Emulsion:\n assert np.all(mask.shape == grid.shape)\n\n # locate clusters in the binary image\n labels, num_labels = ndimage.label(mask)\n if num_labels == 0:\n return Emulsion([], grid=grid)\n\n # locate clusters around origin\n object_slices = ndimage.measurements.find_objects(labels)\n droplet = None\n for slices in object_slices:\n if slices[0].start == 0: # contains point around origin\n radius = grid.cell_to_point(slices[0].stop).flat[-1]\n droplet = SphericalDroplet(np.zeros(grid.dim), radius=radius)\n else:\n logger = logging.getLogger(grid.__class__.__module__)\n logger.warning(\"Found object not located at origin\")\n\n # return an emulsion of droplets\n if droplet:\n return Emulsion([droplet], grid=grid)\n else:\n return Emulsion([], grid=grid)",
"def _locate_droplets_in_mask_cylindrical_single(\n grid: CylindricalSymGrid, mask: np.ndarray\n) -> Emulsion:\n # locate the individual clusters\n labels, num_features = ndimage.label(mask)\n if num_features == 0:\n return Emulsion([], grid=grid)\n\n # locate clusters on the symmetry axis\n object_slices = ndimage.measurements.find_objects(labels)\n indices = []\n for index, slices in enumerate(object_slices, 1):\n if slices[0].start == 0: # contains point on symmetry axis\n indices.append(index)\n else:\n logger = logging.getLogger(grid.__class__.__module__)\n logger.warning(\"Found object not located on symmetry axis\")\n\n # determine position from binary image and scale it to real space\n pos = ndimage.measurements.center_of_mass(mask, labels, index=indices)\n pos = grid.cell_to_point(pos)\n\n # determine volume from binary image and scale it to real space\n vol_r, dz = grid.cell_volume_data\n cell_volumes = vol_r * dz\n vol = ndimage.measurements.sum(cell_volumes, labels, index=indices)\n\n # return an emulsion of droplets\n droplets = (\n SphericalDroplet.from_volume(np.array([0, 0, p[2]]), v)\n for p, v in zip(pos, vol)\n )\n return Emulsion(droplets, grid=grid)",
"def phase_derivative_var_map(image, k):\n dx_phase = delta_x(image)\n dy_phase = delta_y(image)\n\n ny, nx = dx_phase.shape\n assert(ny == nx) ## assert a square image for simplicity\n if (k%2 == 0):\n print(\"k has to be an uneven integer!\")\n return\n N = nx\n i, j = np.arange(N), np.arange(N)\n ii, jj = np.meshgrid(i, j)\n zmn = np.zeros((N,N))\n \n \n\n inside = (jj[k/2:N-(k/2), k/2:N-(k/2)].flatten(), ii[k/2:N-(k/2), k/2:N-(k/2)].flatten())\n krange = np.linspace(-1 * (k/2), (k/2), k, dtype = 'int64') ## amount of added spaces, if k = 5, it ranges from -2 to 2\n krange_tile = np.tile(krange * N, (k, 1)).T ## tile them to make a (k/2)**2 matrix, containing for instance -2N, -N, 0, N, 2N for k=5\n k_tile = np.tile(krange, (k, 1)) ## tile to add to krange_tile\n coords_add = (krange_tile + k_tile).flatten() ## all coordinates, in a (k/2)**2 matrix, from -2N - 2: -2N + 2, -N-2 : -N+2 , -2 : 2, N -2 : N +2, 2N -2 : 2N +2\n inside = np.ravel_multi_index(inside, (N, N))\n coords_add = np.tile(coords_add, (len(inside), 1)) ## stack all differences to add to inside\n inside_tile = np.tile(inside, (coords_add.shape[1],1)).T ## stack all inside to add to differences\n all_coords = inside_tile + coords_add### a matrix of len(inside) x (k/2)**2 with all coordinates in a k x k square around a certain coordinate\n unrav_coords = np.unravel_index(all_coords, (N, N)) ## unraveled coordinates of all coordinates\n \n avg_x, avg_y = np.sum(dx_phase[unrav_coords], axis = 1)/k**2, np.sum(dy_phase[unrav_coords], axis = 1)/k**2\n avg_x_tile, avg_y_tile = np.tile(avg_x, (all_coords.shape[1], 1)).T, np.tile(avg_y, (all_coords.shape[1], 1)).T\n sum_x, sum_y = np.sum(np.square(dx_phase[unrav_coords] - avg_x_tile), axis = 1), np.sum(np.square(dy_phase[unrav_coords] - avg_y_tile), axis = 1)\n zmn[np.unravel_index(inside, (N, N))] = (np.sqrt(sum_x) + np.sqrt(sum_y)) / (k**2)\n\n\n\n #### top layers\n for i in range(k/2):\n ## for indices directly above the \"inside square\"\n top = (jj[i, k/2:N-(k/2)].flatten(), ii[i, k/2: N - (k/2)].flatten())\n coords_add = (krange_tile + k_tile)[(k/2)-i:, :].flatten()\n top = np.ravel_multi_index(top, (N, N))\n coords_add = np.tile(coords_add, (len(top), 1))\n top_tile = np.tile(top, (coords_add.shape[1],1)).T\n top_coords = top_tile + coords_add\n unrav_coords = np.unravel_index(top_coords, (N, N))\n avg_x, avg_y = np.sum(dx_phase[unrav_coords], axis = 1)/k**2, np.sum(dy_phase[unrav_coords], axis = 1)/k**2\n avg_x_tile, avg_y_tile = np.tile(avg_x, (top_coords.shape[1], 1)).T, np.tile(avg_y, (top_coords.shape[1], 1)).T\n sum_x, sum_y = np.sum(np.square(dx_phase[unrav_coords] - avg_x_tile), axis = 1), np.sum(np.square(dy_phase[unrav_coords] - avg_y_tile), axis = 1)\n zmn[np.unravel_index(top, (N, N))] = (np.sqrt(sum_x) + np.sqrt(sum_y)) / (k**2)\n## sum_sin_top = np.sum(np.sin(image[unrav_coords]), axis = 1)\n## sum_cos_top = np.sum(np.cos(image[unrav_coords]), axis = 1)\n## psi_top = np.arctan2(sum_sin_top, sum_cos_top)\n## filt_psi[np.unravel_index(top, (N, N))] = psi_top\n\n ## indices directly below the \"inside square\"\n bot = (jj[N- 1 - i, k/2:N-(k/2)].flatten(), ii[N-1-i, k/2: N - (k/2)].flatten()) ## starting at the bottom working inwards\n coords_add = (krange_tile + k_tile)[:(k/2) + 1 + i, :].flatten()\n bot = np.ravel_multi_index(bot, (N, N))\n coords_add = np.tile(coords_add, (len(top), 1))\n bot_tile = np.tile(bot, (coords_add.shape[1],1)).T\n bot_coords = bot_tile + coords_add\n unrav_coords = np.unravel_index(bot_coords, (N, N))\n avg_x, avg_y = 
np.sum(dx_phase[unrav_coords], axis = 1)/k**2, np.sum(dy_phase[unrav_coords], axis = 1)/k**2\n avg_x_tile, avg_y_tile = np.tile(avg_x, (bot_coords.shape[1], 1)).T, np.tile(avg_y, (bot_coords.shape[1], 1)).T\n sum_x, sum_y = np.sum(np.square(dx_phase[unrav_coords] - avg_x_tile), axis = 1), np.sum(np.square(dy_phase[unrav_coords] - avg_y_tile), axis = 1)\n zmn[np.unravel_index(bot, (N, N))] = (np.sqrt(sum_x) + np.sqrt(sum_y)) / (k**2)\n\n ## indices directly left of the \"inside square\"\n left = (jj[k/2:N-(k/2), i].flatten(), ii[k/2:N-(k/2), i].flatten()) ## starting at the bottom working inwards\n coords_add = (krange_tile + k_tile)[:, (k/2)-i:].flatten()\n left = np.ravel_multi_index(left, (N, N))\n coords_add = np.tile(coords_add, (len(left), 1))\n left_tile = np.tile(left, (coords_add.shape[1],1)).T\n left_coords = left_tile + coords_add\n unrav_coords = np.unravel_index(left_coords, (N, N))\n avg_x, avg_y = np.sum(dx_phase[unrav_coords], axis = 1)/k**2, np.sum(dy_phase[unrav_coords], axis = 1)/k**2\n avg_x_tile, avg_y_tile = np.tile(avg_x, (left_coords.shape[1], 1)).T, np.tile(avg_y, (left_coords.shape[1], 1)).T\n sum_x, sum_y = np.sum(np.square(dx_phase[unrav_coords] - avg_x_tile), axis = 1), np.sum(np.square(dy_phase[unrav_coords] - avg_y_tile), axis = 1)\n zmn[np.unravel_index(left, (N, N))] = (np.sqrt(sum_x) + np.sqrt(sum_y)) / (k**2)\n\n ## indices directly left of the \"inside square\"\n right = (jj[k/2:N-(k/2), N - 1 - i].flatten(), ii[k/2:N-(k/2), N - 1 - i].flatten()) ## starting at the bottom working inwards\n coords_add = (krange_tile + k_tile)[:, :(k/2)+1+i].flatten()\n right = np.ravel_multi_index(right, (N, N))\n coords_add = np.tile(coords_add, (len(right), 1))\n right_tile = np.tile(right, (coords_add.shape[1],1)).T\n right_coords = right_tile + coords_add\n unrav_coords = np.unravel_index(right_coords, (N, N))\n avg_x, avg_y = np.sum(dx_phase[unrav_coords], axis = 1)/k**2, np.sum(dy_phase[unrav_coords], axis = 1)/k**2\n avg_x_tile, avg_y_tile = np.tile(avg_x, (right_coords.shape[1], 1)).T, np.tile(avg_y, (right_coords.shape[1], 1)).T\n sum_x, sum_y = np.sum(np.square(dx_phase[unrav_coords] - avg_x_tile), axis = 1), np.sum(np.square(dy_phase[unrav_coords] - avg_y_tile), axis = 1)\n zmn[np.unravel_index(right, (N, N))] = (np.sqrt(sum_x) + np.sqrt(sum_y)) / (k**2)\n\n return zmn",
"def filter_wrapped_phase(image, k):\n ny, nx = image.shape\n assert(ny == nx) ## assert a square image for simplicity\n if (k%2 == 0):\n print(\"k has to be an integer!\")\n return\n N = nx\n i, j = np.arange(N), np.arange(N)\n ii, jj = np.meshgrid(i, j)\n filt_psi = np.zeros((N,N))\n\n inside = (jj[k/2:N-(k/2), k/2:N-(k/2)].flatten(), ii[k/2:N-(k/2), k/2:N-(k/2)].flatten())\n krange = np.linspace(-1 * (k/2), (k/2), k, dtype = 'int64') ## amount of added spaces, if k = 5, it ranges from -2 to 2\n krange_tile = np.tile(krange * N, (k, 1)).T ## tile them to make a (k/2)**2 matrix, containing for instance -2N, -N, 0, N, 2N for k=5\n k_tile = np.tile(krange, (k, 1)) ## tile to add to krange_tile\n coords_add = (krange_tile + k_tile).flatten() ## all coordinates, in a (k/2)**2 matrix, from -2N - 2: -2N + 2, -N-2 : -N+2 , -2 : 2, N -2 : N +2, 2N -2 : 2N +2\n inside = np.ravel_multi_index(inside, (N, N))\n coords_add = np.tile(coords_add, (len(inside), 1)) ## stack all differences to add to inside\n inside_tile = np.tile(inside, (coords_add.shape[1],1)).T ## stack all inside to add to differences\n all_coords = inside_tile + coords_add### a matrix of len(inside) x (k/2)**2 with all coordinates in a k x k square around a certain coordinate\n unrav_coords = np.unravel_index(all_coords, (N, N)) ## unraveled coordinates of all coordinates\n sum_sin_psi = np.sum(np.sin(image[unrav_coords]), axis = 1) ## sum over a sin (psi) over a k x k square\n sum_cos_psi = np.sum(np.cos(image[unrav_coords]), axis = 1) ## sum over a cos (psi) over a k x k square\n psi_app = np.arctan2(sum_sin_psi, sum_cos_psi)\n filt_psi[np.unravel_index(inside, (N, N))] = psi_app \n\n #### top layers\n for i in range(k/2):\n ## for indices directly above the \"inside square\"\n top = (jj[i, k/2:N-(k/2)].flatten(), ii[i, k/2: N - (k/2)].flatten())\n coords_add = (krange_tile + k_tile)[(k/2)-i:, :].flatten()\n top = np.ravel_multi_index(top, (N, N))\n coords_add = np.tile(coords_add, (len(top), 1))\n top_tile = np.tile(top, (coords_add.shape[1],1)).T\n top_coords = top_tile + coords_add\n unrav_coords = np.unravel_index(top_coords, (N, N))\n sum_sin_top = np.sum(np.sin(image[unrav_coords]), axis = 1)\n sum_cos_top = np.sum(np.cos(image[unrav_coords]), axis = 1)\n psi_top = np.arctan2(sum_sin_top, sum_cos_top)\n filt_psi[np.unravel_index(top, (N, N))] = psi_top\n\n ## indices directly below the \"inside square\"\n bot = (jj[N- 1 - i, k/2:N-(k/2)].flatten(), ii[N-1-i, k/2: N - (k/2)].flatten()) ## starting at the bottom working inwards\n coords_add = (krange_tile + k_tile)[:(k/2) + 1 + i, :].flatten()\n bot = np.ravel_multi_index(bot, (N, N))\n coords_add = np.tile(coords_add, (len(top), 1))\n bot_tile = np.tile(bot, (coords_add.shape[1],1)).T\n bot_coords = bot_tile + coords_add\n unrav_coords = np.unravel_index(bot_coords, (N, N))\n sum_sin_bot = np.sum(np.sin(image[unrav_coords]), axis = 1)\n sum_cos_bot = np.sum(np.cos(image[unrav_coords]), axis = 1)\n psi_bot = np.arctan2(sum_sin_bot, sum_cos_bot)\n filt_psi[np.unravel_index(bot, (N, N))] = psi_bot\n\n ## indices directly left of the \"inside square\"\n left = (jj[k/2:N-(k/2), i].flatten(), ii[k/2:N-(k/2), i].flatten()) ## starting at the bottom working inwards\n coords_add = (krange_tile + k_tile)[:, (k/2)-i:].flatten()\n left = np.ravel_multi_index(left, (N, N))\n coords_add = np.tile(coords_add, (len(left), 1))\n left_tile = np.tile(left, (coords_add.shape[1],1)).T\n left_coords = left_tile + coords_add\n unrav_coords = np.unravel_index(left_coords, (N, N))\n sum_sin_left = 
np.sum(np.sin(image[unrav_coords]), axis = 1)\n sum_cos_left = np.sum(np.cos(image[unrav_coords]), axis = 1)\n psi_left = np.arctan2(sum_sin_left, sum_cos_left)\n filt_psi[np.unravel_index(left, (N, N))] = psi_left\n\n ## indices directly left of the \"inside square\"\n right = (jj[k/2:N-(k/2), N - 1 - i].flatten(), ii[k/2:N-(k/2), N - 1 - i].flatten()) ## starting at the bottom working inwards\n coords_add = (krange_tile + k_tile)[:, :(k/2)+1+i].flatten()\n right = np.ravel_multi_index(right, (N, N))\n coords_add = np.tile(coords_add, (len(right), 1))\n right_tile = np.tile(right, (coords_add.shape[1],1)).T\n right_coords = right_tile + coords_add\n unrav_coords = np.unravel_index(right_coords, (N, N))\n sum_sin_right = np.sum(np.sin(image[unrav_coords]), axis = 1)\n sum_cos_right = np.sum(np.cos(image[unrav_coords]), axis = 1)\n psi_right = np.arctan2(sum_sin_right, sum_cos_right)\n filt_psi[np.unravel_index(right, (N, N))] = psi_right\n \n ## calculate boundaries diagonals\n left_t, right_t, left_b, right_b = (i, i), (i, -1 -i), (-1 - i, i), (-1 - i, -1 - i) \n left_t, right_t, left_b, right_b = (jj[left_t], ii[left_t]), (jj[right_t], ii[right_t]), (jj[left_b], ii[left_b]), (jj[right_b], ii[right_b])\n left_t, right_t, left_b, right_b = np.ravel_multi_index(left_t, (N, N)), np.ravel_multi_index(right_t, (N, N)), np.ravel_multi_index(left_b, (N, N)), np.ravel_multi_index(right_b, (N, N))\n coord_mat = krange_tile + k_tile\n coords_add_lt, coords_add_rt, coords_add_lb, coords_add_rb = coord_mat[(k/2)-i:, (k/2)-i:].flatten(), coord_mat[(k/2)-i:, :(k/2)+1+i].flatten(), coord_mat[:(k/2)+i+1, (k/2)-i:].flatten(), coord_mat[:(k/2)+i+1, :(k/2)+i+1].flatten()\n coords_add_tot = np.vstack((coords_add_lt, coords_add_rt, coords_add_lb, coords_add_rb))\n lt_tile, rt_tile, lb_tile, rb_tile = np.tile(left_t, (coords_add_lt.shape[0],1)).T, np.tile(right_t, (coords_add_lt.shape[0],1)).T, np.tile(left_b, (coords_add_lt.shape[0],1)).T, np.tile(right_b, (coords_add_lt.shape[0],1)).T\n coords_tile_tot = np.squeeze(np.stack((lt_tile, rt_tile, lb_tile, rb_tile)))\n coords_tot = coords_add_tot + coords_tile_tot\n unrav_coords = np.unravel_index(coords_tot, (N, N))\n sum_sin_diag = np.sum(np.sin(image[unrav_coords]), axis = 1)\n sum_cos_diag = np.sum(np.cos(image[unrav_coords]), axis = 1)\n psi_diag = np.arctan(sum_sin_diag, sum_cos_diag)\n filt_psi[np.unravel_index(np.stack((left_t, right_t, left_b, right_b)), (N, N))] = psi_diag\n\n return filt_psi",
"def find_components(image,deltaPix,lens_rad_arcsec = 6.0,lens_rad_ratio = None,\n center_x = None,center_y = None, gal_rad_ratio = 0.1,\n min_size_arcsec=0.7,thresh=0.5, many_sources = True,\n show_locations=False, title = None):\n\n # convert minimum component size in pixel units\n min_size = int(min_size_arcsec / deltaPix)\n \n #Convert lens radius and central galaxy radius to pixels\n if lens_rad_ratio == None:\n lens_rad = int(lens_rad_arcsec / deltaPix)\n else: lens_rad = int(len(image) * lens_rad_ratio)\n gal_rad = int(len(image) * gal_rad_ratio)\n \n \n# im2[im2 < im2.min() + 10.*thresh] = 0.\n \n # downscale source image to data resolution (for speed + easier for converting to data units)\n #down = image_util.re_size(image, factor=supersampling_factor_source)\n \n # apply laplacian of gaussian (LoG) filter to enhance maxima\n LoG = - gaussian_laplace(deepcopy(image), sigma = min_size, mode='constant', cval=0.) \n \n# LoG = - gaussian_laplace(deepcopy(im2), sigma = 2., mode='constant', cval=0.)\n \n filtered = deepcopy(LoG)\n \n# print(LoG.min(),LoG.max(),np.abs(LoG.min()) + thresh )\n \n# print(type(filtered))\n \n #background mean and std of filtered image \n corners = np.zeros([4,5,5])\n corners[0] = LoG[0:5,0:5]\n corners[1] = LoG[-5:,0:5]\n corners[2] = LoG[0:5,-5:]\n corners[3] = LoG[-5:,-5:]\n means = []\n stds = []\n for c in corners:\n mn,med,s = sigma_clipped_stats(c,sigma=3.0)\n means.append(mn)\n stds.append(s)\n \n stds=np.array(stds)\n means = np.array(means)\n means_std = np.std(means)\n# means_good = means[(means >= means.mean() - 1.0 * means_std) & (means <= means.mean() + 1.0 * means_std)]\n means_good = means[(np.abs(means) <= np.abs(means).min() + 1.0 * means_std)]\n mean_bg = np.mean(means_good)\n std_bg = np.mean(stds[(np.abs(means) <= np.abs(means).min() + 1.0 * means_std)])\n# print('LoG means: {}, Log means std: {}, Log means good: {}, LoG avg mean: {}'.format(means,means_std,means_good,mean_bg))\n# print('min: {}, max: {}, cut: {}'.format(LoG.min(),LoG.max(),mean_bg + thresh))\n# print(LoG.min(),LoG.max(),filtered.min() + thresh)\n \n \n # assume all value below max*threshold can not be maxima, so put all to zero\n# filtered[filtered < thresh*filtered.max()] = 0.\n \n# assume all value below min*threshold can not be maxima, so put all to zero\n# filtered[filtered < filtered.min() + thresh * np.abs(filtered.min())] = 0.\n# filtered[filtered < mean_bg + thresh] = 0.\n filtered[filtered < mean_bg + 6.*std_bg] = 0. #set pixels below the mean + 6x threshold to 0\n \n # find coordinates of local maxima\n #print(int(0.5 * min_size))\n max_idx_2d_small = peak_local_max(filtered, min_distance=0) #All bright pixels\n max_idx_2d_large = peak_local_max(filtered, min_distance=1) #peaks with min size of 1 pixel\n \n x_list_small, y_list_small = max_idx_2d_small[:, 1], max_idx_2d_small[:, 0]\n x_list_large, y_list_large = max_idx_2d_large[:, 1], max_idx_2d_large[:, 0]\n \n im_center_x, im_center_y = len(image) / 2., len(image) / 2. 
#center of image\n \n if (center_x == None) & (center_y == None):\n new_center_x, new_center_y = im_center_x,im_center_y\n else:\n new_center_x, new_center_y = center_x,center_y #new \"center\" = location of lens galaxy\n \n \n #distance of each detected peak from center\n R_small = np.sqrt((x_list_small - new_center_x)**2 + (y_list_small - new_center_y)**2) \n R_large = np.sqrt((x_list_large - new_center_x)**2 + (y_list_large - new_center_y)**2)\n \n #Contaminant light is only bright pixels further from center than lens_rad\n x_sats, y_sats = x_list_small[R_small > lens_rad], y_list_small[R_small > lens_rad]\n \n if many_sources:\n x_lens, y_lens = deepcopy(x_list_small), deepcopy(y_list_small)\n else:\n x_lens, y_lens = deepcopy(x_list_large), deepcopy(y_list_large)\n \n# x_lens, y_lens = x_list_small[R_small <= lens_rad], y_list_small[R_small <= lens_rad]\n \n if (len(x_lens) == 0) & (len(y_lens) == 0):\n x_lens = [0,15]\n y_lens = [0,15]\n \n sources = QTable([x_lens, y_lens],names={'x_local_peak','y_local_peak'}) #make table of all detected objects\n# print(x_list_large)\n# print(y_list_large)\n# print(sources)\n \n # show maxima on image for debug\n \n if show_locations:\n# fig = plt.figure(figsize=(4, 4))\n #plt.imshow(image, origin='lower', cmap=cmap_flux, norm=LogNorm(1e-2))\n \n f, axes = plt.subplots(1, 5, figsize=(20,5), sharex=False, sharey=False)\n# plt.figure(figsize = (8,8))\n# plt.subplot(1,2,1)\n \n axes[0].imshow(image, origin='lower', norm=SymLogNorm(5))\n axes[0].set_title('Image')\n axes[0].set_axis_off()\n \n \n axes[1].imshow(LoG, origin='lower', norm=SymLogNorm(5))\n axes[1].set_title('LoG Filtered Image')\n axes[1].set_axis_off()\n\n# plt.subplot(1,2,2)\n axes[2].imshow(filtered, origin='lower', norm=SymLogNorm(5))\n axes[2].set_title('Final Filtered Image')\n axes[2].set_axis_off()\n \n axes[3].imshow(image, origin='lower', norm=SymLogNorm(5))\n for i in range(len(x_lens)):\n axes[3].scatter([x_lens[i]], [y_lens[i]], c='red', s=60, marker='+')\n \n for i in range(len(x_list_large)):\n axes[3].scatter([x_list_large[i]], [y_list_large[i]], c='black', s=100, marker='x')\n axes[3].set_title('Detected Objects')\n axes[3].set_axis_off()\n \n axes[4].imshow(image, origin='lower', norm=SymLogNorm(5))\n \n for i in range(len(x_sats)):\n axes[4].scatter([x_sats[i]], [y_sats[i]], c='red', s=60, marker='+')\n \n# plt.annotate(i+1, (x_list[i], y_list[i]), color='black')\n \n# for i in range(len(x_mask)):\n# plt.scatter([x_mask[i]], [y_mask[i]], c='red', s=100, marker='*')\n# plt.annotate(i+1, (x_mask[i], y_mask[i]), color='red')\n axes[4].scatter(new_center_x, new_center_y,c='red', s=100, marker='*')\n \n draw_lens_circle = Circle((new_center_x, new_center_y),lens_rad ,fill=False)\n draw_gal_circle = Circle((new_center_x, new_center_y),gal_rad, fill = False)\n# plt.gcf().gca().add_artist(draw_lens_circle)\n# plt.gcf().gca().add_artist(draw_gal_circle)\n axes[4].add_patch(draw_lens_circle)\n# axes[4].add_patch(draw_gal_circle)\n \n axes[4].set_title('Pixels to Mask: \\n r = {:.3f}'.format(lens_rad_arcsec))\n axes[4].text(1, 1, \"detected components\", color='red')\n axes[4].set_axis_off()\n \n if title != None:\n f.suptitle(title, fontsize = 15)\n# plt.show()\n \n \n return (x_sats, y_sats), (new_center_x, new_center_y), sources",
"def at_binPhaseFold(self,ph,bwmin):\n\t # Default dtype\n\t dtype = [('count', '<f8'), \n\t ('mean', '<f8'), \n\t ('std', '<f8'), \n\t ('med', '<f8'), \n\t ('tb', '<f8')] \n\n\t key = 'lcPF%i' % ph\n\t d = dict(ph=ph,bwmin=bwmin,key=key)\n\t desc = 'Binned %(key)s light curve ph=%(ph)i, binsize=%(bwmin)i' % d\n\t name = 'blc%(bwmin)iPF%(ph)i' % d\n\n\t assert hasattr(self,key),'Must run at_phaseFold first' \n\t lcPF = getattr(self,key)\n\t lcPF = pd.DataFrame(lcPF['tPF f'.split()])\n \n\t if len(lcPF) < 2:\n\t print(\"Phase-folded photometry has less than 2 valid values\")\n\t print(\"Adding in place holder array and terminating\") \n\t blcPF = np.zeros(2,dtype)\n\t self.add_dset(name,blcPF,description=desc) \n\t return None\n\n\t # Add a tiny bit to xma to get the last element\n\t bw = bwmin / 60./24. # converting minutes to days\n\t xmi,xma = lcPF.tPF.min(),lcPF.tPF.max() \n\t nbins = int( np.round( (xma-xmi)/bw ) )\n\t bins = np.linspace(xmi-0.001,xma,nbins+1)\n\t tb = 0.5*(bins[1:]+bins[:-1])\n\n\t # Compute info along columns\n\t g = lcPF.groupby(pd.cut(lcPF.tPF,bins))\n\t blcPF = g['f'].agg([np.size, np.mean, np.std, np.median])\n\t blcPF['tb'] = tb\n\t blcPF = blcPF.rename(columns={'size':'count','median':'med'})\n\t blcPF = blcPF.dropna()\n\t blcPF = blcPF.to_records(index=False)\n\t self.add_dset(name,blcPF,description=desc)",
"def at_binPhaseFold(self,ph,bwmin):\n\t # Default dtype\n\t dtype = [('count', '<f8'), \n\t ('mean', '<f8'), \n\t ('std', '<f8'), \n\t ('med', '<f8'), \n\t ('tb', '<f8')] \n\n\t key = 'lcPF%i' % ph\n\t d = dict(ph=ph,bwmin=bwmin,key=key)\n\t desc = 'Binned %(key)s light curve ph=%(ph)i, binsize=%(bwmin)i' % d\n\t name = 'blc%(bwmin)iPF%(ph)i' % d\n\n\t assert hasattr(self,key),'Must run at_phaseFold first' \n\t lcPF = getattr(self,key)\n\t lcPF = pd.DataFrame(lcPF['tPF f'.split()])\n \n\t if len(lcPF) < 2:\n\t print(\"Phase-folded photometry has less than 2 valid values\")\n\t print(\"Adding in place holder array and terminating\") \n\t blcPF = np.zeros(2,dtype)\n\t self.add_dset(name,blcPF,description=desc) \n\t return None\n\n\t # Add a tiny bit to xma to get the last element\n\t bw = bwmin / 60./24. # converting minutes to days\n\t xmi,xma = lcPF.tPF.min(),lcPF.tPF.max() \n\t nbins = int( np.round( (xma-xmi)/bw ) )\n\t bins = np.linspace(xmi-0.001,xma,nbins+1)\n\t tb = 0.5*(bins[1:]+bins[:-1])\n\n\t # Compute info along columns\n\t g = lcPF.groupby(pd.cut(lcPF.tPF,bins))\n\t blcPF = g['f'].agg([np.size, np.mean, np.std, np.median])\n\t blcPF['tb'] = tb\n\t blcPF = blcPF.rename(columns={'size':'count','median':'med'})\n\t blcPF = blcPF.dropna()\n\t blcPF = blcPF.to_records(index=False)\n\t self.add_dset(name,blcPF,description=desc)",
"def identify_leaflets(u, time_ts):\n z = u.select_atoms(\"all\").center_of_geometry()[2]\n COM_z= np.array([0,0,z]) #defines the global midplane position along z\n x, y, z = u.trajectory.ts.triclinic_dimensions[0][0], u.trajectory.ts.triclinic_dimensions[1][1], u.trajectory.ts.triclinic_dimensions[2][2]\n box = np.array([x, y, z, 90, 90, 90]) \n ### Determining side of the bilayer CHOL belongs to in this frame\n lipid1 = 'CHL'\n lipid2 = 'DLIP'\n lipid3 = 'SSM'\n lipid4 = 'DSPC'\n \n lpd1_atoms = u.select_atoms('resname %s and name O2'%lipid1) \n lpd2_atoms = u.select_atoms('resname %s and name P '%lipid2) \n lpd3_atoms = u.select_atoms('resname %s and name P '%lipid3) \n lpd4_atoms = u.select_atoms('resname %s and name P '%lipid4)\n \n num_lpd2 = lpd2_atoms.n_atoms\n num_lpd3 = lpd3_atoms.n_atoms\n num_lpd4 = lpd4_atoms.n_atoms \n # atoms in the upper leaflet as defined by insane.py or the CHARMM-GUI membrane builders\n # select cholesterol headgroups within 1.5 nm of lipid headgroups in the selected leaflet\n # this must be done because CHOL rapidly flip-flops between leaflets\n # so we must assign CHOL to each leaflet at every time step, and in large systems\n # with substantial membrane undulations, a simple cut-off in the z-axis just will not cut it\n if side == 'up':\n lpd2i = lpd2_atoms[:int((num_lpd2)/2)]\n lpd3i = lpd3_atoms[:int((num_lpd3)/2)]\n lpd4i = lpd4_atoms[:int((num_lpd4)/2)]\n \n\n lipids = lpd2i + lpd3i + lpd4i \n\n ns_lipids = NS.AtomNeighborSearch(lpd1_atoms, box=box) \n lpd1i = ns_lipids.search(lipids,15.0) #1.5 nm\n leaflet = lpd1i + lpd2i + lpd3i + lpd4i \n\n elif side == 'down':\n lpd2i = lpd2_atoms[int((num_lpd2)/2):]\n lpd3i = lpd3_atoms[int((num_lpd3)/2):]\n lpd4i = lpd4_atoms[int((num_lpd4)/2):]\n\n lipids = lpd2i + lpd3i + lpd4i #+ lpd3i\n \n ns_lipids = NS.AtomNeighborSearch(lpd1_atoms, box=box)\n lpd1i = ns_lipids.search(lipids,15.0) # 1.5nm\n leaflet = lpd1i + lpd2i + lpd3i+ lpd4i \n return lpd1i, lpd2i, lpd3i, lpd4i, COM_z, box, leaflet",
"def plotVolumeContours(self):\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n fig.set_size_inches(18.5, 9.5)\n ax.set_title('Particle Positions Colored by Voronoi Volumes', fontsize=22)\n ax.set_xlabel('x [m]', fontsize=18)\n ax.set_ylabel('y [m]', fontsize=18)\n ax.set_zlabel('z [m]', fontsize=18)\n pos = ax.scatter(self.data[self.nonB, 0], self.data[self.nonB, 1], self.data[self.nonB, 2], s=10, c=self.volumes, cmap='plasma')\n cbar = fig.colorbar(pos, ax=ax)\n cbar.ax.tick_params(labelsize=15)",
"def belt(image):\n\n # Belt Detector\n x, y = circular_detector(image, 70, 80)\n\n return x, y",
"def plotGlassbrainSlices(niftipath, mnipath, ortho='z', nRows=2, nCuts=6,\n threshpos=0, threshneg=0, figLayout='Both',\n showLRannot=True, findOptimalCut=True,\n imageType='svg'):\n\n # Initiation of relevant parameters\n img = nb.load(niftipath)\n lineW = 2. / (nRows + int((figLayout == 'Brain' or figLayout == 'Both')))\n\n # Reduce 4D volume to 3D\n if len(img.shape) == 4:\n data4D = img.get_data()\n data4D = data4D.reshape(data4D.shape[:-1])\n img = Nifti1Image(data4D, img.get_affine())\n\n # Get voxel extend in all directions\n dirMin = np.dot(img.get_affine(), [0, 0, 0, 1])[:3]\n dirMax = np.dot(img.get_affine(),\n np.array(img.shape).tolist() + [1])[:3]\n\n if findOptimalCut:\n # Find cuts automatically\n cut_coords = find_cut_slices(img, direction=ortho, n_cuts=nCuts)\n else:\n # Split orientation in x-equal parts\n cut_coords = getEqualSpacing(dirMin, dirMax, ortho, nCuts)\n\n # Split cuts according nRows\n cut_coords = [cut_coords[int(i * len(cut_coords) / np.float(nRows)):\n int((i + 1) * len(cut_coords) / np.float(nRows))]\n for i in range(nRows)]\n\n # Create Slices\n for i in range(nRows):\n\n # Create axes for plotting\n ax = plt.subplot(nRows + int((figLayout == 'Brain' or\n figLayout == 'Both')),\n 1, i + 1)\n\n # Plot the white background for all slices as a zeros value brain\n # (without it, the view focuses around the first area plotted)\n zerobrain = Nifti1Image(img.get_data() * 0, img.get_affine())\n brain = plot_roi(\n zerobrain, zerobrain, colorbar=False, cut_coords=cut_coords[i],\n display_mode=ortho, alpha=1, draw_cross=False, cmap=plt.cm.gray,\n black_bg=False, axes=ax, annotate=False)\n\n # Plot positive values\n posdata = np.copy(img.get_data())\n posdata[posdata <= threshpos] = 0.001 # = 0 crashes contour function\n posbrain = Nifti1Image(posdata, img.get_affine())\n brain.add_contours(\n posbrain, filled=False, cmap=plt.cm.hot, alpha=1, linewidths=lineW)\n\n # Plot negative values\n negdata = np.copy(img.get_data())\n negdata[negdata >= -threshneg] = 0.001 # = 0 crashes contour function\n negbrain = Nifti1Image(negdata, img.get_affine())\n brain.add_contours(\n negbrain, filled=False, cmap=plt.cm.winter, alpha=1,\n linewidths=lineW)\n\n # Plot outer MNI contours\n brain.add_contours(\n smooth_img(mnipath, 4), alpha=1, filled=False,\n levels=[100], linewidths=lineW, cmap=plt.cm.gray)\n\n # Plot inner MNI contours\n brain.add_contours(\n nb.load(mnipath), alpha=0.8, levels=[5000], linewidths=lineW,\n cmap=plt.cm.gray)\n\n # Add annotation if requested\n if figLayout == 'Both' or figLayout == 'Number':\n brain.annotate(left_right=showLRannot, size=int(12 * lineW))\n\n # Plot overview Brain at the bottom\n if figLayout == 'Brain' or figLayout == 'Both':\n\n # Create axes for overview brain\n ax = plt.subplot(nRows + 1, 1, nRows + 1)\n\n # Find overview view direction\n if ortho == 'z':\n direction = 'x'\n elif ortho == 'x':\n direction = 'z'\n elif ortho == 'y':\n direction = 'z'\n\n # Plot the white backgroundas a zeros value brain\n brain = plot_roi(\n zerobrain, zerobrain, colorbar=False, cut_coords=[0],\n display_mode=direction, alpha=1, draw_cross=False,\n cmap=plt.cm.gray, black_bg=False, axes=ax, annotate=False)\n\n # Plot positive values\n brain.add_contours(\n posbrain, filled=False, cmap=plt.cm.hot, alpha=1, linewidths=lineW)\n\n # Plot negative values\n brain.add_contours(\n negbrain, filled=False, cmap=plt.cm.winter, alpha=1,\n linewidths=lineW)\n\n # Plot outer MNI contours\n brain.add_contours(\n smooth_img(mnipath, 4), alpha=1, filled=False,\n 
levels=[100], linewidths=lineW, cmap=plt.cm.gray)\n\n # Plot inner MNI contours\n brain.add_contours(\n nb.load(mnipath), alpha=0.8, levels=[5000], linewidths=lineW,\n cmap=plt.cm.gray)\n\n # Plot the line indicating the cut\n for i in np.array(cut_coords).flatten():\n if ortho == 'z' or ortho == 'y':\n ax.plot([-100, 100], [i, i], 'k-', lw=lineW)\n elif ortho == 'x':\n ax.plot([i, i], [-100, 100], 'k-', lw=lineW)\n\n if ortho == 'z':\n ax.axis((-300.0, 300.0, dirMin[2], dirMax[2]))\n elif ortho == 'y':\n ax.axis((-300.0, 300.0, dirMin[1], dirMax[1]))\n elif ortho == 'x':\n stretcher = (nRows + 1) / 2.\n ax.axis((-300.0 * stretcher, 300.0 * stretcher, -100.0, 100.0))\n\n # Add annotation if requested\n if figLayout == 'Both' or figLayout == 'Number':\n brain.annotate(left_right=showLRannot, size=int(12 * lineW))\n\n # Get file prefix\n if niftipath.endswith('.nii'):\n filename = opb(niftipath)[:-4]\n elif niftipath.endswith('.nii.gz'):\n filename = opb(niftipath)[:-7]\n\n # Create output folder\n path2Figure = opj(os.path.split(os.path.realpath(niftipath))[0], 'figures')\n if not os.path.exists(opj(path2Figure)):\n os.makedirs(opj(path2Figure))\n\n # Save figure\n figname = '_'.join([filename, '%s-cut' % ortho])\n plt.savefig(opj(path2Figure, '%s.%s' % (figname, imageType)))\n plt.clf()",
"def add_phase_interconnections(net, snow_partitioning_n, voxel_size=1,\n marching_cubes_area=False,\n alias=None):\n # -------------------------------------------------------------------------\n # Get alias if provided by user\n im = snow_partitioning_n.im\n al = _create_alias_map(im, alias=alias)\n # -------------------------------------------------------------------------\n # Find interconnection and interfacial area between ith and jth phases\n conns1 = net['throat.conns'][:, 0]\n conns2 = net['throat.conns'][:, 1]\n label = net['pore.label'] - 1\n\n num = snow_partitioning_n.phase_max_label\n num = [0, *num]\n phases_num = np.unique(im * 1)\n phases_num = np.trim_zeros(phases_num)\n for i0, i1 in enumerate(phases_num):\n loc1 = np.logical_and(conns1 >= num[i0], conns1 < num[i0 + 1])\n loc2 = np.logical_and(conns2 >= num[i0], conns2 < num[i0 + 1])\n loc3 = np.logical_and(label >= num[i0], label < num[i0 + 1])\n net['throat.{}'.format(al[i1])] = loc1 * loc2\n net['pore.{}'.format(al[i1])] = loc3\n if i1 == phases_num[-1]:\n loc4 = np.logical_and(conns1 < num[-1], conns2 >= num[-1])\n loc5 = label >= num[-1]\n net['throat.boundary'] = loc4\n net['pore.boundary'] = loc5\n for j0, j1 in enumerate(phases_num):\n if j0 > i0:\n pi_pj_sa = np.zeros_like(label, dtype=float)\n loc6 = np.logical_and(conns2 >= num[j0], conns2 < num[j0 + 1])\n pi_pj_conns = loc1 * loc6\n net['throat.{}_{}'.format(al[i1], al[j1])] = pi_pj_conns\n if any(pi_pj_conns):\n # ---------------------------------------------------------\n # Calculates phase[i] interfacial area that connects with\n # phase[j] and vice versa\n p_conns = net['throat.conns'][:, 0][pi_pj_conns]\n s_conns = net['throat.conns'][:, 1][pi_pj_conns]\n ps = net['throat.area'][pi_pj_conns]\n p_sa = np.bincount(p_conns, ps)\n # trim zeros at head/tail position to avoid extra bins\n p_sa = np.trim_zeros(p_sa)\n i_index = np.arange(min(p_conns), max(p_conns) + 1)\n j_index = np.arange(min(s_conns), max(s_conns) + 1)\n s_pa = np.bincount(s_conns, ps)\n s_pa = np.trim_zeros(s_pa)\n pi_pj_sa[i_index] = p_sa\n pi_pj_sa[j_index] = s_pa\n # ---------------------------------------------------------\n # Calculates interfacial area using marching cube method\n if marching_cubes_area:\n ps_c = net['throat.area'][pi_pj_conns]\n p_sa_c = np.bincount(p_conns, ps_c)\n p_sa_c = np.trim_zeros(p_sa_c)\n s_pa_c = np.bincount(s_conns, ps_c)\n s_pa_c = np.trim_zeros(s_pa_c)\n pi_pj_sa[i_index] = p_sa_c\n pi_pj_sa[j_index] = s_pa_c\n net[f'pore.{al[i1]}_{al[j1]}_area'] = pi_pj_sa * voxel_size ** 2\n return net",
"def showComponents(self, mask):\n\n from skimage import measure\n\n thresh = cv2.threshold(mask, 0, 255, cv2.THRESH_BINARY)[1]\n labels = measure.label(thresh, neighbors=8, background=0)\n for label in range(0,len(labels)):\n img = np.zeros(mask.shape)\n # if this is the background label, ignore it\n if label == 0:\n continue\n img[labels==label]=255\n numPixels = cv2.countNonZero(img)\n\n \t# if the number of pixels in the component is sufficiently\n \t# large, then add it to our mask of \"large blobs\"\n if numPixels > 500:\n showme(img, 'Contour '+str(label))",
"def refine_droplet(\n phase_field: ScalarField,\n droplet: DiffuseDroplet,\n least_squares_params: Optional[Dict[str, Any]] = None,\n) -> DiffuseDroplet:\n assert isinstance(phase_field, ScalarField)\n if least_squares_params is None:\n least_squares_params = {}\n\n if not isinstance(droplet, DiffuseDroplet):\n droplet = DiffuseDroplet.from_droplet(droplet)\n if droplet.interface_width is None:\n droplet.interface_width = phase_field.grid.typical_discretization\n\n # enlarge the mask to also contain the shape change\n mask = droplet._get_phase_field(phase_field.grid, dtype=np.bool_)\n dilation_iterations = 1 + int(2 * droplet.interface_width)\n mask = ndimage.morphology.binary_dilation(mask, iterations=dilation_iterations)\n\n # apply the mask\n data_mask = phase_field.data[mask]\n\n # determine the coordinate constraints and only vary the free data points\n data_flat = structured_to_unstructured(droplet.data) # unstructured data\n dtype = droplet.data.dtype\n free = np.ones(len(data_flat), dtype=np.bool_)\n free[phase_field.grid.coordinate_constraints] = False\n\n # determine data bounds\n l, h = droplet.data_bounds\n bounds = l[free], h[free]\n\n def _image_deviation(params):\n \"\"\"helper function evaluating the residuals\"\"\"\n # generate the droplet\n data_flat[free] = params\n droplet.data = unstructured_to_structured(data_flat, dtype=dtype)\n droplet.check_data()\n img = droplet._get_phase_field(phase_field.grid)[mask]\n return img - data_mask\n\n # do the least square optimization\n result = optimize.least_squares(\n _image_deviation, data_flat[free], bounds=bounds, **least_squares_params\n )\n data_flat[free] = result.x\n droplet.data = unstructured_to_structured(data_flat, dtype=dtype)\n\n # normalize the droplet position\n grid = phase_field.grid\n coords = grid.point_from_cartesian(droplet.position)\n droplet.position = grid.point_to_cartesian(grid.normalize_point(coords))\n\n return droplet",
"def yellow_points(self, plan, dlvl_sag_img, rphase_sag_img, root_cor_sequence, cor_sequence, sag_sequence):\n # print(\">>>> Sag. Seq: {}\".format(sag_sequence))\n # print(\">>>> Cor. Seq: {}\".format(cor_sequence))\n\n lpatterns = [x for x in self.dataset if int(x.split('-')[1]) == cor_sequence]\n\n # column = self.cor_sequences.index(cor_sequence)\n\n pattern = [p for p in lpatterns if int(p.split('-')[3]) == sag_sequence][0]\n\n pts_pattern = self.pattern_coronal('{}.png'.format(pattern))\n diaph_lvl = [max(x) for x in self.diaphragmatic_level_coronal(pts_pattern)]\n resp_phase = self.respiratory_phase_coronal(self.diaphragmatic_level_coronal(pts_pattern))\n\n index_imgs_registered = list() # Store index of the coronal registered images\n for index, i in enumerate(diaph_lvl):\n if i == dlvl_sag_img[self.cor_sequences.index(cor_sequence)]:\n index_imgs_registered.append(index)\n # print(\"(Step 3) Index of registered images: {} ({})\\n\".format(index_imgs_registered, len(index_imgs_registered)))\n\n for index, i in enumerate(resp_phase):\n if index in index_imgs_registered:\n if resp_phase[index] != rphase_sag_img[self.cor_sequences.index(cor_sequence)]:\n index_imgs_registered.remove(index)\n # print(\"(Step 3) Index of registered images: {} ({})\\n\".format(index_imgs_registered, len(index_imgs_registered)))\n\n return index_imgs_registered",
"def locate_droplets_in_mask(grid: GridBase, mask: np.ndarray) -> Emulsion:\n if isinstance(grid, CartesianGridBase):\n return _locate_droplets_in_mask_cartesian(grid, mask)\n elif isinstance(grid, SphericalSymGridBase):\n return _locate_droplets_in_mask_spherical(grid, mask)\n elif isinstance(grid, CylindricalSymGrid):\n return _locate_droplets_in_mask_cylindrical(grid, mask)\n elif isinstance(grid, GridBase):\n raise NotImplementedError(f\"Locating droplets is not possible for grid {grid}\")\n else:\n raise ValueError(f\"Invalid grid {grid}\")",
"def apply(self, mode='lateral'):\n num_lat_slices = self.img3d.shape[0]\n num_cor_slices = self.img3d.shape[2]\n bin_mask = np.zeros(self.mask3d.shape)\n x,y,z = np.where(self.mask3d==self.vertebra_id)\n bin_mask[np.min(x):np.max(x), np.min(y):np.max(y), np.min(z):np.max(z)] = 1\n if mode=='lateral' or mode=='fuse':\n mask_lat = np.zeros((6, self.mask3d.shape[0], self.mask3d.shape[1], self.mask3d.shape[2]))\n img_lat = np.zeros(self.img3d.shape)\n binary_lat = np.zeros(self.mask3d.shape)\n # for each lateral slice\n for idx in range(num_lat_slices):\n img_slice, mask_slice = np.copy(self.img3d[idx, :, :]), np.copy(self.mask3d[idx, :, :])\n xloc, yloc = np.where(mask_slice==self.vertebra_id)\n # check if vertebra is present in image\n if xloc.shape[0]==0:\n # if not keep mask as it is\n mask_lat[:,idx, :, :] = self.get_one_hot(mask_slice)\n img_lat[idx, :, :] = img_slice\n else:\n min_x, max_x = np.min(xloc), np.max(xloc)\n min_y, max_y = np.min(yloc), np.max(yloc)\n inpainted_img, inpainted_mask, binary_mask = self.inpaint(img_slice, mask_slice, min_x, max_x, min_y, max_y)\n mask_lat[:,idx, :, :] = inpainted_mask\n img_lat[idx,:, :] = inpainted_img\n binary_lat[idx,:,:] = binary_mask\n\n\n if mode=='coronal' or mode=='fuse':\n mask_cor = np.zeros((6, self.mask3d.shape[0], self.mask3d.shape[1], self.mask3d.shape[2]))\n img_cor = np.zeros(self.img3d.shape)\n binary_cor = np.zeros(self.mask3d.shape)\n # for each coronal slice\n for idx in range(num_cor_slices):\n img_slice, mask_slice = np.copy(self.img3d[:, :, idx]), np.copy(self.mask3d[:, :, idx])\n xloc, yloc = np.where(mask_slice==self.vertebra_id)\n # check if vertebra is present in image\n if xloc.shape[0]==0:\n # if not keep mask as it is\n mask_cor[:, :, :, idx] = self.get_one_hot(mask_slice)\n img_cor[:, :, idx] = img_slice\n else:\n min_x, max_x = np.min(xloc), np.max(xloc)\n min_y, max_y = np.min(yloc), np.max(yloc)\n # else remove fractured vertebra and inpaint\n inpainted_img, inpainted_mask, binary_mask = self.inpaint(img_slice, mask_slice, min_x, max_x, min_y, max_y, 'coronal')\n mask_cor[:, :, :, idx] = inpainted_mask\n img_cor[:, :, idx] = inpainted_img\n binary_cor[:,:,idx] = binary_mask\n \n # return to a one channel mask and convert labels back\n if mode=='lateral':\n mask_lat = np.argmax(mask_lat, axis=0)\n mask_lat = self.map_class_to_vert(mask_lat)\n self.mask3d = mask_lat\n self.img3d = img_lat\n elif mode=='coronal':\n mask_cor = np.argmax(mask_cor, axis=0)\n mask_cor = self.map_class_to_vert(mask_cor)\n self.mask3d = mask_cor\n self.img3d = img_cor\n elif mode=='fuse':\n mask_fuse = mask_cor*0.5+mask_lat*0.5\n mask_fuse = np.argmax(mask_fuse, axis=0)\n mask_fuse = self.map_class_to_vert(mask_fuse)\n self.mask3d = mask_fuse\n self.img3d = (img_lat+img_cor)/2\n \n # save result\n self.mask3d = self.mask3d.astype(np.uint8)\n self.img3d = self.img3d.astype(np.float32)\n \n # put back if we padded and cropped\n if self.padz and self.padx:\n self.orig_img3d[:,self.ymin:self.ymax, :] = self.img3d[self.xcrop1:-self.xcrop2,:,self.zcrop1:-self.zcrop2]\n self.orig_mask3d[:,self.ymin:self.ymax, :] = self.mask3d[self.xcrop1:-self.xcrop2,:,self.zcrop1:-self.zcrop2]\n elif self.padz and not self.padx:\n self.orig_img3d[self.xcrop1:self.xcrop2,self.ymin:self.ymax, :] = self.img3d[:,:,self.zcrop1:-self.zcrop2]\n self.orig_mask3d[self.xcrop1:self.xcrop2,self.ymin:self.ymax, :] = self.mask3d[:,:,self.zcrop1:-self.zcrop2]\n elif not self.padz and self.padx:\n self.orig_img3d[:,self.ymin:self.ymax, self.zcrop1:self.zcrop2] = 
self.img3d[self.xcrop1:-self.xcrop2,:,:]\n self.orig_mask3d[:,self.ymin:self.ymax, self.zcrop1:self.zcrop2] = self.mask3d[self.xcrop1:-self.xcrop2,:,:]\n else:\n self.orig_img3d[self.xcrop1:self.xcrop2,self.ymin:self.ymax, self.zcrop1:self.zcrop2] = self.img3d\n self.orig_mask3d[self.xcrop1:self.xcrop2,self.ymin:self.ymax, self.zcrop1:self.zcrop2] = self.mask3d\n \n img = return_scan_to_orig(self.orig_img3d, self.mask_affine, self.mask_header, self.zooms)\n nib.save(img, self.inpainted_img_path)\n\n mask_fuse = return_scan_to_orig(self.orig_mask3d, self.mask_affine, self.mask_header, self.zooms, np.uint8)\n nib.save(mask_fuse, self.inpainted_mask_path)\n print('Inpaint mask and image saved at: ', self.inpainted_mask_path, self.inpainted_img_path)",
"def find_components_old(image,deltaPix,lens_rad_arcsec = 5.0,lens_rad_ratio = None, gal_rad_ratio = 0.1,min_size_arcsec=0.3,thresh=0.4, show_locations=False):\n\n # convert minimum component size in pixel units\n min_size = int(min_size_arcsec / deltaPix)\n \n #Convert lens radius and central galaxy radius to pixels\n if lens_rad_ratio == None:\n lens_rad = int(lens_rad_arcsec / deltaPix)\n else: lens_rad = int(len(image) * lens_rad_ratio)\n gal_rad = int(len(image) * gal_rad_ratio)\n \n # downscale source image to data resolution (for speed + easier for converting to data units)\n #down = image_util.re_size(image, factor=supersampling_factor_source)\n \n # apply laplacian of gaussian (LoG) filter to enhance maxima\n filtered = - gaussian_laplace(deepcopy(image), sigma = min_size, mode='constant', cval=0.)\n \n# print(filtered.min(),filtered.max(),filtered.min() + thresh * np.abs(filtered.min()))\n \n \n # assume all value below max*threshold can not be maxima, so put all to zero\n# filtered[filtered < thresh*filtered.max()] = 0.\n \n# assume all value below min*threshold can not be maxima, so put all to zero\n filtered[filtered < filtered.min() + thresh * np.abs(filtered.min())] = 0.\n \n if show_locations:\n plt.figure(figsize = (8,8))\n plt.subplot(1,2,1)\n plt.imshow(image, origin='lower', norm=SymLogNorm(5))\n plt.title('Image')\n\n plt.subplot(1,2,2)\n plt.imshow(filtered, origin='lower', norm=SymLogNorm(5))\n plt.title('Filtered Image')\n plt.show()\n \n # find coordinates of local maxima\n #print(int(0.5 * min_size))\n max_idx_2d_small = peak_local_max(filtered, min_distance=0)\n max_idx_2d_large = peak_local_max(filtered, min_distance=1)\n \n x_list_small, y_list_small = max_idx_2d_small[:, 1], max_idx_2d_small[:, 0]\n x_list_large, y_list_large = max_idx_2d_large[:, 1], max_idx_2d_large[:, 0]\n \n im_center_x, im_center_y = len(image) / 2., len(image) / 2.\n \n R = np.sqrt((x_list_large - im_center_x)**2 + (y_list_large - im_center_y)**2)\n new_center_x, new_center_y = x_list_large[R < gal_rad], y_list_large[R < gal_rad]\n \n if (len(new_center_x) > 1) and (len(x_list_large[R == R.min()]) ==1 ): \n new_center_x, new_center_y = x_list_large[R == R.min()], y_list_large[R == R.min()]\n elif (len(new_center_x) > 1) and (len(x_list_large[R == R.min()]) > 1 ): \n new_center_x, new_center_y = im_center_x, im_center_y\n elif len(new_center_x) == 0: \n new_center_x, new_center_y = im_center_x, im_center_y\n \n \n R_small = np.sqrt((x_list_small - new_center_x)**2 + (y_list_small - new_center_y)**2)\n R_large = np.sqrt((x_list_large - new_center_x)**2 + (y_list_large - new_center_y)**2)\n \n x_sats, y_sats = x_list_small[R_small > lens_rad], y_list_small[R_small > lens_rad]\n \n # show maxima on image for debug\n if show_locations:\n fig = plt.figure(figsize=(4, 4))\n #plt.imshow(image, origin='lower', cmap=cmap_flux, norm=LogNorm(1e-2))\n plt.imshow(image, origin='lower', norm=SymLogNorm(5))\n \n for i in range(len(x_sats)):\n plt.scatter([x_sats[i]], [y_sats[i]], c='red', s=60, marker='+')\n# plt.annotate(i+1, (x_list[i], y_list[i]), color='black')\n \n# for i in range(len(x_mask)):\n# plt.scatter([x_mask[i]], [y_mask[i]], c='red', s=100, marker='*')\n# plt.annotate(i+1, (x_mask[i], y_mask[i]), color='red')\n plt.scatter(new_center_x, new_center_y,c='red', s=100, marker='*')\n \n draw_lens_circle = Circle((new_center_x, new_center_y),lens_rad ,fill=False)\n draw_gal_circle = Circle((new_center_x, new_center_y),gal_rad, fill = False)\n plt.gcf().gca().add_artist(draw_lens_circle)\n 
plt.gcf().gca().add_artist(draw_gal_circle)\n plt.title('Detected Components')\n plt.text(1, 1, \"detected components\", color='red')\n fig.axes[0].get_xaxis().set_visible(True); fig.axes[0].get_yaxis().set_visible(True)\n plt.show()\n return (x_sats, y_sats), (new_center_x, new_center_y)",
"def classifyPhaseImage(fr_nb):\n phase_path = os.path.join(\"..\",'data','microglia','Beacon-1 unst',\"Scene1Interval\"+str(fr_nb)+\"_PHASE.png\")\n \n phase= Image.open(phase_path)\n phase = np.asarray(phase)\n X=phase.reshape(-1,1)\n from sklearn.cluster import KMeans\n kmeans = KMeans(n_clusters=3).fit(X)\n classified = kmeans.labels_\n classified=classified.reshape(phase.shape)\n si2(phase,classified,\"Phase image\",\"Classification\")\n return classified",
"def categoryFinder(bassSize):\n from string import Template\n catCollapse = {}\n sliceCount = {}\n theImportantSlices = []\n skippedThings = 0\n corpusSize = 0\n binnedThings = 0\n supersettedThings = 0\n #Load the pickled slices that have not been bass-normalized into types\n theSlices = pickle.load( open ('1122MajModeSliceDictwSDB.pkl', 'rb') )\n for i, slicey in enumerate(theSlices):\n if slicey == ['start'] or slicey == ['end']:\n continue\n #keep count of the total number of slices before reduction\n corpusSize += 1\n if theSlices[i+1] == ['end']:\n continue\n #First, deal with singletons of bass motion 0\n if len(slicey['voicing_type']) == 1 and theSlices[i]['bassMIDI'] - theSlices[i+1]['bassMIDI'] == 0:\n skippedThings += 1\n continue\n #Next, only look at cases where |bass motion| > bassSize\n if abs(theSlices[i+1]['bassMIDI'] - theSlices[i]['bassMIDI']) > bassSize:\n secondSlicePCs = []\n theKey = theSlices[i+1]['key']\n theTonic = str(theKey).split(' ')[0]\n theKeyPC = pitch.Pitch(theTonic).pitchClass\n keyTransPCs = [(n - theKeyPC)%12 for n in theSlices[i+1]['pcset']]\n for n in keyTransPCs:\n secondSlicePCs.append(n)\n firstSlicePCs = []\n theKey = theSlices[i]['key']\n theTonic = str(theKey).split(' ')[0]\n theKeyPC = pitch.Pitch(theTonic).pitchClass\n keyTransPCs = [(n - theKeyPC)%12 for n in theSlices[i]['pcset']]\n for m in keyTransPCs:\n firstSlicePCs.append(m)\n #make sure second thing is superset of first thing\n continueIfZero = 0\n #even one note wrong means no!\n for n in firstSlicePCs:\n if n not in secondSlicePCs:\n continueIfZero += 1\n break\n #If it passes bass motion and superset test, skip it\n if continueIfZero == 0:\n skippedThings += 1\n continue\n #if the slice is still around, it's \"important\" \n theImportantSlices.append(slicey)\n #Now, from the important ones, find voicing probs\n for slicey in theImportantSlices:\n theKey = slicey['key']\n theTonic = str(theKey).split(' ')[0]\n theKeyPC = pitch.Pitch(theTonic).pitchClass\n keyTransPCs = [(n - theKeyPC)%12 for n in slicey['pcset']]\n #rightChord = chord.Chord(sorted(keyTransPCs))\n slicey_label = (sorted(keyTransPCs),slicey['bassSD'])\n try:\n sliceCount[str(slicey_label)] += 1\n except KeyError:\n sliceCount[str(slicey_label)] = 1\n sliceProbs = getProbsFromFreqs(sliceCount)\n #Now make a list of the really important slices\n theReallyImportantSlices = []\n skipNext = 0\n #OK, now go again, looking for non-superset bass leaps\n for i, slicey in enumerate(theImportantSlices):\n if i == len(theImportantSlices) - 1:\n break\n if skipNext == 1:\n skipNext = 0\n continue\n #First, if there's no bass leap, just go on and add it like a normal slice\n if abs(theImportantSlices[i+1]['bassMIDI'] - theImportantSlices[i]['bassMIDI']) <= bassSize:\n theKeyPC = pitch.Pitch(str(slicey['key']).split(' ')[0]).pitchClass\n keyTransPCs = [(n - theKeyPC)%12 for n in slicey['pcset']]\n theReallyImportantSlices.append((sorted(keyTransPCs),slicey['bassSD']))\n continue\n #Next, only look at cases where |bass motion| > bassSize\n if abs(theImportantSlices[i+1]['bassMIDI'] - theImportantSlices[i]['bassMIDI']) > bassSize:\n combinedSlices = []\n theKeyPC = pitch.Pitch(str(slicey['key']).split(' ')[0]).pitchClass\n keyTransPCs = [(n - theKeyPC)%12 for n in slicey['pcset']]\n for n in keyTransPCs:\n combinedSlices.append(n)\n theKeyPC = pitch.Pitch(str(theImportantSlices[i+1]['key']).split(' ')[0]).pitchClass\n nextkeyTransPCs = [(n - theKeyPC)%12 for n in theImportantSlices[i+1]['pcset']]\n for m in nextkeyTransPCs:\n if m in 
combinedSlices:\n continue\n combinedSlices.append(m)\n sortedSlice = sorted(combinedSlices)\n #Pick whichever bass is literally lower in pitch, and use its SD for combo\n slicey_bass = slicey['bassMIDI']\n nextslice_bass = theImportantSlices[i+1]['bassMIDI']\n if slicey_bass <= nextslice_bass:\n bassSD = slicey['bassSD']\n if nextslice_bass < slicey_bass:\n bassSD = theImportantSlices[i+1]['bassSD']\n sortedSlice_type = (sortedSlice,bassSD)\n #If the combination never occurs, don't combine and move on\n try:\n testProb = sliceProbs[str(sortedSlice_type)]\n except KeyError:\n theKeyPC = pitch.Pitch(str(slicey['key']).split(' ')[0]).pitchClass\n keyTransPCs = [(n - theKeyPC)%12 for n in slicey['pcset']]\n theReallyImportantSlices.append((sorted(keyTransPCs),slicey['bassSD']))\n continue\n #Deal with singletons, which always have higher p\n #If both are singletons, move on:\n if len(slicey['pcset']) == 1 and len(theImportantSlices[i+1]['pcset']) == 1:\n continue\n #If the first is a singleton and second more probable than comb., move on\n elif len(slicey['pcset']) == 1 and len(theImportantSlices[i+1]['pcset']) > 1:\n if testProb < sliceProbs[str((sorted(nextkeyTransPCs),theImportantSlices[i+1]['bassSD']))]:\n continue\n #If the second is a singleton and first more probable than comb., move on\n elif len(theImportantSlices[i+1]['pcset']) == 1 and len(slicey['pcset']) > 1:\n if testProb < sliceProbs[str((sorted(keyTransPCs),slicey['bassSD']))]:\n continue\n #Otherwise, if p(comb) is less than either by themselves, move on\n elif testProb < sliceProbs[str((sorted(keyTransPCs),slicey['bassSD']))] or testProb < sliceProbs[str((sorted(nextkeyTransPCs),theImportantSlices[i+1]['bassSD']))]:\n continue\n #Once we rule out those cases, we know we want to combine.\n theReallyImportantSlices.append(sortedSlice_type)\n skipNext = 1\n binnedThings += 1\n #Tally up theReallyImportantSlices to get new sliceProbs\n #Now use sliceProbs to check the most common superset for each non-singleton slice\n sliceCount = {}\n for i, slicey in enumerate(theReallyImportantSlices):\n #if i > 10:\n # break\n if slicey == ['start'] or slicey == ['end'] or i == len(theReallyImportantSlices) - 1:\n continue\n if len(slicey[0]) == 1:\n continue\n slicey_prob = sliceProbs[str(slicey)]\n bestSupersetProb = slicey_prob\n bestSuperset = slicey\n #Find superset entries in sliceProbs with higher prob\n for key, probvalue in sliceProbs.iteritems():\n if probvalue < bestSupersetProb:\n continue\n #something funny here... what exactly does iteritems() do?\n keything = key.split('], ')[0]\n keyparts = keything.strip('([')\n if len(keyparts) == 1:\n listofPCs = [int(n) for n in keyparts]\n else:\n pclist = keyparts.split(', ')\n listofPCs = [int(n) for n in pclist]\n continueIfZero = 0\n #even one note wrong means no! 
For now, allow NEW bass note?\n for n in slicey[0]:\n if n not in listofPCs:\n continueIfZero += 1\n break\n if continueIfZero == 0:\n supersettedThings += 1\n bestSuperset = key\n bestSupersetProb = probvalue\n break\n #MESSED THIS UP\n if bestSuperset != str(slicey):\n #print bestSuperset, slicey\n try:\n catCollapse[str(bestSuperset)][str(slicey)] += 1\n except KeyError:\n try:\n catCollapse[str(bestSuperset)][str(slicey)] = 1\n except KeyError:\n catCollapse[str(bestSuperset)] = {}\n catCollapse[str(bestSuperset)][str(slicey)] = 1\n try:\n sliceCount[str((bestSuperset,bestSupersetProb))] += 1\n except KeyError:\n sliceCount[str((bestSuperset,bestSupersetProb))] = 1\n sorted_slicecount = sorted(sliceCount.iteritems(), key=operator.itemgetter(1), reverse=True)\n #export the probs as a csv file\n csvName = 'pcset superset tallies.csv'\n x = csv.writer(open(csvName, 'wb'))\n for pair in sorted_slicecount:\n x.writerow([pair[0], pair[1]]) \n #print \"supersetted things\",supersettedThings\n #now put the bigramTally in some kind of csv table\n \"\"\"\n cols = set()\n for row in catCollapse:\n for col in catCollapse[row]:\n cols.add(col)\n fieldnames = ['rowlabel'] + list(cols)\n #populate row labels\n for row in catCollapse:\n catCollapse[row]['rowlabel'] = row\n #write the CSV\n file = open('whatsincategories1122.csv', 'wb')\n #write the column headers\n #first, use plain CSV writer to write the field list\n lw = csv.writer(file)\n lw.writerow(fieldnames)\n #now write the body of the table\n #use a different CSV writer object\n dw = csv.DictWriter(file, fieldnames)\n for row in catCollapse:\n dw.writerow(catCollapse[row])\n \"\"\"",
"def populate_grid(self):\n from cemc_cpp_code import hoshen_kopelman\n self.bins[:, :, :] = 0\n for atom in self.atoms:\n if atom.symbol in self.track_elements:\n n = self.get_bin(atom.index)\n self.bins[n[0], n[1], n[2]] += 1\n\n # Run the Hoshen-Kopelman algorithm to label the \n # bins into clusters\n self.clusters = hoshen_kopelman(self.bins)",
"def _seg_image(self, x, y, r_cut=100):\n snr=self.snr\n npixels=self.npixels\n bakground = self.bakground\n error= self.bkg_rms(x,y,r_cut)\n kernel = self.kernel\n image_cutted = self.cut_image(x,y,r_cut)\n image_data = image_cutted\n threshold_detect_objs=detect_threshold(data=image_data, nsigma=snr,error=error)\n segments=detect_sources(image_data, threshold_detect_objs, npixels=npixels, filter_kernel=kernel)\n segments_deblend = deblend_sources(image_data, segments, npixels=npixels,nlevels=10)\n segments_deblend_info = source_properties(image_data, segments_deblend)\n nobjs = segments_deblend_info.to_table(columns=['id'])['id'].max()\n xcenter = segments_deblend_info.to_table(columns=['xcentroid'])['xcentroid'].value\n ycenter = segments_deblend_info.to_table(columns=['ycentroid'])['ycentroid'].value\n image_data_size = np.int((image_data.shape[0] + 1) / 2.)\n dist = ((xcenter - image_data_size) ** 2 + (ycenter - image_data_size) ** 2) ** 0.5\n c_index = np.where(dist == dist.min())[0][0]\n center_mask=(segments_deblend.data==c_index+1)*1 #supposed to be the data mask\n obj_masks = []\n for i in range(nobjs):\n mask = ((segments_deblend.data==i+1)*1)\n obj_masks.append(mask)\n xmin = segments_deblend_info.to_table(columns=['bbox_xmin'])['bbox_xmin'].value\n xmax = segments_deblend_info.to_table(columns=['bbox_xmax'])['bbox_xmax'].value\n ymin = segments_deblend_info.to_table(columns=['bbox_ymin'])['bbox_ymin'].value\n ymax = segments_deblend_info.to_table(columns=['bbox_ymax'])['bbox_ymax'].value\n xmin_c, xmax_c = xmin[c_index], xmax[c_index]\n ymin_c, ymax_c = ymin[c_index], ymax[c_index]\n xsize_c = xmax_c - xmin_c\n ysize_c = ymax_c - ymin_c\n if xsize_c > ysize_c:\n r_center = np.int(xsize_c)\n else:\n r_center = np.int(ysize_c)\n center_mask_info= [center_mask, r_center, xcenter, ycenter, c_index]\n return obj_masks, center_mask_info, segments_deblend",
"def phase_segmentation(image, threshold):\n # Normalize the image\n im_norm = (image - image.min()) / (image.max() - image.min())\n\n # Do a background subtraction\n im_blur = skimage.filters.gaussian(image, 50.0)\n im_sub = im_norm - im_blur\n\n # Threshold the image\n im_thresh = im_sub < -0.2\n\n # Label the image\n im_label = skimage.measure.label(im_thresh)\n\n # Get the properties and apply an area threshold\n props = skimage.measure.regionprops(im_label)\n\n # Make an empty image to store the approved cells\n approved_objects = np.zeros_like(im_label)\n\n # Apply the area filters\n for prop in props:\n obj_area = prop.area * 0.160**2 # Given the interpixel distance\n if (obj_area > 0.5) & (obj_area < 5):\n approved_objects += (im_label==prop.label)\n\n # Relabel the image.\n return im_relab",
"def droplet(r_drop=0.02): # [dm]\n alpha_pom = float(76.8)\n r_real = r_drop / np.sin(alpha_pom) # [dm]\n height = r_real * (1 - np.cos(alpha_pom)) # [dm]\n s_drop = np.pi * (4 * r_real * height - height ** 2) # [dm2]\n v_drop = np.pi * height ** 2 * (r_real - height / 3) # [dm3]\n s0 = np.pi * r_drop ** 2 # [dm2]\n return s_drop, v_drop, s0 # , h_max, s_max, v_max, s1",
"def initiate_path_tracking(self, compress = False): \r\n\r\n start_contour = -1\r\n \r\n # input active particle and target particle\r\n while start_contour == -1:\r\n self.__define_particle('ActiveParticle')\r\n active_pos = mouse_click_pos\r\n self.__define_particle('Target')\r\n target_pos = mouse_click_pos\r\n \r\n start_contour = self.__get_contour_num(active_pos)\r\n print(start_contour)\r\n \r\n end_contour = self.__get_contour_num(target_pos)\r\n print(end_contour)\r\n\r\n start_x, start_y = self.centers[start_contour]\r\n if end_contour != -1:\r\n end_x, end_y = self.centers[end_contour]\r\n else:\r\n end_x, end_y = target_pos\r\n \r\n ret, not_image = cv2.threshold(self.image,127,255,cv2.THRESH_BINARY)\r\n \r\n masked_image = mask_particle_rect(not_image, self.boundRect[start_contour], (255,255,255))\r\n\r\n if end_contour != -1:\r\n masked_image = mask_particle_rect(masked_image, self.boundRect[end_contour], (255,255,255))\r\n \r\n\r\n path, runs = find_path(masked_image, (start_x,start_y), (end_x, end_y),\r\n 4, int(self.radius[start_contour]*3), compress)\r\n\r\n \r\n path_image = self.image\r\n\r\n for i in range(len(path)-1):\r\n x_start, y_start = path[i]\r\n x_end, y_end = path[i+1]\r\n\r\n path_image = cv2.line(path_image,(x_start,y_start),(x_end,y_end),(100,100,100),2)\r\n \r\n cv2.imshow('image', path_image)\r\n cv2.waitKey(0)\r\n cv2.destroyAllWindows()\r\n \r\n return path, self.boundRect[start_contour]",
"def red_points(self, plan, dlvl_sag_img, rphase_sag_img, root_cor_sequence, cor_sequence, sag_sequence):\n # print(\"dlvl_sag_img: {}\".format(dlvl_sag_img))\n # print(\"Cor Sequences: {}\".format(self.cor_sequences))\n # print(\"Sag Sequences: {}\".format(self.sag_sequences))\n cor_sequence = int(cor_sequence)\n sag_sequence = int(sag_sequence)\n # print(\"cor_sequence: {}\".format(cor_sequence))\n # print(\"sag_sequence: {}\".format(sag_sequence))\n column = self.cor_sequences.index(cor_sequence)\n row = self.sag_sequences.index(sag_sequence)\n # print(\"Column: {}\".format(column))\n # print(\"Row: {}\".format(row))\n\n lpatterns = [x for x in self.dataset if int(x.split('-')[1]) == cor_sequence]\n\n pattern = [p for p in lpatterns if int(p.split('-')[3]) == sag_sequence][0]\n\n pts_pattern = self.pattern_coronal('{}.png'.format(pattern))\n diaph_lvl = [max(x) for x in self.diaphragmatic_level_coronal(pts_pattern)]\n resp_phase = self.respiratory_phase_coronal(self.diaphragmatic_level_coronal(pts_pattern))\n\n index_imgs_registered = list() # Store index of the coronal registered images\n for index, i in enumerate(diaph_lvl):\n if i == dlvl_sag_img[self.cor_sequences.index(cor_sequence)]:\n index_imgs_registered.append(index)\n # print(\"(Red) Index of registered images: {} ({})\\n\".format(index_imgs_registered, len(index_imgs_registered)))\n\n for index, i in enumerate(resp_phase):\n if index in index_imgs_registered:\n if resp_phase[index] != rphase_sag_img[self.cor_sequences.index(cor_sequence)]:\n index_imgs_registered.remove(index)\n # print(\"(Red) Index of registered images: {} ({})\\n\".format(index_imgs_registered, len(index_imgs_registered)))\n\n # print(\"lpatterns[sag_sequence]: {}, sag_sequence: {}\".format(lpatterns[self.sag_sequences.index(sag_sequence)], sag_sequence))\n pts_pattern = self.pattern_coronal('{}.png'.format(lpatterns[self.sag_sequences.index(sag_sequence)]))\n diaph_lvl = [max(x) for x in self.diaphragmatic_level_coronal(pts_pattern)]\n\n \"\"\" If analyzed point is green and there are at least one image registered in this\n position, the diaphragmatic level and respiratory phase is equal and only\n registration matrix is update \"\"\"\n if self.matRegistration[row, column] == 2.0 and len(index_imgs_registered) > 0:\n # print(\"{}x{}\\n\".format(row, column))\n # print(\"matDL: {}, DL: {}\\n\".format(self.matDL[i, column], diaph_lvl[imgnum]))\n self.matRegistration[row, column] = self.red\n\n # print(\"(Red) Diaphragmatic level matrix:\\n{}\\n\".format(self.matDL))\n # print(\"(Red) Registration matrix:\\n{}\\n\".format(self.matRegistration))\n # print(\"(Red) Respiratory phase:\\n{}\\n\".format(self.matRP))",
"def jackknife_errors_CLF(pos,Phi,Ndivs,Lbox,M,L_bins,dL,Mhost_min,Mhost_max,Mhost):\n\n n_subBox = Ndivs*Ndivs*Ndivs # The number of sub volumes for the Jackknife resampling\n V_subBox = Vbox - Vbox/n_subBox # The volume of a Jackknife sample\n N = len(pos) \n delta = Lbox/Ndivs\n \n # Indices for the galaxies positions\n index = np.asarray([floor(pos[i,0]/delta) + (floor(pos[i,1]/delta)*Ndivs) + (floor(pos[i,2]/delta)*Ndivs*Ndivs) + 1 for i in range(N)]) # index for the position of particle2\n M_sub_sample = [] # keeps the absolute magnitude for the sub-samples\n Mhost_sub_sample = [] # keeps the halo mass for the sub-samples\n CLF_all = [] # keeps the values of the CLF for the full sample and for each of the sub-samples\n CLF_all.append(Phi)\n for k in range(1,n_subBox+1): # run over the sub-samples\n for i in range(0,N): # runs over all the points (galaxies)\n if (index[i] != k): # the point is inside the sub-box\n M_sub_sample.append(M[i]) # then add to sub-box list\n Mhost_sub_sample.append(Mhost[i])\n CLF_sub,L_bins = CLF(M_sub_sample,L_bins,dL,Mhost_min,Mhost_max,Mhost_sub_sample)\n CLF_all.append(CLF_sub)\n M_sub_sample = []\n Mhost_sub_sample = []\n\n\tn_subBox = float(n_subBox)\n full = np.asarray(CLF_all[0]) # the CLF for the full sample\n sub_samples = np.asarray(CLF_all[1:]) # the CLF for the Jackknife sub-samples\n after_subtraction = sub_samples - np.mean(sub_samples,axis=0)\n squared = after_subtraction**2\n error2 = ((n_subBox-1)/n_subBox)*squared.sum(axis=0)\n errors = error2**0.5\n return errors",
"def plot_pet_volume(pet_image, pixel_shape, pixel_spacing, mask=None, patient=\"?\", mask_name=\"?\"):\n # create axis for plotting\n pixel_shape = pet_image.shape\n x = np.arange(0.0, (pixel_shape[1] + 1) * pixel_spacing[0], pixel_spacing[0])\n y = np.arange(0.0, (pixel_shape[0] + 1) * pixel_spacing[1], pixel_spacing[1])\n # z = np.arange(0.0, (pixel_shape[2] + 1) * pixel_spacing[2], pixel_spacing[2])\n if mask is not None:\n masked_pet_image = np.ma.masked_array(pet_image, mask)\n # normalize values\n vmin = np.min(pet_image)\n vmax = np.max(pet_image)\n cmap = plt.cm.gray\n cmap.set_bad('r', 1)\n i = 0\n while i < pet_image.shape[2]:\n # show images\n fig_num = 0\n fig = plt.figure(fig_num)\n plt.clf()\n plt.pcolormesh(x, y, pet_image[:, :, i], vmin=vmin, vmax=vmax, cmap=cmap)\n plt.xlabel('y')\n plt.ylabel('x')\n title = \"Patient: {} - Slice: {}/{}\".format(patient, i + 1, pet_image.shape[2])\n fig.canvas.set_window_title(\"Figure {} - {}\".format(fig_num, title))\n if mask is not None:\n input(\"Press ENTER to reveal contour. \")\n fig = plt.figure(fig_num)\n plt.pcolormesh(x, y, masked_pet_image[:, :, i], vmin=vmin, vmax=vmax, cmap=cmap,\n rasterized=True, linewidth=0)\n title += \" - Contour Name: {}\".format(mask_name)\n fig.canvas.set_window_title(\"Figure {} - {}\".format(fig_num, title))\n c = input(\"ENTER=continue, Q=quit, M=median, R=repeat, P=previous, N=start over. \")\n if c.startswith(\"q\"):\n break\n elif c.startswith(\"m\"):\n i = int(pet_image.shape[2] / 2) - 1\n elif c.startswith(\"r\"):\n i -= 1\n elif c.startswith(\"p\"):\n i -= 2\n if i < -1:\n i = -1\n elif c.startswith(\"n\"):\n i = -1\n i += 1"
] | [
"0.58779204",
"0.58106714",
"0.5760057",
"0.54264325",
"0.51649725",
"0.51187783",
"0.5067118",
"0.50534153",
"0.50534153",
"0.50201076",
"0.5005486",
"0.49400416",
"0.49284068",
"0.49158067",
"0.4898579",
"0.48766935",
"0.48485488",
"0.4811385",
"0.47815025",
"0.47782052",
"0.47579148",
"0.4750158",
"0.472634",
"0.47250545",
"0.46689233",
"0.46673143",
"0.46655828",
"0.46655336",
"0.46625888",
"0.4661831"
] | 0.63500845 | 0 |
Refines droplet parameters by fitting to the phase field. This function varies droplet parameters, like position, size, interface width, and potential perturbation amplitudes, until the overlap with the respective phase field region is maximized. Here, we use a constraint fitting routine. | def refine_droplet(
phase_field: ScalarField,
droplet: DiffuseDroplet,
least_squares_params: Optional[Dict[str, Any]] = None,
) -> DiffuseDroplet:
assert isinstance(phase_field, ScalarField)
if least_squares_params is None:
least_squares_params = {}
if not isinstance(droplet, DiffuseDroplet):
droplet = DiffuseDroplet.from_droplet(droplet)
if droplet.interface_width is None:
droplet.interface_width = phase_field.grid.typical_discretization
# enlarge the mask to also contain the shape change
mask = droplet._get_phase_field(phase_field.grid, dtype=np.bool_)
dilation_iterations = 1 + int(2 * droplet.interface_width)
mask = ndimage.morphology.binary_dilation(mask, iterations=dilation_iterations)
# apply the mask
data_mask = phase_field.data[mask]
# determine the coordinate constraints and only vary the free data points
data_flat = structured_to_unstructured(droplet.data) # unstructured data
dtype = droplet.data.dtype
free = np.ones(len(data_flat), dtype=np.bool_)
free[phase_field.grid.coordinate_constraints] = False
# determine data bounds
l, h = droplet.data_bounds
bounds = l[free], h[free]
def _image_deviation(params):
"""helper function evaluating the residuals"""
# generate the droplet
data_flat[free] = params
droplet.data = unstructured_to_structured(data_flat, dtype=dtype)
droplet.check_data()
img = droplet._get_phase_field(phase_field.grid)[mask]
return img - data_mask
# do the least square optimization
result = optimize.least_squares(
_image_deviation, data_flat[free], bounds=bounds, **least_squares_params
)
data_flat[free] = result.x
droplet.data = unstructured_to_structured(data_flat, dtype=dtype)
# normalize the droplet position
grid = phase_field.grid
coords = grid.point_from_cartesian(droplet.position)
droplet.position = grid.point_to_cartesian(grid.normalize_point(coords))
return droplet | {
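A minimal usage sketch for the function above, assuming the py-pde / py-droplets style API it is written against (CartesianGrid, ScalarField, DiffuseDroplet); the synthetic tanh-profile field, the grid size, and the deliberately imperfect initial guess are illustrative additions, not part of the record.

    import numpy as np
    from pde import CartesianGrid, ScalarField   # assumed grid/field classes
    from droplets import DiffuseDroplet          # assumed droplet class

    # 64x64 grid on [0, 32]^2 with a synthetic diffuse droplet centred at (16, 16)
    grid = CartesianGrid([[0, 32], [0, 32]], [64, 64])
    x, y = grid.cell_coords[..., 0], grid.cell_coords[..., 1]
    r = np.hypot(x - 16.0, y - 16.0)
    field = ScalarField(grid, 0.5 * (1.0 - np.tanh(r - 5.0)))  # radius ~5, roughly unit interface width

    # imperfect initial guess; refine_droplet (defined above) adjusts position,
    # radius and interface width by least-squares fitting inside the dilated mask
    guess = DiffuseDroplet(position=[15.0, 15.0], radius=4.0, interface_width=0.8)
    refined = refine_droplet(field, guess)
    print(refined.position, refined.radius, refined.interface_width)

Under these assumptions the refined droplet should recover the synthetic centre and radius up to the tolerance of scipy's least_squares.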
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def constrain_pars(model_info, pars):\n name = model_info['id']\n # if it is a product model, then just look at the form factor since\n # none of the structure factors need any constraints.\n if '*' in name:\n name = name.split('*')[0]\n\n if name == 'capped_cylinder' and pars['cap_radius'] < pars['radius']:\n pars['radius'], pars['cap_radius'] = pars['cap_radius'], pars['radius']\n if name == 'barbell' and pars['bell_radius'] < pars['radius']:\n pars['radius'], pars['bell_radius'] = pars['bell_radius'], pars['radius']\n\n # Limit guinier to an Rg such that Iq > 1e-30 (single precision cutoff)\n if name == 'guinier':\n #q_max = 0.2 # mid q maximum\n q_max = 1.0 # high q maximum\n rg_max = np.sqrt(90*np.log(10) + 3*np.log(pars['scale']))/q_max\n pars['rg'] = min(pars['rg'], rg_max)\n\n if name == 'rpa':\n # Make sure phi sums to 1.0\n if pars['case_num'] < 2:\n pars['Phia'] = 0.\n pars['Phib'] = 0.\n elif pars['case_num'] < 5:\n pars['Phia'] = 0.\n total = sum(pars['Phi'+c] for c in 'abcd')\n for c in 'abcd':\n pars['Phi'+c] /= total",
"def setup_fitting_init_pars(inparam, night, band, masterbeam, order):\n\n # Determine whether IGRINS mounting was loose or\n # the night of interest is in question\n if (int(night) < 20180401) or (int(night) > 20190531):\n IPpars = inparam.ips_tightmount_pars[band][masterbeam][order]\n else:\n IPpars = inparam.ips_loosemount_pars[band][masterbeam][order]\n\n # start at bucket loc = 1250 +- 100, width = 250 +- 100,\n # depth = 100 +- 5000 but floor at 0\n centerloc = 1250 if band == 'H' else 1180\n\n # Initialize parameter array for optimization as well as half-range values\n # for each parameter during the various steps of the optimization.\n # Many of the parameters initialized here will be changed throughout the\n # code before optimization and in between optimization steps.\n\n parA0 = np.array([\n 0.0, # 0: The shift of the stellar template (km/s)\n 0.0, # 1: The scale factor for the stellar template\n 0.0, # 2: The shift of the telluric template (km/s)\n 1.0, # 3: The scale factor for the telluric template\n 0.0, # 4: vsini (km/s)\n IPpars[2], # 5: The instrumental resolution (FWHM) in pixels\n 0.0, # 6: Wavelength 0-pt\n 0.0, # 7: Wavelength linear component\n 0.0, # 8: Wavelength quadratic component\n 0.0, # 9: Wavelength cubic component\n 1.0, #10: Continuum zero point\n 0.0, #11: Continuum linear component\n 0.0, #12: Continuum quadratic component\n IPpars[1], #13: Instrumental resolution linear component\n IPpars[0], #14: Instrumental resolution quadratic component\n centerloc, #15: Blaze dip center location\n 330, #16: Blaze dip full width\n 0.05, #17: Blaze dip depth\n 90, #18: Secondary blaze dip full width\n 0.05, #19: Blaze dip depth\n 0.0, #20: Continuum cubic component\n 0.0, #21: Continuum quartic component\n 0.0, #22: Continuum quintic component\n 0.0, #23: Continuum hexic component\n 0.0, #24: secondary par\n 0.0, #25: secondary par\n 0.0, #26: secondary par\n 0.0 #27: secondary par\n ])\n\n return parA0",
"def locate_droplets(\n phase_field: ScalarField,\n threshold: Union[float, str] = 0.5,\n modes: int = 0,\n minimal_radius: float = 0,\n refine: bool = False,\n interface_width: Optional[float] = None,\n) -> Emulsion:\n assert isinstance(phase_field, ScalarField)\n dim = phase_field.grid.dim # dimensionality of the space\n\n if modes > 0 and dim not in [2, 3]:\n raise ValueError(\"Perturbed droplets only supported for 2d and 3d\")\n\n # determine actual threshold\n if threshold == \"auto\":\n threshold = float(phase_field.data.min() + phase_field.data.max()) / 2\n else:\n threshold = float(threshold)\n\n # locate droplets in thresholded image\n img_binary = phase_field.data > threshold\n candidates = locate_droplets_in_mask(phase_field.grid, img_binary)\n\n if minimal_radius > -np.inf:\n candidates.remove_small(minimal_radius)\n\n droplets = []\n for droplet in candidates:\n # check whether we need to add the interface width\n droplet_class = droplet.__class__\n args: Dict[str, NumberOrArray] = {}\n\n # change droplet class when interface width is given\n if interface_width is not None:\n droplet_class = DiffuseDroplet\n args[\"interface_width\"] = interface_width\n\n # change droplet class when perturbed droplets are requested\n if modes > 0:\n if dim == 2:\n droplet_class = PerturbedDroplet2D\n elif dim == 3:\n droplet_class = PerturbedDroplet3D\n else:\n raise NotImplementedError(f\"Dimension {dim} is not supported\")\n args[\"amplitudes\"] = np.zeros(modes)\n\n # recreate a droplet of the correct class\n if droplet_class != droplet.__class__:\n droplet = droplet_class.from_droplet(droplet, **args)\n\n # refine droplets if necessary\n if refine:\n try:\n droplet = refine_droplet(phase_field, droplet)\n except ValueError:\n continue # do not add the droplet to the list\n droplets.append(droplet)\n\n # return droplets as an emulsion\n emulsion = Emulsion(droplets, grid=phase_field.grid)\n if minimal_radius > -np.inf:\n emulsion.remove_small(minimal_radius)\n return emulsion",
"def optimize(self):\n # Loop through every WD and WS individually\n wd_array = self.fi_subset.floris.flow_field.wind_directions\n ws_array = self.fi_subset.floris.flow_field.wind_speeds\n for nwsi, ws in enumerate(ws_array):\n\n self.fi_subset.reinitialize(wind_speeds=[ws])\n\n for nwdi, wd in enumerate(wd_array):\n # Find turbines to optimize\n turbs_to_opt = self._turbs_to_opt_subset[nwdi, nwsi, :]\n if not any(turbs_to_opt):\n continue # Nothing to do here: no turbines to optimize\n\n # Extract current optimization problem variables (normalized)\n yaw_lb = self._minimum_yaw_angle_subset_norm[nwdi, nwsi, turbs_to_opt]\n yaw_ub = self._maximum_yaw_angle_subset_norm[nwdi, nwsi, turbs_to_opt]\n bnds = [(a, b) for a, b in zip(yaw_lb, yaw_ub)]\n x0 = self._x0_subset_norm[nwdi, nwsi, turbs_to_opt]\n\n J0 = self._farm_power_baseline_subset[nwdi, nwsi]\n yaw_template = self._yaw_angles_template_subset[nwdi, nwsi, :]\n turbine_weights = self._turbine_weights_subset[nwdi, nwsi, :]\n yaw_template = np.tile(yaw_template, (1, 1, 1))\n turbine_weights = np.tile(turbine_weights, (1, 1, 1))\n\n # Define cost function\n def cost(x):\n x_full = np.array(yaw_template, copy=True)\n x_full[0, 0, turbs_to_opt] = x * self._normalization_length\n return (\n - 1.0 * self._calculate_farm_power(\n yaw_angles=x_full,\n wd_array=[wd],\n turbine_weights=turbine_weights\n )[0, 0] / J0\n )\n\n # Perform optimization\n residual_plant = minimize(\n fun=cost,\n x0=x0,\n bounds=bnds,\n method=self.opt_method,\n options=self.opt_options,\n )\n\n # Undo normalization/masks and save results to self\n self._farm_power_opt_subset[nwdi, nwsi] = -residual_plant.fun * J0\n self._yaw_angles_opt_subset[nwdi, nwsi, turbs_to_opt] = (\n residual_plant.x * self._normalization_length\n )\n\n # Finalize optimization, i.e., retrieve full solutions\n df_opt = self._finalize()\n return df_opt",
"def optimize_force_field_parameters_Cv_FWHM(cgmodel, file_list, temperature_list, param_bounds_dict,\n frame_begin=0, frame_end=-1, sample_spacing=1, sparsify_stride=1, output_data='output.nc',\n verbose=False, n_cpu=12, min_eff_samples=50,\n n_trial_boot=200, num_intermediate_states=0, plotfile='optimize_FWHM_iterations.pdf',\n min_method='TNC'):\n\n # Parse the force field parameter change dict:\n x0 = []\n param_names = []\n bounds = []\n units = []\n \n for key,value in param_bounds_dict.items():\n # value should be [(bound_lo, bound_hi)]\n # key should be a valid force field parameter name\n param_names.append(key)\n # Every parameter except periodicity should have units\n # For now, changing periodicity is not supported.\n \n # TODO: add support for sums of periodic torsion terms\n units.append(value[0].unit)\n bounds.append((value[0].value_in_unit(units[-1]),value[1].value_in_unit(units[-1])))\n # Use mean value as starting guess:\n x0.append((value[1].value_in_unit(units[-1])+value[0].value_in_unit(units[-1]))/2)\n\n if verbose:\n print(f'param_names: {param_names}')\n print(f'unit: {units}')\n print(f'bounds: {bounds}')\n print(f'x0: {x0}')\n\n def get_reeval_FWHM(param_values, cgmodel, file_list, temperature_list, output_data,\n param_names, units, frame_begin, sample_spacing, sparsify_stride, frame_end,\n n_cpu, n_trial_boot, num_intermediate_states):\n \"\"\"\n Objective function to be minimized\n \"\"\"\n\n # Construct dictionary of parameter update instructions:\n param_dict = {}\n \n # if len(param_names) == 1:\n # # 1D optimization:\n # param_dict[param_names[0]] = param_values * units[0]\n\n for i in range(len(param_names)):\n param_dict[param_names[i]] = param_values[i] * units[i]\n \n if verbose:\n print(f'Current parameters: {param_dict}') \n \n # Re-evaluate energy with current force field parameters:\n # For bootstrapping, evaluate all frames between [frame_begin:sparsify_stride:frame_end], and\n # apply the sample_spacing only to the heat capacity part\n U_eval, simulation = eval_energy(\n cgmodel,\n file_list,\n temperature_list,\n param_dict,\n frame_begin=frame_begin,\n frame_stride=sparsify_stride,\n frame_end=frame_end,\n n_cpu=n_cpu,\n verbose=verbose,\n )\n\n # Evaluate heat capacity and full-width half-maximum from bootstrapping:\n (new_temperature_list, C_v_values, C_v_uncertainty,\n Tm_value, Tm_uncertainty,\n Cv_height_value, Cv_height_uncertainty,\n FWHM_value, FWHM_uncertainty,\n N_eff_values) = bootstrap_heat_capacity(\n U_kln=U_eval,\n output_data=output_data,\n frame_begin=frame_begin,\n frame_end=frame_end,\n sample_spacing=sample_spacing,\n sparsify_stride=sparsify_stride,\n num_intermediate_states=num_intermediate_states,\n n_trial_boot=n_trial_boot,\n plot_file=f'heat_capacity_boot_{param_names[0]}_{param_values}.pdf',\n )\n \n if verbose:\n print(f'Current FWHM: {FWHM_value} +/- {FWHM_uncertainty[0]}')\n print(f'Current minimum N_eff: {np.min(N_eff_values)}')\n \n # Check for minimum N_eff criteria.\n # If too small, the minimization should stop if we're using a gradient method.\n # If we're not using a gradient method, return a large value.\n \n if np.min(N_eff_values) < min_eff_samples:\n print(f'Insufficient number of effective samples ({np.min(N_eff_values)})')\n \n # print(f'Creating a cgmodel with current parameters...,end='')\n # Create the cgmodel\n # print('done')\n \n exit()\n \n return FWHM_value.value_in_unit(unit.kelvin)\n\n # Run optimization:\n\n # if len(param_names) == 1:\n # # Do scalar optimization:\n # opt_results = 
minimize_scalar(get_reeval_FWHM, x0,\n # args=(cgmodel, file_list, temperature_list, output_data, param_names, units,\n # frame_begin, sample_spacing, sparsify_stride, frame_end, n_cpu, n_trial_boot, num_intermediate_states),\n # method='bounded',\n # bounds=[bounds[0][0],bounds[0][1]],\n # options={'maxiter': 25},\n # )\n\n # else:\n # Do multivariate optimization:\n opt_results = minimize(get_reeval_FWHM, x0, jac='2-point',\n args=(cgmodel, file_list, temperature_list, output_data, param_names, units,\n frame_begin, sample_spacing, sparsify_stride, frame_end, n_cpu, n_trial_boot, num_intermediate_states),\n method=min_method,\n bounds=bounds,\n options={'maxfun': 25, 'finite_diff_rel_step': 0.005, 'eta': 0.5}, # This should be user input\n ) \n \n # TODO: plot the heat capacity curves at each iteration, and make a plot of all FWHM_values \n\n # Construct dictionary of optimal parameters:\n opt_param_dict = {} \n \n k = 0\n for key,value in param_bounds_dict.items():\n opt_param_dict[key] = opt_results.x[k] * units[k]\n k += 1\n \n return opt_param_dict, opt_results",
"def LeastSquareFit_Fixed_Parameter(self,params0,FixedIndices):\n\t\tnpl = self.Observations.nplanets\n\t\tassert len(params0.reshape(-1)) == npl * 5, \"Shape of initial parameter does not match what is required for the number of planets!\"\n\t\t\t\n\t\ttarget_data = np.array([])\n\t\terrors = np.array([])\n\t\t\n\t\tfor time,err in zip(self.Observations.transit_times,self.Observations.transit_uncertainties):\n\t\t\ttarget_data = np.append(target_data,time)\n\t\t\terrors = np.append(errors,err)\n\t\t\n\t\ttFinal = self.Observations.tFinal() + np.max(self.Observations.PeriodEstimates)\n\t\t\n\t\tFixedPars = (params0.reshape(1,npl*5))[0,FixedIndices]\n\t\t\n\t\tdef objectivefn(x):\n\t\t\tinpars = np.insert( x , np.array(FixedIndices)-np.arange(len(FixedIndices)) , FixedPars )\n\t\t\t#print inpars\t\n\t\t\ttransits,success = self.MCMC_CoplanarParam_TransitTimes(inpars,tFinal)\n\t\t\t\n\t\t\tanswer = np.array([],dtype=float)\n\t\t\tfor i,t in enumerate(transits):\n\t\t\t\ttnums = self.Observations.transit_numbers[i]\n\t\t\t\ttry:\n\t\t\t\t\tanswer = np.append( answer,np.array(t[tnums]) )\n\t\t\t\texcept:\n\t\t\t\t\treturn -np.inf * np.ones(len(target_data))\n\t\t\t#\n\t\t\ttry:\n\t\t\t\tttvchi2 = (answer - target_data)/errors\n\t\t\texcept:\n\t\t\t\treturn -np.inf * np.ones(len(target_data))\n\t\t\t\n\t\t\treturn ttvchi2\n\t\t\n\t\tfitpars = leastsq(objectivefn, np.delete(params0,FixedIndices) ,full_output=1)[0]\n\t\treturn np.insert( fitpars, np.array(FixedIndices)-np.arange(len(FixedIndices)), FixedPars)",
"def fit(self):\n self._minuit_problem.migrad() # run optimizer\n self._status = 0 if self._minuit_problem.migrad_ok() else 1",
"def _reset_parameters(self):\n self._solver_input[\"P\"] = cvxopt.matrix(2.0 * self.opt.P(self.p).toarray())\n self._solver_input[\"q\"] = cvxopt.matrix(self.opt.q(self.p).toarray().flatten())\n if self.opt_type in CONSTRAINED_OPT:\n if self.opt.nk > 0:\n self._solver_input[\"G\"] = cvxopt.matrix(-self.opt.M(self.p).toarray())\n self._solver_input[\"h\"] = cvxopt.matrix(\n self.opt.c(self.p).toarray().flatten()\n )\n if self.opt.na > 0:\n self._solver_input[\"A\"] = cvxopt.matrix(self.opt.A(self.p).toarray())\n self._solver_input[\"b\"] = cvxopt.matrix(-self.opt.b(self.p).toarray())",
"def objective(trial):\n # The parameters that we will calibrate the model for are shown here.\n # Optuna trial i\n BOD = trial.suggest_uniform(\"BOD\", 0, 1) #Review ranges here\n k_r = trial.suggest_uniform(\"k_r\", 0, 1) #Review Ranges here \n \n def ChLa(t):\n return 1 # Need to link to data\n\n def I(x):\n return 1 # Need to link to data\n\n K_z = 2 * 10**(-5) # p.51\n a = K_z\n k_b = 0.1 # Table 5\n th_b = 1.047 # Table 5\n k_r = 0.1 # Table 5\n YCHO2 = 0.0083 # Table 5\n th_p = 1.036 # Table 5\n th_s = 1.065 # Table 5\n th_r = 1.047 # Table 5\n\n def Temp(t):\n \"\"\"\n Function that maps time to temperature\n \"\"\"\n return 20 # Need to link to data\n\n def P_max(t):\n return 9.6 * 1.036 **(Temp(t) - 20) # Eq. 4\n\n def L_min(t):\n I = 1 # Need to link to PAR data\n K_1 = 0.687 * 1.086**(Temp(t) - 20)\n K_2 = 15\n return I * (1 + 2 * np.sqrt(K_1 / K_2)) / (I + K_1 + I**2 / K_2) # Eq. 5\n \n # f deals with sink and source terms \n def f(x, t):\n return -1 / YCHO2 * k_r * th_r**(Temp(t) - 20) * ChLa(t) + P_max(t) * L_min(t) * ChLa(t) - k_b * th_b**(Temp(t)-20) * BOD \n\n L = 200 # Length of domain\n dt = 1 / 48 # Mesh spacing in t\n F = a * dt # a * dt / dx**2\n T = 100 # Simulation time stop\n\n # Solving the PDE\n DO, x, t, _ = solver_FE_simple(I, a, f, L, dt, F, T)\n \n # Creating some bogus targets while database errors are happening\n DO_data = DO + np.random.random(len(DO))\n\n # Using mean squared error as the measure of fit, where we want\n # to minimize this number\n return ((DO - DO_data)**2).mean()",
"def fit(self, filename, **options):\n amp_guess = np.max(self.I_data)\n center_x_guess = self.RA[self.I_data.argmax()]\n center_y_guess = self.DEC[self.I_data.argmax()]\n # cos dec correction\n center_x_guess = center_x_guess*np.cos(np.deg2rad(center_y_guess))\n sigma_x_guess = options[\"beam_width\"]/2.\n sigma_y_guess = options[\"beam_width\"]/2.\n theta_guess = 0.\n guess_p = [amp_guess, center_x_guess, center_y_guess,\n sigma_x_guess, sigma_y_guess, theta_guess]\n sigma = [options[\"sigma\"]]*len(self.I_data)\n self.center_RA = self.RA[self.I_data.argmax()] #centers for the bad fits\n self.center_DEC = self.DEC[self.I_data.argmax()]\n self.center_I = None\n try:\n fit_p, covar = curve_fit(gauss2d,\n (self.RA*np.cos(np.deg2rad(self.DEC)),\n self.DEC),\n self.I_data, p0=guess_p,\n sigma=sigma)\n self.fit_p = np.array(fit_p)\n self.covar = np.array(covar)\n if (np.isinf(fit_p).any() or np.isinf(covar).any() or\n np.isnan(fit_p).any() or np.isnan(covar).any() or\n (fit_p<0).any()):\n\n self.good_fit = False\n self.bad_reasons+=\"fit_is_nan_or_inf,\"\n else:\n [self.center_I, self.center_RA, self.center_DEC, sigma_x, sigma_y, theta] = self.fit_p\n self.e_fit_p = np.array([np.sqrt(covar[i,i])\n for i in range(len(fit_p))])\n if (np.abs(self.e_fit_p[0]/self.fit_p[0])<options[\"amp_req\"] and\n np.abs(self.e_fit_p[3]/self.fit_p[3])<options[\"width_req\"] and\n np.abs(self.e_fit_p[4]/self.fit_p[4])<options[\"width_req\"]):\n self.good_fit = True\n else:\n self.good_fit = False\n self.bad_reasons+=\"bad_fit_exceded_error\"\n # for plotting\n if options[\"file_verbose\"]:\n fit_x = np.linspace(np.min(self.RA),\n np.max(self.RA), 100)\n fit_y = np.linspace(np.min(self.DEC),\n np.max(self.DEC), 100)\n fit_x = fit_x * np.cos(np.deg2rad(fit_y))\n mesh_x, mesh_y = np.meshgrid(fit_x, fit_y)\n fit_z = np.array([[gauss2d((x,y),*self.fit_p)\n for x in fit_x]\n for y in fit_y])\n plt.field_plot_3d(self.RA, self.DEC, self.I_data,\n mesh_x, mesh_y, fit_z, filename)\n except RuntimeError:\n if options[\"verbose\"]:\n print(\"Log: A fit did not converge.\")\n self.good_fit = False\n self.bad_reasons+= \"no_convergence,\"",
"def make_peeling_parset(parset, peel_bins, scalar_phase=True, phase_only=True,\n sol_int_amp=500, time_block=None):\n sol_int_list = []\n if time_block is not None:\n # Set all chunk sizes to time_block\n for peel_bin in peel_bins:\n peel_bin['sol_int'] = time_block + 2\n for peel_bin in peel_bins:\n sol_int_list.append(peel_bin['sol_int'])\n if not phase_only:\n sol_int_list.append(sol_int_amp)\n sol_int_list.append(250) # don't let chunk size fall below 250 for performance reasons\n\n # Set overall strategy\n nbins = len(peel_bins)\n newlines = ['Strategy.InputColumn = DATA\\n',\n 'Strategy.ChunkSize = {0}\\n'.format(int(max(sol_int_list))),\n 'Strategy.Baselines = [CR]S*&\\n',\n 'Strategy.UseSolver = F\\n']\n if phase_only:\n pstr = ''\n strategy_str = 'Strategy.Steps = [subtractfield'\n for i, peel_bin in enumerate(peel_bins):\n strategy_str += ', add{0}, solve{0}'.format(i+1)\n if i < nbins - 1:\n strategy_str += ', subtract{0}'.format(i+1)\n strategy_str += ']\\n'\n else:\n pstr = 'p'\n strategy_str = 'Strategy.Steps = [subtractfield'\n for i, peel_bin in enumerate(peel_bins):\n strategy_str += ', add{0}, solvep{0}, solvea{0}'.format(i+1)\n if i < nbins - 1:\n strategy_str += ', subtract{0}'.format(i+1)\n strategy_str += ']\\n'\n newlines += strategy_str\n\n # Subtract field (all sources)\n newlines += ['\\n', 'Step.subtractfield.Operation = SUBTRACT\\n',\n 'Step.subtractfield.Model.Sources = []\\n',\n 'Step.subtractfield.Model.Beam.Enable = T\\n',\n 'Step.subtractfield.Model.Beam.Mode = ARRAY_FACTOR\\n',\n '\\n']\n\n for i, peel_bin in enumerate(peel_bins):\n # Add sources in current bin\n newlines += ['Step.add{0}.Operation = ADD\\n'.format(i+1),\n 'Step.add{0}.Model.Sources = '.format(i+1) + str(peel_bin['names']) + '\\n',\n 'Step.add{0}.Model.Beam.Enable = T\\n'.format(i+1),\n 'Step.add{0}.Model.Beam.Mode = ARRAY_FACTOR\\n'.format(i+1),\n '\\n']\n\n # Phase-only solve\n newlines += ['Step.solve{0}{1}.Operation = SOLVE\\n'.format(pstr, i+1),\n 'Step.solve{0}{1}.Model.Sources = '.format(pstr, i+1) + str(peel_bin['names']) + '\\n',\n 'Step.solve{0}{1}.Model.Cache.Enable = T\\n'.format(pstr, i+1),\n 'Step.solve{0}{1}.Model.Beam.Enable = T\\n'.format(pstr, i+1),\n 'Step.solve{0}{1}.Model.Beam.Mode = ARRAY_FACTOR\\n'.format(pstr, i+1),\n 'Step.solve{0}{1}.Model.DirectionalGain.Enable = T\\n'.format(pstr, i+1),\n 'Step.solve{0}{1}.Model.Phasors.Enable = T\\n'.format(pstr, i+1)]\n if scalar_phase:\n newlines += ['Step.solve{0}{1}.Solve.Mode = COMPLEX\\n'.format(pstr, i+1),\n 'Step.solve{0}{1}.Model.ScalarPhase.Enable= T\\n'.format(pstr, i+1),\n 'Step.solve{0}{1}.Solve.Parms = [\"ScalarPhase:*\"]\\n'.format(pstr, i+1)]\n else:\n newlines += ['Step.solve{0}{1}.Solve.Mode = COMPLEX\\n'.format(pstr, i+1),\n 'Step.solve{0}{1}.Solve.Parms = [\"DirectionalGain:0:0:Phase:*\",'\n '\"DirectionalGain:1:1:Phase:*\"]\\n'.format(pstr, i+1)]\n newlines += ['Step.solve{0}{1}.Solve.CellSize.Freq = 0\\n'.format(pstr, i+1),\n 'Step.solve{0}{1}.Solve.CellSize.Time = {2}\\n'.format(pstr, i+1, int(peel_bin['sol_int'])),\n 'Step.solve{0}{1}.Solve.CellChunkSize = {2}\\n'.format(pstr, i+1, int(peel_bin['sol_int'])),\n 'Step.solve{0}{1}.Solve.Options.MaxIter = 50\\n'.format(pstr, i+1),\n 'Step.solve{0}{1}.Solve.Options.EpsValue = 1e-9\\n'.format(pstr, i+1),\n 'Step.solve{0}{1}.Solve.Options.EpsDerivative = 1e-9\\n'.format(pstr, i+1),\n 'Step.solve{0}{1}.Solve.Options.ColFactor = 1e-9\\n'.format(pstr, i+1),\n 'Step.solve{0}{1}.Solve.Options.LMFactor = 1.0\\n'.format(pstr, i+1),\n 
'Step.solve{0}{1}.Solve.Options.BalancedEqs = F\\n'.format(pstr, i+1),\n 'Step.solve{0}{1}.Solve.Options.UseSVD = T\\n'.format(pstr, i+1),\n 'Step.solve{0}{1}.Solve.UVRange = [250]\\n'.format(pstr, i+1)]\n\n # Ampl-only solve\n if not phase_only and time_block is None:\n newlines += ['\\n',\n 'Step.solvea{0}.Operation = SOLVE\\n'.format(i+1),\n 'Step.solvea{0}.Model.Sources = '.format(i+1) + str(peel_bin['names']) + '\\n',\n 'Step.solvea{0}.Model.Cache.Enable = T\\n'.format(i+1),\n 'Step.solvea{0}.Model.Beam.Enable = T\\n'.format(i+1),\n 'Step.solvea{0}.Model.Beam.Mode = ARRAY_FACTOR\\n'.format(i+1),\n 'Step.solvea{0}.Model.Phasors.Enable = T\\n'.format(i+1),\n 'Step.solvea{0}.Solve.Mode = COMPLEX\\n'.format(i+1),\n 'Step.solvea{0}.Model.ScalarPhase.Enable= T\\n'.format(i+1),\n 'Step.solvea{0}.Model.DirectionalGain.Enable = T\\n'.format(i+1),\n 'Step.solvea{0}.Solve.Parms = [\"DirectionalGain:0:0:Ampl:*\",'\n '\"DirectionalGain:1:1:Ampl:*\"]\\n'.format(i+1),\n 'Step.solvea{0}.Solve.CellSize.Freq = 0\\n'.format(i+1),\n 'Step.solvea{0}.Solve.CellSize.Time = {1}\\n'.format(i+1, int(sol_int_amp)),\n 'Step.solvea{0}.Solve.CellChunkSize = {1}\\n'.format(i+1, int(sol_int_amp)),\n 'Step.solvea{0}.Solve.Options.MaxIter = 50\\n'.format(i+1),\n 'Step.solvea{0}.Solve.Options.EpsValue = 1e-9\\n'.format(i+1),\n 'Step.solvea{0}.Solve.Options.EpsDerivative = 1e-9\\n'.format(i+1),\n 'Step.solvea{0}.Solve.Options.ColFactor = 1e-9\\n'.format(i+1),\n 'Step.solvea{0}.Solve.Options.LMFactor = 1.0\\n'.format(i+1),\n 'Step.solvea{0}.Solve.Options.BalancedEqs = F\\n'.format(i+1),\n 'Step.solvea{0}.Solve.Options.UseSVD = T\\n'.format(i+1),\n 'Step.solvea{0}.Solve.UVRange = [250]\\n'.format(i+1)]\n\n # Subtract sources in current bin\n if i < nbins - 1:\n newlines += ['\\n',\n 'Step.subtract{0}.Operation = SUBTRACT\\n'.format(i+1),\n 'Step.subtract{0}.Model.Sources = '.format(i+1) + str(peel_bin['names']) + '\\n',\n 'Step.subtract{0}.Model.Beam.Enable = T\\n'.format(i+1)]\n if scalar_phase:\n newlines += ['Step.subtract{0}.Model.ScalarPhase.Enable = T\\n'.format(i+1)]\n if not scalar_phase or not phase_only:\n newlines += ['Step.subtract{0}.Model.DirectionalGain.Enable = T\\n'.format(i+1)]\n newlines += ['Step.subtract{0}.Model.Beam.Mode = ARRAY_FACTOR\\n'.format(i+1),\n '\\n']\n\n f = open(parset, 'w')\n f.writelines(newlines)\n f.close()",
"def sizeMission(self):\n v = self.vconfig\n m = self.mconfig\n steps = 0\n GWmin = v['Simulation']['GWMin']\n GWmax = v['Simulation']['GWmax']\n viableCandidate = False\n goodAbove = False\n GW = (GWmax + GWmin) / 2\n # http://www.youtube.com/watch?v=Xs_OacEq2Sk\n choppah = Vehicle(v, m, GW, self.airfoildata_mainRotor,self.Master)\n choppah.flyMission()\n while steps<choppah.vconfig['Simulation']['MaxSteps'] and (GWmax-GWmin)>choppah.vconfig['Simulation']['GWTolerance']:\n # Depending on whether we're oversized or undersized for the mission, adjust our GW limits accordingly\n if choppah.vconfig['Sizing Results']['CouldTrim']:\n if choppah.misSize > 0: # we can trim and we're too big\n GWmax = GW\n goodAbove = choppah\n viableCandidate = True\n else: # we can trim and we're too small\n GWmin = GW\n else: # if we can't trim the current candidate\n if goodAbove: # we can't trim but we could when we were heavier\n GWmin = GW\n else: # we can't trim and we never could\n GWmax = GW\n GW = (GWmax - GWmin) / 2 + GWmin\n choppah = Vehicle(v, m, GW, self.airfoildata_mainRotor,self.Master)\n choppah.flyMission()\n steps += 1\n\n if self.debug:\n couldTrim = choppah.vconfig['Sizing Results']['CouldTrim']\n couldMission = choppah.misSize > 0\n ms = 99999999999999 if math.isnan(choppah.misSize) else choppah.misSize\n gA = goodAbove is not False\n gAW = goodAbove.vconfig['Sizing Results']['GrossWeight'] if goodAbove else -999999999\n pvar(locals(), ('steps', 'GWmax', 'GWmin', 'couldTrim', 'couldMission'))\n stopReason = ''\n goodRun = False\n# if not (choppah.vconfig['Sizing Results']['CouldTrim'] and choppah.misSize>0):\n# choppah = goodAbove\n if choppah:\n if not choppah.vconfig['Sizing Results']['CouldTrim']:\n stopReason = 'Cound not trim at all conditions at any mission-capable weight'\n elif choppah.vconfig['Weights']['MaxAvailableFuelWeight'] < 0:\n stopReason = 'Negative calculated max fuel weight'\n elif steps >= choppah.vconfig['Simulation']['MaxSteps']:\n stopReason = 'MaxSteps reached before convergance. Stopped with bounds: %f to %f' % (GWmin, GWmax)\n elif (GWmax-GWmin <= choppah.vconfig['Simulation']['GWTolerance']):\n stopReason = 'Converged to within specified tolerances'\n goodRun = True\n else:\n stopReason = 'Stopped with some other reason'\n choppah.vconfig['Sizing Results']['StopReason'] = stopReason\n choppah.vconfig['Sizing Results']['GoodRun'] = goodRun\n if goodRun:\n choppah.vconfig['Sizing Results']['SizedWeightFound'] = True\n choppah.vconfig['Sizing Results']['SizedGrossWeight'] = GW\n else:\n choppah.vconfig['Sizing Results']['SizedWeightFound'] = False\n choppah.vconfig['Sizing Results']['SizedGrossWeight'] = float('nan')\n if self.debug: print('SizedWeightFound: %s %s' % (goodRun, stopReason))\n if self.writeOutput: choppah.write()\n return choppah",
"def do_fit(self):\n\n if (self._flag == 1):\n self._gf = [0.2]\n self._gf = self.par*(self._num_fu*len(self._sites)*2)\n x, F = self.read_from_file(\n self._sn, self._qn, self._path) # read data from the file\n # ,ftol=1.0e-7,xtol=1.0e-8)\n popt, pcov = curve_fit(\n self.modelfun, x, F, p0=self._gf, maxfev=5000)\n self._gf = popt\n\n elif (self._flag == 2):\n\n# par=[0.0]*(self._num_fu*5)\n# for j in range(self._num_fu):\n# par[j*5]=0.0*math.copysign(1,(pow(-1,j)))\n# self._gf[j*5]=0.1\n# par[j*5+1]=6.45\n# par[j*5+2]=0.0\n# par[j*5+3]=0.05\n# par[j*5+4]=1.0\n\n X, F = self.read_from_file(self._sn, self._qn, self._path) # read data from the file\n\n# height, xx, width=self.moments(F)\n# Tracer()()\n# par=[0.0]*(self._num_fu*5)\n# for j in range(self._num_fu):\n# par[j*5]=x[0,xx]\n# par[j*5]=X[0,xx]*math.copysign(1,(pow(-1,j)))\n# par[j*5+1]=X[1,xx]\n# par[j*5+2]=X[2,xx]\n# par[j*5+3]=0.007\n# par[j*5+4]=height*math.copysign(1,(pow(-1,j)))\n\n xi, yi, zi = np.mgrid[-6.5:6.5:160j, 4.0:8.9:160j, -7.5:7.5:160j]\n x, y, z = xi.flatten(), yi.flatten(), zi.flatten()\n XX = np.vstack((x, y, z))\n\n invdisttree = Invdisttree(X.T, F, leafsize=10, stat=1)\n AA = invdisttree(XX.T, nnear=130, eps=0, p=1)\n\n# aaa1,bbb1=self.detect_local_minima(-AA.reshape(xi.shape))\n# aaa2,bbb2=self.detect_local_maxima(-AA.reshape(xi.shape))\n if self.peaks==[]:\n print('\\n---------------------------------------------------------------------')\n print('Detecting maxima and minima of target function...',)\n\n peaks_min, min_coord, peaks_max, max_coord = self.detect_min_max(AA.reshape(xi.shape))\n print('done')\n print('Number of the min peaks: {}'.format(len(peaks_min)))\n print('Number of the max peaks: {}'.format(len(peaks_max)))\n print('---------------------------------------------------------------------\\n')\n # fig=plt.figure()\n # ax = fig.add_subplot(111, projection='3d')\n # ax.plot_surface(xi[:,:,60],yi[:,:,60],bbb2[:,:,60], cmap=cm.jet, linewidth=0.2)\n # plt.hold(True)\n # plt.show()\n\n if peaks_max==[]:\n peaks=np.insert(peaks_min, np.arange(len(peaks_max)), peaks_max)\n coords=np.insert(min_coord, np.arange(max_coord.shape[1]), max_coord, axis=1)\n else:\n peaks = np.insert(peaks_max, np.arange(len(peaks_min)), peaks_min)\n coords = np.insert(max_coord, np.arange(min_coord.shape[1]), min_coord, axis=1)\n\n self.peaks=peaks\n self.coords=coords\n\n par = [0.0]*(self._num_fu*5)\n j1 = 0\n aaaa = 1\n for j in range(self._num_fu):\n if (j > aaaa*self.coords.shape[1]-1):\n j1 = 0\n aaaa += 1\n par[j*5] = xi[self.coords[0, j1], self.coords[0, j1], self.coords[0, j1]]\n par[j*5+1] = yi[self.coords[1, j1], self.coords[1, j1], self.coords[1, j1]]\n par[j*5+2] = zi[self.coords[2, j1], self.coords[2, j1], self.coords[2, j1]]\n # par[j*5+3] = 0.1003+0.1000*math.copysign(1, (pow(-1, j)))\n par[j*5+3] = 0.0001\n# if j < 15:\n# par[j*5+3] = 0.00001\n# else:\n# par[j*5+3] = 0.0005\n par[j*5+4] = self.peaks[j1]\n# print(coords[0, j1], coords[1, j1], coords[2, j1])\n j1 += 1\n # popt, pcov = curve_fit(self.modelfun1, x[:,1:20000], F[1:20000],p0=par,maxfev=150000,xtol=1e-8,ftol=1e-8)\n popt, pcov = curve_fit(\n self.modelfun1, X, F, p0=par, maxfev=150000, xtol=1e-6,\n ftol=1e-8)\n # popt, pcov = curve_fit(self.modelfun1, XX, AA, p0=par)\n self._gf = popt\n# self.error=np.diagonal(pcov, offset=0)\n# print(pcov)\n else:\n # pass\n sys.exit(\"Wrong flag in do_fit\")",
"def fit_beam_and_baselines(self, pol='absI', circular_beam='auto', bl_degrees=(1, 3),\n refine_beam=True, spike_width=0, band=0):\n scan_coords = [scan.target_coords for scan in self.scans]\n scan_data = [remove_spikes(scan.pol(pol)[:, band], spike_width=spike_width) for scan in self.scans]\n # Auto beam shape picks a circular beam for dual-pol terms and elliptical beam for single-pol terms\n dual_pol = pol not in ('HH', 'VV')\n circular_beam = dual_pol if circular_beam == 'auto' else circular_beam\n # FWHM beamwidth (in radians) for uniformly illuminated circular dish is 1.03 lambda / D\n # FWHM beamwidth for Gaussian-tapered circular dish is 1.22 lambda / D\n # The antenna beamwidth factor is somewhere between 1.03 and 1.22\n ant = self.dataset.antenna\n expected_width = ant.beamwidth * katpoint.lightspeed / (self.dataset.freqs[band] * 1e6) / ant.diameter\n # An interferometer measures the beam voltage pattern while a single dish measures the beam power pattern\n # Since power = voltage ^ 2, a Gaussian voltage pattern ends up being sqrt(2) wider than the power pattern\n if self.dataset.antenna2:\n expected_width *= np.sqrt(2.0)\n if not circular_beam:\n expected_width = [expected_width, expected_width]\n # Degrees of freedom is time-bandwidth product (2 * BW * t_dump) of each sample\n dof = 2.0 * (self.dataset.bandwidths[band] * 1e6) / self.dataset.dump_rate\n # Stokes I etc has double the degrees of freedom, as it is the sum of the independent HH and VV samples\n dof = 2 * dof if dual_pol else dof\n # Refining the beam and baselines requires scan timestamps\n scan_timestamps = [scan.timestamps for scan in self.scans] if refine_beam else None\n # Fit beam and baselines directly to positive coherencies / Stokes params only, otherwise use I as proxy\n positive_pol = pol in ('absI', 'absHH', 'absVV', 'I', 'HH', 'VV', 'XX', 'YY')\n scan_total_power = [remove_spikes(scan.pol('absI')[:, band], spike_width=spike_width) for scan in self.scans] \\\n if not positive_pol else None\n logger.debug(\"Fitting beam and initial baseline of degree (%d, %d) to pol '%s' of target '%s':\" %\n (bl_degrees[0], bl_degrees[1], pol if positive_pol else 'absI', self.target.name))\n self.beam, baselines, self.baseline = fit_beam_and_baselines(scan_coords, scan_data, expected_width, dof,\n bl_degrees, scan_timestamps, scan_total_power)\n for scan, bl in zip(self.scans, baselines):\n scan.baseline = bl",
"def kernel_from_step_fit(tp_fit_res, kernel_length=60000):\n\n # TODO: give function the right argument, which is fit data\n # Extracting the fit results\n tau_1 = tp_fit_res.best_values['tau1']\n amp_1 = tp_fit_res.best_values['amp1']\n offset = tp_fit_res.best_values['offset']\n tau_2 = tp_fit_res.best_values['tau2']\n amp_2 = tp_fit_res.best_values['amp2']\n offset = tp_fit_res.best_values['offset']\n tau_3 = tp_fit_res.best_values['tau3']\n amp_3 = tp_fit_res.best_values['amp3']\n offset = tp_fit_res.best_values['offset']\n\n # These are the analytical expressions for the kernel corrections\n # to do a better mathematical formula for amp_kernel_1 and tau_kernel_1\n # to do offset appears in both values\n amp_kernel_1 = -amp_1/(1.+amp_1)\n tau_kernel_1 = tau_1*(1+amp_1)\n amp_kernel_2 = -amp_2/(1.+amp_2)\n tau_kernel_2 = tau_2*(1+amp_2)\n amp_kernel_3 = -amp_3/(1.+amp_3)\n tau_kernel_3 = tau_3*(1+amp_3)\n tpm_taus = [tau_kernel_1, tau_kernel_2, tau_kernel_3]\n tpm_amps = [amp_kernel_1, amp_kernel_2, amp_kernel_3]\n\n tau_idx = np.argmax(tpm_taus)\n tau_m = tpm_taus[tau_idx]\n amp_m = tpm_amps[tau_idx]\n t_kernel = np.arange(kernel_length)*1e-9\n # fit_kernel_step = (1. + offset*amp_kernel_1*np.exp(-t_kernel/tau_kernel_1)\n # + offset*amp_kernel_2*np.exp(-t_kernel/tau_kernel_2))/offset\n # + offset*amp_kernel_3*np.exp(-t_kernel/tau_kernel_3))/offset\n\n # it is good practice to correct only 1 order at a time\n fit_kernel_step = (1. + offset*amp_m*np.exp(-t_kernel/tau_m))/offset\n # FIXME -> we want to use the maximal tau here\n\n # calculates the response of the delta function based on the kernel for\n # the step\n fit_kernel = kf.kernel_from_kernel_stepvec(fit_kernel_step)\n\n return fit_kernel, fit_kernel_step, t_kernel",
"def _configure_constraint_introspection(phase):\n for constraint_type, constraints in [('initial', phase._initial_boundary_constraints),\n ('final', phase._final_boundary_constraints),\n ('path', phase._path_constraints)]:\n\n time_units = phase.time_options['units']\n time_name = phase.time_options['name']\n\n for con in constraints:\n # Determine the path to the variable which we will be constraining\n var = con['constraint_name'] if con['is_expr'] else con['name']\n var_type = phase.classify_var(var)\n\n if var != con['constraint_name'] is not None and var_type != 'ode':\n om.issue_warning(f\"Option 'constraint_name' on {constraint_type} constraint {var} is only \"\n f\"valid for ODE outputs. The option is being ignored.\", om.UnusedOptionWarning)\n\n if var_type == 't':\n con['shape'] = (1,)\n con['units'] = time_units if con['units'] is None else con['units']\n con['constraint_path'] = f'timeseries.{time_name}'\n\n elif var_type == 't_phase':\n con['shape'] = (1,)\n con['units'] = time_units if con['units'] is None else con['units']\n con['constraint_path'] = f'timeseries.{time_name}_phase'\n\n elif var_type == 'state':\n prefix = 'states:' if phase.timeseries_options['use_prefix'] else ''\n state_shape = phase.state_options[var]['shape']\n state_units = phase.state_options[var]['units']\n con['shape'] = state_shape\n con['units'] = state_units if con['units'] is None else con['units']\n con['constraint_path'] = f'timeseries.{prefix}{var}'\n\n elif var_type == 'parameter':\n param_shape = phase.parameter_options[var]['shape']\n param_units = phase.parameter_options[var]['units']\n con['shape'] = param_shape\n con['units'] = param_units if con['units'] is None else con['units']\n con['constraint_path'] = f'parameter_vals:{var}'\n\n elif var_type in ['indep_control', 'input_control']:\n prefix = 'controls:' if phase.timeseries_options['use_prefix'] else ''\n control_shape = phase.control_options[var]['shape']\n control_units = phase.control_options[var]['units']\n\n con['shape'] = control_shape\n con['units'] = control_units if con['units'] is None else con['units']\n con['constraint_path'] = f'timeseries.{prefix}{var}'\n\n elif var_type in ['indep_polynomial_control', 'input_polynomial_control']:\n prefix = 'polynomial_controls:' if phase.timeseries_options['use_prefix'] else ''\n control_shape = phase.polynomial_control_options[var]['shape']\n control_units = phase.polynomial_control_options[var]['units']\n con['shape'] = control_shape\n con['units'] = control_units if con['units'] is None else con['units']\n con['constraint_path'] = f'timeseries.{prefix}{var}'\n\n elif var_type == 'control_rate':\n prefix = 'control_rates:' if phase.timeseries_options['use_prefix'] else ''\n control_name = var[:-5]\n control_shape = phase.control_options[control_name]['shape']\n control_units = phase.control_options[control_name]['units']\n con['shape'] = control_shape\n con['units'] = get_rate_units(control_units, time_units, deriv=1) \\\n if con['units'] is None else con['units']\n con['constraint_path'] = f'timeseries.{prefix}{var}'\n\n elif var_type == 'control_rate2':\n prefix = 'control_rates:' if phase.timeseries_options['use_prefix'] else ''\n control_name = var[:-6]\n control_shape = phase.control_options[control_name]['shape']\n control_units = phase.control_options[control_name]['units']\n con['shape'] = control_shape\n con['units'] = get_rate_units(control_units, time_units, deriv=2) \\\n if con['units'] is None else con['units']\n con['constraint_path'] = f'timeseries.{prefix}{var}'\n\n 
elif var_type == 'polynomial_control_rate':\n prefix = 'polynomial_control_rates:' if phase.timeseries_options['use_prefix'] else ''\n control_name = var[:-5]\n control_shape = phase.polynomial_control_options[control_name]['shape']\n control_units = phase.polynomial_control_options[control_name]['units']\n con['shape'] = control_shape\n con['units'] = get_rate_units(control_units, time_units, deriv=1) \\\n if con['units'] is None else con['units']\n con['constraint_path'] = f'timeseries.{prefix}{var}'\n\n elif var_type == 'polynomial_control_rate2':\n prefix = 'polynomial_control_rates:' if phase.timeseries_options['use_prefix'] else ''\n control_name = var[:-6]\n control_shape = phase.polynomial_control_options[control_name]['shape']\n control_units = phase.polynomial_control_options[control_name]['units']\n con['shape'] = control_shape\n con['units'] = get_rate_units(control_units, time_units, deriv=2) \\\n if con['units'] is None else con['units']\n con['constraint_path'] = f'timeseries.{prefix}{var}'\n\n elif var_type == 'timeseries_exec_comp_output':\n con['shape'] = (1,)\n con['units'] = None\n con['constraint_path'] = f'timeseries.timeseries_exec_comp.{var}'\n\n else:\n # Failed to find variable, assume it is in the ODE. This requires introspection.\n ode = phase.options['transcription']._get_ode(phase)\n\n meta = get_source_metadata(ode, src=var, user_units=con['units'], user_shape=con['shape'])\n\n con['shape'] = meta['shape']\n con['units'] = meta['units']\n con['constraint_path'] = f'timeseries.{con[\"constraint_name\"]}'",
"def reset_parameters(self, p: Dict[str, ArrayType]):\n super().reset_parameters(p)\n if self.method == \"trust-constr\":\n if self.opt.nk:\n self._constraints[\"k\"].A = csc_matrix(self.opt.M(self.p).toarray())\n self._constraints[\"k\"].lb = -self.opt.c(self.p).toarray().flatten()\n if self.opt.na:\n eq = -self.opt.b(self.p).toarray().flatten()\n self._constraints[\"a\"].A = csc_matrix(self.opt.A(self.p).toarray())\n self._constraints[\"a\"].lb = eq\n self._constraints[\"a\"].ub = eq\n if self._constraints:\n self.minimize_input[\"constraints\"] = list(self._constraints.values())",
"def partial_fit(self, GRFData):\n\n self.__is_valid_dict(GRFData)\n for component in self.comp_list:\n self.scaler[component].partial_fit(np.reshape(GRFData[component], (-1, 1)))\n self.isFitted = True",
"def sweep(dir_out,box,channel,width,delay,scope,min_volt=None):\n print '____________________________'\n print width\n\n #fixed options\n height = 16383 \n fibre_delay = 0\n trigger_delay = 0\n pulse_number = 100\n #first select the correct channel and provide settings\n logical_channel = (box-1)*8 + channel\n \n sc.select_channel(logical_channel)\n sc.set_pulse_width(width)\n sc.set_pulse_height(16383)\n sc.set_pulse_number(pulse_number)\n sc.set_pulse_delay(delay)\n sc.set_fibre_delay(fibre_delay)\n sc.set_trigger_delay(trigger_delay)\n \n # first, run a single acquisition with a forced trigger, effectively to clear the waveform\n scope._connection.send(\"trigger:state ready\")\n time.sleep(0.1)\n scope._connection.send(\"trigger force\")\n time.sleep(0.1)\n\n # Get pin read\n time.sleep(0.1)\n sc.fire_sequence() # previously fire_sequence!\n #wait for the sequence to end\n tsleep = pulse_number * (delay*1e-3 + 210e-6)\n time.sleep(tsleep) #add the offset in\n pin = None\n # while not comms_flags.valid_pin(pin,channel):\n while pin==None:\n pin,rms, _ = sc.tmp_read_rms()\n print \"PIN (sweep):\",pin[logical_channel], rms[logical_channel]\n sc.stop()\n\n # File system stuff\n check_dir(\"%s/raw_data/\" % (dir_out))\n directory = check_dir(\"%s/raw_data/Channel_%02d/\" % (dir_out,logical_channel))\n fname = \"%sWidth%05d\" % (directory,width)\n \n # Check scope\n ck = find_and_set_scope_y_scale(1,height,width,delay,scope,scaleGuess=min_volt)\n\n if ck == True:\n print \"Saving raw files to: %s...\" % fname\n sc.fire_continuous()\n time.sleep(0.2)\n save_ck = save_scopeTraces(fname, scope, 1, 100)\n sc.stop()\n if save_ck == True:\n # Calc and return params\n x,y = calc.readPickleChannel(fname, 1)\n results = calc.dictionary_of_params(x,y)\n results[\"pin\"] = pin[logical_channel]\n results[\"pin error\"] = rms[logical_channel]\n calc.printParamsDict(results, width)\n calc.plot_eg_pulses(x,y,10, fname='%s/LastMeasuredPulses.png' % dir_out.split(\"/\")[0])\n #os.system(\"open %s/LastMeasuredPulses.png\" % dir_out.split(\"/\")[0])\n elif save_ck == False:\n results = return_zero_result()\n results['pin'] = pin[logical_channel]\n else: \n results = return_zero_result()\n results['pin'] = pin[logical_channel]\n results[\"pin error\"] = rms[logical_channel]\n sc.stop()\n return results",
"def fitRigidSize(data, target, x0=None, xtol=1e-3, maxfev=0, verbose=0):\n if x0 == None:\n x0 = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]\n\n def obj(x):\n dataT = transformRigidSize3D(data, x)\n d = ( ( dataT - target ) ** 2.0 ).sum(1)\n return d\n\n x0 = scipy.array(x0)\n if verbose:\n rms0 = scipy.sqrt(obj(x0).mean())\n print 'initial RMS:', rms0\n\n xOpt = leastsq(obj, x0, xtol=xtol, maxfev=maxfev)[0]\n\n if verbose:\n rmsOpt = scipy.sqrt(obj(xOpt).mean())\n print 'final RMS:', rmsOpt\n\n dataFitted = transformRigidSize3D(data, xOpt)\n return xOpt, dataFitted",
"def fit_edp(self):\n self.edp = minimize(self._residual_edp, self.edp_par)\n self._set_phase()",
"def setup_pwn(name,pwndata,phase, free_radius=5, tempdir=None, emin=1.0e2, emax=1.0e5,maxroi=10,model=None,**kwargs):\n sources=yaml.load(open(pwndata))\n\n catalog_name=sources[name]['catalog']\n ltcube=sources[name]['ltcube']\n pulsar_position=SkyDir(*sources[name]['dir'])\n ft2=sources[name]['ft2']\n ft1=sources[name]['ft1']\n\n # in case no list was passed\n if len(phase)==2 and isinstance(phase[0],numbers.Real) and \\\n isinstance(phase[1],numbers.Real):\n\n # write in case phase wraps around.\n if phase[0]>phase[1]:\n phase=[[phase[0],1.0],[0.0,phase[1]]]\n else:\n phase = [phase] \n\n phase_factor=get_phase_factor(phase)\n print \"phase\"\n print phase\n print \"phase_factor=%.2f\"%phase_factor\n\n catalog=FermiCatalog(e(\"$FERMI/catalogs/gll_psc_v02.fit\"),free_radius=free_radius)\n catalog_source=[i for i in catalog.get_sources(SkyDir(),180) if i.name==catalog_name][0]\n\n center=catalog_source.skydir\n\n if tempdir is None: tempdir=mkdtemp(prefix='/scratch/')\n\n binfile=j(tempdir,'binned_phased.fits')\n\n # apply phase cut to ft1 file\n phased_ft1 = j(tempdir,'ft1_phased.fits')\n phasetools.phase_cut(ft1,phased_ft1,phaseranges=phase)\n\n # create a temporary ltcube scaled by the phase factor\n# phased_ltcube=j(tempdir,'phased_ltcube.fits')\n# phase_ltcube(ltcube,phased_ltcube, phase=phase)\n phased_ltcube=ltcube\n from uw.like.pointspec import DataSpecification\n data_specification = DataSpecification(\n ft1files = phased_ft1,\n ft2files = ft2,\n ltcube = phased_ltcube,\n binfile = binfile)\n\n spectral_analysis = SpectralAnalysis(data_specification,\n binsperdec = 4,\n emin = 100,\n emax = 100000,\n irf = \"P6_V3_DIFFUSE\",\n roi_dir = center,\n maxROI = maxroi,\n minROI = maxroi)\n\n if model == None :\n roi=spectral_analysis.roi(\n roi_dir=center,\n diffuse_sources=get_default_diffuse(diffdir=e(\"$FERMI/diffuse\"),\n gfile=\"gll_iem_v02.fit\",\n ifile=\"isotropic_iem_v02.txt\"),\n catalogs = catalog,\n phase_factor = 1.0,\n fit_emin = [emin,emin],\n fit_emax = [emax,emax],\n **kwargs)\n else :\n roi=spectral_analysis.roi(\n roi_dir=center,\n xmlfile = model,\n phase_factor =1.0,\n fit_emin = [emin,emin],\n fit_emax = [emax,emax],\n **kwargs)\n\n print \"---------------------Energy range--------------------\"\n \n print \"emin=\"+str(roi.bands[0].emin)+\"\\n\"\n print \"emax=\"+str(roi.bands[len(roi.bands)-1].emax)+\"\\n\"\n \n\n # keep overall flux of catalog source,\n # but change the starting index to 2.\n roi.modify(which=catalog_name, name=name, index=2, \n keep_old_flux=True)\n\n return roi",
"def _redef_via_predef_eqn(self):\r\n time = self.current_T # + self.d_T\r\n\r\n self.Beta = (self.diff_scale * self.thermal_conductivity) / \\\r\n (self.convect_coeff) \r\n self.Epsilon = self.d_T * self.thermal_conductivity / \\\r\n (self.density * self.heat_capacity)\r\n\r\n # Source term.\r\n def F_func(elem, eta):\r\n x = elem.local_to_global(eta)\r\n F = elem.eval_elem(self.node_map, self.lst_tmp, [eta])[0]\r\n F -= self.Epsilon * self.redef_F_laplacian(x[0], x[1], time)\r\n F += self.redef_dTdt(x[0], x[1], time) * self.d_T\r\n return elem.funcs(eta) * F\r\n\r\n self.vF_vect_vol = et.elems_2_array(self.mesh,\r\n F_func,\r\n self.node_map,\r\n gauss_mult=2) # Use double gp_1D\r\n\r\n # Boundary term.\r\n def f_func(elem, eta):\r\n n = elem.guess_normal_vector_global(eta)\r\n f = elem.eval_elem(self.node_map, self.lst_tmp, [eta])[0]\r\n x = elem.local_to_global(eta)\r\n # Evaluate our boundary term.\r\n f += self.Beta * self.redef_f_norm_grad(x[0], x[1], time, n)\r\n f += self.redef_dTdt(x[0], x[1], time) * self.d_T\r\n return elem.funcs(eta) * f\r\n\r\n self.vf_vect_bound = et.edge_2_array(self.mesh,\r\n \"Boundary\",\r\n f_func,\r\n self.node_map,\r\n gauss_mult=2)",
"def fit_clicked(self):\n variables = self.model.parameters['variables']\n bounds = {}\n guess = {}\n\n for var in variables:\n bounds[var] = (self.view.ui.table_var_map[var + 'min'].value(),\n self.view.ui.table_var_map[var + 'max'].value())\n guess[var] = self.view.ui.table_var_map[var + 'guess'].value()\n\n self.model.parameters['bounds'] = bounds\n self.model.parameters['guess'] = guess\n self.model.parameters['Norm'] = self.view.ui.radiobutton_Norm.isChecked()\n self.model.parameters['method'] = self.view.ui.combobox_Method.currentText()\n\n try:\n self.model.do_fit()\n except Exception as e:\n self.logger.error(e)",
"def _axis_fit(self, idx, func, p0, slice_list=None, bounds=None, fixed_param=None, verbose=False):\n # TODO inline comment of the function\n # Reduce the functions parameters according to the fixed_param\n reduced_p0 = p0\n reduced_bounds = bounds\n reduced_func = func\n # TODO optimize this part\n if type(fixed_param).__name__ != 'NoneType':\n def reduced_func(p, x, *args, **kwargs):\n p_new, j = [], 0\n for param_i, param_val in enumerate(p0):\n if not (param_i in fixed_param[0]):\n p_new += [p[j]]\n j += 1\n else:\n for value_i, value in enumerate(fixed_param[0]):\n if value == param_i:\n p_new += [fixed_param[1][value_i]]\n return func(p_new, x, *args, **kwargs)\n\n reduced_p0 = []\n for i, param in enumerate(p0):\n if not (i in fixed_param[0]):\n reduced_p0 += [param]\n\n reduced_bounds = [[], []]\n for i, param in enumerate(p0):\n if not (i in fixed_param[0]):\n reduced_bounds[0] += [bounds[0][i]]\n reduced_bounds[1] += [bounds[1][i]]\n reduced_bounds = tuple(reduced_bounds)\n # noinspection PyUnusedLocal\n fit_result = None\n if slice_list == [0, 0, 1] or self.data[idx][slice_list[0]:slice_list[1]:slice_list[2]].shape == 0 \\\n or np.any(np.isnan(reduced_p0)) \\\n or np.any(np.isnan(reduced_bounds[0])) or np.any(np.isnan(reduced_bounds[1])) \\\n or np.any(np.isnan(p0)):\n\n self.logger.debug('Bad inputs')\n fit_result = (np.ones((len(reduced_p0), 2)) * np.nan)\n ndof = (slice_list[1] - slice_list[0]) / slice_list[2] - len(reduced_p0)\n chi2 = np.nan\n else:\n if not slice_list:\n slice_list = [0, self.bin_centers.shape[0] - 1, 1]\n ndof = (slice_list[1] - slice_list[0]) / slice_list[2] - len(reduced_p0)\n chi2 = np.nan\n try:\n residual = lambda p, x, y, y_err: self._residual(reduced_func, p, x, y, y_err)\n out = scipy.optimize.least_squares(residual, reduced_p0, args=(\n self.bin_centers[slice_list[0]:slice_list[1]:slice_list[2]],\n self.data[idx][slice_list[0]:slice_list[1]:slice_list[2]],\n self.errors[idx][slice_list[0]:slice_list[1]:slice_list[2]]), bounds=reduced_bounds)\n # noinspection PyUnresolvedReferences\n val = out.x\n # noinspection PyUnresolvedReferences,PyUnresolvedReferences\n chi2 = np.sum(out.fun * out.fun)\n try:\n # noinspection PyUnresolvedReferences,PyUnresolvedReferences\n cov = np.sqrt(np.diag(inv(np.dot(out.jac.T, out.jac))))\n fit_result = np.append(val.reshape(val.shape + (1,)), cov.reshape(cov.shape + (1,)), axis=1)\n except np.linalg.linalg.LinAlgError as inst:\n self.logger.warning('Could not compute error in the fit of',idx)\n self.logger.debug(inst)\n fit_result = np.append(val.reshape(val.shape + (1,)), np.ones((len(reduced_p0), 1)) * np.nan,\n axis=1)\n\n except Exception as inst:\n self.logger.error('Could not fit index', idx)\n self.logger.error(inst)\n self.logger.debug('p0:', reduced_p0)\n self.logger.debug('bound min:', reduced_bounds[0])\n self.logger.debug('bound max:', reduced_bounds[1])\n fit_result = (np.ones((len(reduced_p0), 2)) * np.nan)\n\n # restore the fixed_params in the fit_result\n if type(fixed_param).__name__ != 'NoneType':\n for k, i in enumerate(fixed_param[0]):\n fit_result = np.insert(fit_result, int(i), [fixed_param[1][k], 0.], axis=0)\n return fit_result, chi2, ndof",
"def face_pressure(self, tunnel_diameter: float, cutting_length: int,\r\n rockmass_dict: dict) -> float:\r\n unit_weight = rockmass_dict['spec. weight [N/m³]']\r\n cohesion = rockmass_dict['cohesion [Pa]']\r\n friction_angle = rockmass_dict['friction angle [°]']\r\n term1 = (2+3*(cutting_length/tunnel_diameter)**(6*np.tan(np.radians(friction_angle))))/(18*np.tan(np.radians(friction_angle)))-0.05\r\n term2 = cohesion / np.tan(np.radians(friction_angle))\r\n pf = unit_weight * tunnel_diameter * term1 - term2\r\n\r\n return pf",
"def calc(self):\n\n # the following if query ensures that volume- and interaction-terms\n # are only calculated if tau > 0.\n # (to avoid nan-values from invalid function-evaluations)\n\n if self.V.tau.shape == (1,):\n Isurf = self.surface()\n # differentiation for non-existing canopy, as otherwise NAN values\n if self.V.tau > 0.:\n Ivol = self.volume()\n if self.int_Q is True:\n Iint = self.interaction()\n else:\n Iint = np.array([0.])\n else:\n Ivol = np.array([0.])\n Iint = np.array([0.])\n else:\n # calculate surface-term (valid for any tau-value)\n Isurf = self.surface()\n\n # store initial parameter-values\n old_t_0 = self.t_0\n old_p_0 = self.p_0\n old_t_ex = self.t_ex\n old_p_ex = self.p_ex\n\n old_tau = self.V._get_tau()\n old_omega = self.V._get_omega()\n old_NN = self.SRF._get_NormBRDF()\n\n # set mask for tau > 0.\n mask = old_tau > 0.\n valid_index = np.where(mask)\n inval_index = np.where(~mask)\n\n # set parameter-values to valid values for calculation\n self.t_0 = old_t_0[valid_index[0]]\n self.p_0 = old_p_0[valid_index[0]]\n self.t_ex = old_t_ex[valid_index[0]]\n self.p_ex = old_p_ex[valid_index[0]]\n\n # squeezing the arrays is necessary since the setter-function for\n # tau, omega and NormBRDF automatically adds an axis to the arrays!\n self.V.tau = np.squeeze(old_tau[valid_index[0]])\n if np.array(self.V.omega).size != 1:\n self.V.omega = np.squeeze(old_omega[valid_index[0]])\n if np.array(self.SRF.NormBRDF).size != 1:\n self.SRF.NormBRDF = np.squeeze(old_NN[valid_index[0]])\n\n # calculate volume and interaction term where tau-values are valid\n _Ivol = self.volume()\n if self.int_Q is True:\n _Iint = self.interaction()\n else:\n _Iint = np.full_like(self.t_0, 0.)\n\n # reset parameter values to old values\n self.t_0 = old_t_0\n self.p_0 = old_p_0\n self.t_ex = old_t_ex\n self.p_ex = old_p_ex\n\n # squeezing the arrays is necessary since the setter-function for\n # tau, omega and NormBRDF automatically add an axis to the arrays!\n self.V.tau = np.squeeze(old_tau)\n self.V.omega = np.squeeze(old_omega)\n self.SRF.NormBRDF = np.squeeze(old_NN)\n\n # combine calculated volume-contributions for valid tau-values\n # with zero-arrays for invalid tau-values\n Ivol = np.ones_like(self.t_0)\n Ivol[valid_index[0]] = _Ivol\n Ivol[inval_index[0]] = np.ones_like(Ivol[inval_index[0]]) * 0.\n\n # combine calculated interaction-contributions for valid tau-values\n # with zero-arrays for invalid tau-values\n if self.int_Q is True:\n Iint = np.ones_like(self.t_0)\n Iint[valid_index[0]] = _Iint\n Iint[inval_index[0]] = np.ones_like(Iint[inval_index[0]]) * 0.\n else:\n Iint = np.full_like(self.t_0, 0.)\n\n return Isurf + Ivol + Iint, Isurf, Ivol, Iint",
"def fit(self, X, Y, constraints=None, warm_start=False):\n\n\t\tif self.verbose:\n\t\t\tcvxopt.solvers.options['show_progress'] = False\n\n\t\tn_samples = len(X)\t\n\n\t\t#part of w is computed for easiness(useful for oneSlack formulation)\n\t\t#for i in xrange(self.n):\n\t\t#\ttempPsi = psi(X[i], Y[i], self.sizePsi)\n\t\t#\tself.w += np.transpose(tempPsi)\n\t\t#self.w = self.w/n\n\n\t\t#initialize all slack variables to zero first\n\t\tslacks = np.zeros(n_samples)\n\t\tw_components = [] #the alphas and deltaPsi's for all violated constraints are stored here\n\n\t\tif constraints is None:\n\t\t\tself.last_active = [[] for i in range(n_samples)]\n\t\t\tself.objective_curve = []\n\t\t\tself.primal_objective_curve = []\n\n\t\telse:\n\t\t\tobjective = self.solve_n_slack_qp(n_samples)\n\n\t\tif not warm_start:\n\t\t\tself.constraints = [[] for i in range(n_samples)]\n\t\t\tself.w_components = []\n\n\t\tfor iteration in xrange(self.max_iter):\n\t\t\tif self.verbose > 0:\n\t\t\t\tprint(\"iteration : %d\" %(iteration+1))\n\n\t\t\tself.w_changed = False\n\n\t\t\t#find most violated constraint\n\t\t\tfor i in xrange(self.n):\n\t\t\t\tybar, slack, max_loss, deltaPsi = self.find_most_violated_constraint_margin(X[i], Y[i])\n\n\t\t\t\t#print ybar, Y[i], slack, max_loss, np.dot(deltaPsi, self.tempw)\n\t\t\t\t#chumma = raw_input('wait for key press ')\n\t\t\t\t#check whether the constraint violation is more than the tolerance level\n\t\t\t\t#if yes add constraint to the working set\n\t\t\t\tif (max_loss-np.dot(deltaPsi, self.tempw)) > (slacks[i]+self.eps):\n\t\t\t\t\tself.constraints[i].append([ybar, slack, max_loss, deltaPsi])\n\t\t\t\t\tself.w_changed = True\n\t\t\t\t\tslacks[i] = slack\n\n\t\t\t\t\t#print ybar, Y[i], slack, max_loss, np.dot(deltaPsi, self.tempw), 'from if cond'\n\n\t\t\t\t\t#solve the QP for new alphas\n\t\t\t\t\tself.w_components.append([i, deltaPsi])\n\t\t\t\t\tself.losses.append(max_loss)\t\t\t\t\t\n\t\t\t\t\tself.solve_n_slack_qp(n_samples)\n\n\n\t\t\t\t\t#calculate tempw\n\t\t\t\t\tself.tempw = np.zeros((self.sizePsi, 1))\n\t\t\t\t\ttempPsi = np.vstack([item[1] for item in self.w_components])\n\t\t\t\t\ttempPsi = np.transpose(tempPsi)\n\t\t\t\t\ttempAlphas = np.array(self.alphas)\n\t\t\t\t\t\n\t\t\t\t\t#print deltaPsi\n\t\t\t\t\t#print tempAlphas\n\t\t\t\t\t#print tempPsi.shape\n\t\t\t\t\tself.tempw = np.sum(tempAlphas*tempPsi, axis=1)\n\t\t\t\t\tself.tempw = self.tempw.reshape(self.sizePsi,1)\n\t\t\t\t\t#print self.tempw.T\n\n\t\t\t\n\t\t\t#if no constraints are added stop the optimization process\n\t\t\tif self.w_changed == False:\n\t\t\t\tbreak\n\n\t\tprint('No. of iterations taken :%d\\n' %(iteration+1))",
"def PGD(Params, relaxationVars, fixedBs, fixedTs, data):\n Tol = Params[\"tol\"]\n TolCD = Params[\"tolCD\"]\n Lambda0 = Params[\"Lambda\"]\n Lambda1 = Params[\"alpha\"] * Lambda0\n M = Params[\"M\"]\n y = data.ycentered # data.y - data.ybar\n\n Bindices = relaxationVars.BActive.copy() # list\n Tindices = relaxationVars.TActive.copy() # list of tuples (i,j)\n currentB, currentT = relaxationVars.initialSol.ToArray(Bindices, Tindices)\n fixedB = fixedBs.copy() # Dict. key = index, value = 0 or 1 (no index if not fixed)\n fixedT = fixedTs.copy() # Dict. key = (i,j), value = 0 or 1 (no index if not fixed)\n DualInitial = relaxationVars.useDual\n\n # Store the index mappings\n Bmap = {} # Bmap[i] = index of i in currentB or XB\n for i in range(len(Bindices)):\n Bmap[Bindices[i]] = i\n\n Tmap = {} # Tmap[(i,j)] = index of interaction in XT and currentT\n for i in range(len(Tindices)):\n c1, c2 = Tindices[i]\n Tmap[(c1, c2)] = i\n Tmap[(c2, c1)] = i\n\n # Next: Some sanity checks (those can be removed if we're carful about the\n # inputs)\n\n # Make sure if B_i is fixed to 0 then all T_{ij}'s (in Tindices) are also\n # fixed to zero\n for i, val in fixedB.items():\n if val == 0:\n for l, j in Tmap:\n if l < j and (l == i or j == i):\n fixedT[(l, j)] = 0\n\n # Make sure if T_{ij} is fixed to 1 then both B_i and B_j are fixed to 1\n for key, val in fixedT.items():\n if val == 1:\n i, j = key\n fixedB[i] = 1\n fixedB[j] = 1\n\n # Delete from Bindices and Tindices all the indices s.t. z_i = 0 / z_{ij}\n # = 0\n Bzeros = []\n for i, val in fixedB.items():\n if val == 0:\n Bzeros.append(Bmap[i])\n for i in sorted(Bzeros, reverse=True):\n del Bindices[i]\n currentB = np.delete(currentB, Bzeros)\n\n Tzeros = []\n for key, val in fixedT.items():\n if val == 0:\n Tzeros.append(Tmap[key])\n for i in sorted(Tzeros, reverse=True):\n del Tindices[i]\n currentT = np.delete(currentT, Tzeros)\n\n # Update the index mappings\n Bmap = {} # Bmap[i] = index of i in currentB or XB\n for i in range(len(Bindices)):\n Bmap[Bindices[i]] = i\n\n Tmap = {} # Tmap[(i,j)] = index of interaction in XT and currentT\n for i in range(len(Tindices)):\n c1, c2 = Tindices[i]\n Tmap[(c1, c2)] = i\n Tmap[(c2, c1)] = i\n\n # End of sanity checks\n\n # Retrive the matrices of the optimization variables\n # Later: We can store the centered columns (but this will require twice\n # the memory)\n XB, XT = data.Retrieve(Bindices, Tindices)\n XBMean = XB.mean(axis=0)\n XB = XB - XBMean\n XTMean = XT.mean(axis=0)\n XT = XT - XTMean\n\n Bfree = [i for i in Bindices if i not in fixedB]\n Tfree = [(i, j) for i, j in Tmap if i < j and (i, j) not in fixedT]\n TfreeIndices = [Tmap[(i, j)]\n for i, j in Tmap if i < j and (i, j) not in fixedT]\n lenFixedB = len(Bindices) - len(Bfree)\n lenFixedT = len([key for key in fixedT if fixedT[key] == 1])\n\n # (Dual) Block CD Variables\n u = defaultdict(float)\n w = defaultdict(dict)\n if not DualInitial:\n for i in Bindices:\n u[i] = 0\n for pair in Tmap:\n i, j = pair\n w[i][j] = 0\n else:\n for i in Bindices:\n if i in relaxationVars.u and i not in fixedB:\n u[i] = relaxationVars.u[i]\n else:\n u[i] = 0\n for i, j in Tmap:\n if j in relaxationVars.w[i] and (min(i, j), max(\n i, j)) not in fixedT and i not in fixedB and j not in fixedB:\n w[i][j] = relaxationVars.w[i][j]\n else:\n # Important: we need w[i][j] = 0 if T_{ij} if fixed (this is\n # due to the thresholding function)\n w[i][j] = 0\n\n sortedIndices = {i: sorted(w[i]) for i in w}\n sortedIndices = defaultdict(list, sortedIndices)\n\n # Prepare all 
the fixed matrices/vectors required for grad evaluation\n # later.\n XBty = np.dot(XB.T, y)\n XBtXB = np.dot(XB.T, XB)\n XTty = np.dot(XT.T, y)\n XTtXT = np.dot(XT.T, XT)\n XBtXT = np.dot(XB.T, XT)\n\n # Compute the lipschitz constant of the grad.\n Xfull = np.hstack((XB, XT))\n if Xfull.shape[1] != 0:\n eigvals, v = np.linalg.eig(np.dot(Xfull.T, Xfull))\n L = np.max(np.real(eigvals))\n else:\n L = 1 # any value here should suffice - it's not used.\n\n # Compute the lipschitz constants for BCD.\n LCD = {}\n for i in Bindices:\n LCD[i] = (len(w[i]) + 1) * ((Lambda0**2) / (L * M**2))\n\n # Define the thresholding constants\n frac = Lambda0 / (M * L)\n Mpfrac = M + frac\n frac1 = Lambda1 / (M * L)\n Mpfrac1 = M + frac1\n fracsqL = frac * frac * L\n LambdaovM = Lambda0 / M\n Lambda1ovM = Lambda1 / M\n Lambda1ovLambda0 = Lambda1 / Lambda0\n\n start = time.time()\n\n oldObj = math.inf\n for it in range(5000):\n grad_B = - XBty + np.dot(XBtXB, currentB) + np.dot(XBtXT, currentT)\n grad_T = - XTty + np.dot(XTtXT, currentT) + np.dot(XBtXT.T, currentB)\n Bstar = currentB - grad_B / L\n Tstar = currentT - grad_T / L\n # Iterate over the blocks, running dual BCD.\n # We employ dual warm starts by using the same (u,w) across the PGD updates.\n CDPrevObj = -math.inf\n LCDCurrent = copy(LCD)\n useZeroSuffCondition = True\n if useZeroSuffCondition:\n # Perform proximal screening below.\n zeroGroups = set()\n for i in Bfree:\n zeroSufficient = False\n cumsum = 0\n for j in w[i]:\n thrshld = max(\n (abs(Tstar[Tmap[(i, j)]]) / frac - Lambda1ovLambda0), 0)\n # Do feature level screening below.\n if thrshld == 0:\n # The initialization below ensures that \\theta_{ij} is\n # never updated by BCA.\n w[i][j] = 0\n w[j][i] = 0\n else:\n cumsum += thrshld\n\n if cumsum <= 1 - abs(Bstar[Bmap[i]]) / frac:\n zeroSufficient = True\n if zeroSufficient:\n u[i] = Bstar[Bmap[i]] / frac\n for j in w[i]:\n if abs(Tstar[Tmap[(i, j)]]) > frac1:\n w[i][j] = Tstar[Tmap[(\n i, j)]] / frac - Lambda1ovLambda0 * np.sign(Tstar[Tmap[(i, j)]])\n else:\n w[i][j] = 0\n w[j][i] = 0\n # Not nec. 
but can improve speed.\n LCDCurrent[j] -= (Lambda0**2) / (L * M**2)\n zeroGroups.add(i)\n\n BfreeMinusZeroGroups = [i for i in Bfree if i not in zeroGroups]\n CDObjConst = 0\n '''\n for i in zeroGroups:\n CDObjConst += q(u[i], Bstar[Bmap[i]], M, Lambda0, L,frac)\n for j in w[i]:\n if i < j:\n # T(wij, wji, thetaij, M, Lambda0, L, frac, frac1, Mpfrac1, LambdaovM, Lambda1ovM)\n CDObjConst += T(w[i][j], w[j][i], Tstar[Tmap[(i,j)]], M, Lambda0, L,frac, frac1, Mpfrac1, LambdaovM, Lambda1ovM)\n '''\n ####\n else:\n zeroGroups = set()\n CDObjConst = 0\n BfreeMinusZeroGroups = Bfree\n # To Turn the part above off, comment it out and set the following:\n # zeroGroups = set()\n # CDObjConst = 0\n # BfreeMinusZeroGroups = Bfree\n\n for innerit in range(10000):\n # for i in Bfree:\n for i in BfreeMinusZeroGroups:\n # First, Calculate utilde and wtilde for ith block\n utilde = u[i] + delq(u[i],\n Bstar[Bmap[i]],\n M,\n Lambda0,\n L,\n frac,\n Mpfrac,\n fracsqL,\n LambdaovM) / LCDCurrent[i]\n\n #wtilde = {}\n # for j in w[i]:\n # if B_j is fixed to 1, then we already set w[j][i] = 0\n # wtilde[j] = w[i][j] + delT(w[i][j], w[j][i], Tstar[Tmap[(i,j)]], M, Lambda0, L,frac, Mpfrac, fracsqL, LambdaovM)/LCD[i]\n sortedIndicesi = sortedIndices[i]\n # delT(wij, wji, thetaij, M, Lambda0, L, frac, frac1, Mpfrac1, LambdaovM)\n wtilde = [w[i][j] + delT(w[i][j],\n w[j][i],\n Tstar[Tmap[(i,\n j)]],\n M,\n Lambda0,\n L,\n frac,\n frac1,\n Mpfrac1,\n LambdaovM) / LCDCurrent[i] for j in sortedIndicesi]\n\n x = np.empty(shape=len(wtilde) + 1)\n # Solve the l1 projection problem.\n x[0] = utilde\n x[1:] = np.array(wtilde)\n projection = project(x)\n # Update the solution.\n u[i] = projection[0]\n # for j in range(len(w[i])):\n # w[i][sortedIndicesi[j]] = projection[j+1] ## +1 since u[i] is\n # first\n for counter, j in enumerate(sortedIndicesi):\n w[i][j] = projection[counter + 1]\n # Calculate the current objective\n CDObj = CDObjConst # 0\n for i in BfreeMinusZeroGroups: # Bfree:\n CDObj += q(u[i], Bstar[Bmap[i]], M, Lambda0, L, frac)\n for j in w[i]:\n if i < j:\n # T(wij, wji, thetaij, M, Lambda0, L, frac, frac1, Mpfrac1, LambdaovM, Lambda1ovM)\n CDObj += T(w[i][j], w[j][i], Tstar[Tmap[(i, j)]], M,\n Lambda0, L, frac, frac1, Mpfrac1, LambdaovM, Lambda1ovM)\n #Params[\"print\"](\"Inner obj: \", CDObj)\n if terminate(CDPrevObj, CDObj, TolCD):\n break\n CDPrevObj = CDObj\n\n # Get back the primal solution.\n for i in range(len(Bindices)):\n # if Bindices[i] is fixed to 1, then u[Bindices[i]] = 0 and the\n # update below will lead to currentB[i] = Bstar[i] (or +- M)\n if Bindices[i] not in zeroGroups:\n # assuming Bindices is sorted\n currentB[i] = dualtoprimalu(\n u[Bindices[i]], Bstar[i], M, Lambda0, L, frac)\n else:\n currentB[i] = 0\n\n for i, j in Tmap:\n # if i or j is fixed, the corresponding w[i][j] will be zero, which\n # leads to the correct update.\n if i < j:\n if (i, j) in Tfree:\n # dualtoprimalw(wij, wji, thetaij, M, Lambda0, L, frac, frac1, Mpfrac1)\n if i in zeroGroups or j in zeroGroups:\n currentT[Tmap[(i, j)]] = 0\n else:\n currentT[Tmap[(i, j)]] = dualtoprimalw(\n w[i][j], w[j][i], Tstar[Tmap[(i, j)]], M, Lambda0, L, frac, frac1, Mpfrac1)\n else: # careful, this is the case when no thresholding should be applied\n coefficient = Tstar[Tmap[(i, j)]]\n if np.abs(coefficient) <= M:\n currentT[Tmap[(i, j)]] = coefficient\n else:\n currentT[Tmap[(i, j)]] = M * np.sign(coefficient)\n\n r = y - np.dot(XB, currentB) - np.dot(XT, currentT)\n\n maxterm = 0\n for i in range(len(currentB)):\n if Bindices[i] not in 
fixedB:\n maxtemp = np.abs(currentB[i])\n for j in w[Bindices[i]]:\n maxtemp = max(maxtemp, np.abs(\n currentT[Tmap[(Bindices[i], j)]]))\n maxterm += maxtemp\n l1norm = np.sum(np.abs(currentT[TfreeIndices]))\n # IMPORTANT: Avoid using lenFixed and lenFixedT here.....!!!!!! ####\n currentobjective = 0.5 * np.dot(r, r) + Lambda0 * (\n lenFixedB + lenFixedT) + (Lambda0 / M) * maxterm + (Lambda1 / M) * l1norm\n\n if currentobjective > oldObj:\n Params[\"print\"](\"Objective Increased!!!\")\n\n if terminate(oldObj, currentobjective, Tol):\n break\n\n oldObj = currentobjective\n Params[\"print\"](\"Iteration :\", it, \". Objective: \", currentobjective)\n\n end = time.time()\n Params[\"print\"](\"Time: \", end - start, \" seconds.\")\n\n # Check if any small values should be zero.\n # Start with more aggressive checks first.\n Trunc = False\n for epsilon in [0.01, 1e-3, 1e-4, 1e-5, 1e-6]:\n currentBtrunc = np.copy(currentB)\n currentTtrunc = np.copy(currentT)\n currentBSetToZero = np.nonzero(np.abs(currentB) < epsilon)[0]\n currentBtrunc[currentBSetToZero] = 0\n currentBSetToZeroPSet = set(currentBSetToZero)\n for (i, j) in Tmap:\n if Bmap[i] in currentBSetToZeroPSet or Bmap[j] in currentBSetToZeroPSet:\n currentTtrunc[Tmap[(i, j)]] = 0\n\n currentTtrunc[np.abs(currentT) < epsilon] = 0\n rtrunc = y - np.dot(XB, currentBtrunc) - np.dot(XT, currentTtrunc)\n maxterm = 0\n for i in range(len(currentBtrunc)):\n if Bindices[i] not in fixedB:\n maxtemp = np.abs(currentBtrunc[i])\n for j in w[Bindices[i]]:\n maxtemp = max(maxtemp, np.abs(\n currentTtrunc[Tmap[(Bindices[i], j)]]))\n maxterm += maxtemp\n l1norm = np.sum(np.abs(currentTtrunc[TfreeIndices]))\n objectivetrunc = 0.5 * np.dot(rtrunc, rtrunc) + Lambda0 * (\n lenFixedB + lenFixedT) + (Lambda0 / M) * maxterm + (Lambda1 / M) * l1norm\n\n Params[\"print\"](\n \"eps: \",\n epsilon,\n \" objectivetrunc: \",\n objectivetrunc,\n \" currentobjective: \",\n currentobjective)\n # 1.01 might be beneficial in some extreme cases where supp becomes\n # very large (but might also cause descent problems)\n if objectivetrunc <= currentobjective:\n '''\n currentB = currentBtrunc\n currentT = currentTtrunc\n r = rtrunc\n currentobjective = objectivetrunc\n '''\n Params[\"print\"](\"###CHANGE###\", \"eps: \", epsilon)\n Params[\"print\"](\"Final Objective :\", objectivetrunc)\n Trunc = True\n break\n\n integral = True\n\n for i in Bfree:\n zi = np.abs(currentB[Bmap[i]]) / M\n if zi > 0 and zi < 0.999:\n integral = False\n\n for i in TfreeIndices:\n zi = np.abs(currentT[i]) / M\n if zi > 0 and zi < 0.999:\n integral = False\n\n Bnnz = {key: currentB[Bmap[key]]\n for key in Bmap if currentB[Bmap[key]] != 0}\n Tnnz = {(i, j): currentT[Tmap[(i, j)]]\n for i, j in Tmap if i < j and currentT[Tmap[(i, j)]] != 0}\n intercept = data.ybar - np.dot(XBMean, currentB) - np.dot(XTMean, currentT)\n sol = Solution(Bnnz, Tnnz, intercept)\n\n if Trunc:\n BnnzTrunc = {key: currentBtrunc[Bmap[key]]\n for key in Bmap if currentBtrunc[Bmap[key]] != 0}\n TnnzTrunc = {(i, j): currentTtrunc[Tmap[(\n i, j)]] for i, j in Tmap if i < j and currentTtrunc[Tmap[(i, j)]] != 0}\n interceptTrunc = data.ybar - \\\n np.dot(XBMean, currentBtrunc) - np.dot(XTMean, currentTtrunc)\n solTrunc = Solution(BnnzTrunc, TnnzTrunc, interceptTrunc)\n else:\n BnnzTrunc = Bnnz\n TnnzTrunc = Tnnz\n interceptTrunc = intercept\n solTrunc = sol\n\n return (sol, solTrunc, currentobjective, integral, r, u, w)",
"def fit_final(\n self,\n r3=1 / 11, δ=0.04,\n trim_lower=0.01,\n trim_upper=0.99,\n maxiter=1000\n ):\n\n vec_coeffs_start_scaled = np.ones(self.index_ncoeffs.sum())\n self._set_constants_itrim(r3, δ, trim_lower, trim_upper)\n self.loglikeobs = self._loglikeobs_final\n print(\"Starting final fit.\")\n self.results_final_scaled = self.fit(\n start_params=vec_coeffs_start_scaled,\n method='bfgs',\n maxiter=maxiter,\n full_output=1,\n disp=1,\n callback=None,\n retall=1,\n gtol=1e-5\n )\n self.coeffs = self.coeffs_from_vec(\n self.results_final_scaled.params * self._coeffs_pilot_vec\n )\n self.coeffs_final = [self.coeffs[0].copy(), self.coeffs[1].copy()]\n self.index_final = self.get_index(self.coeffs_final)\n self.std_err_final = self.coeffs_from_vec(\n self.results_final_scaled.bse * np.abs(self._coeffs_pilot_vec)\n )"
] | [
"0.5658608",
"0.5568899",
"0.5530349",
"0.50715446",
"0.50218",
"0.5020094",
"0.4985122",
"0.49686703",
"0.49606922",
"0.49477375",
"0.49405155",
"0.49347383",
"0.49186304",
"0.49127778",
"0.4878627",
"0.4878266",
"0.4875737",
"0.4857548",
"0.48561665",
"0.48514897",
"0.48336968",
"0.48335448",
"0.48328254",
"0.47798565",
"0.47703275",
"0.47659212",
"0.47652456",
"0.47648576",
"0.47475067",
"0.474643"
] | 0.68589544 | 0 |
Calculates the structure factor associated with a field. Here, the structure factor is basically the power spectral density of the field `scalar_field`, normalized so that regridding or rescaling the field does not change the result. | def get_structure_factor(
scalar_field: ScalarField,
smoothing: Union[None, float, str] = "auto",
wave_numbers: Union[Sequence[float], str] = "auto",
add_zero: bool = False,
) -> Tuple[np.ndarray, np.ndarray]:
logger = logging.getLogger(__name__)
if not isinstance(scalar_field, ScalarField):
raise TypeError(
"Length scales can only be calculated for scalar "
f"fields, not {scalar_field.__class__.__name__}"
)
grid = scalar_field.grid
if not isinstance(grid, CartesianGridBase):
raise NotImplementedError(
"Structure factor can currently only be calculated for Cartesian grids"
)
if not all(grid.periodic):
logger.warning(
"Structure factor calculation assumes periodic boundary "
"conditions, but not all grid dimensions are periodic"
)
# do the n-dimensional Fourier transform and calculate the structure factor
f1 = np_fftn(scalar_field.data, norm="ortho").flat[1:]
flat_data = scalar_field.data.flat
sf = np.abs(f1) ** 2 / np.dot(flat_data, flat_data)
# an alternative calculation of the structure factor is
# f2 = np_ifftn(scalar_field.data, norm='ortho').flat[1:]
# sf = (f1 * f2).real
# sf /= (scalar_field.data**2).sum()
# but since this involves two FFT, it is probably slower
# determine the (squared) components of the wave vectors
k2s = [
np.fft.fftfreq(grid.shape[i], d=grid.discretization[i]) ** 2
for i in range(grid.dim)
]
# calculate the magnitude
k_mag = np.sqrt(reduce(np.add.outer, k2s)).flat[1:]
no_wavenumbers = wave_numbers is None or (
isinstance(wave_numbers, str) and wave_numbers == "auto"
)
if smoothing is not None and smoothing != "none":
# construct the smoothed function of the structure factor
if smoothing == "auto":
smoothing = k_mag.max() / 128
smoothing = float(smoothing) # type: ignore
sf_smooth = SmoothData1D(k_mag, sf, sigma=smoothing)
if no_wavenumbers:
# determine the wave numbers at which to evaluate it
k_min = 2 / grid.cuboid.size.max()
k_max = k_mag.max()
k_mag = np.linspace(k_min, k_max, 128)
else:
k_mag = np.array(wave_numbers)
# obtain the smoothed values at these points
sf = sf_smooth(k_mag)
elif not no_wavenumbers:
logger.warning(
"Argument `wave_numbers` is only used when `smoothing` is enabled."
)
if add_zero:
sf = np.r_[1, sf]
k_mag = np.r_[0, k_mag]
return k_mag, sf | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_length_scale(\n scalar_field: ScalarField,\n method: str = \"structure_factor_maximum\",\n full_output: bool = False,\n smoothing: Optional[float] = None,\n) -> Union[float, Tuple[float, Any]]:\n logger = logging.getLogger(__name__)\n\n if method == \"structure_factor_mean\" or method == \"structure_factor_average\":\n # calculate the structure factor\n k_mag, sf = get_structure_factor(scalar_field)\n length_scale = np.sum(sf) / np.sum(k_mag * sf)\n\n if full_output:\n return length_scale, sf\n\n elif method == \"structure_factor_maximum\" or method == \"structure_factor_peak\":\n # calculate the structure factor\n k_mag, sf = get_structure_factor(scalar_field, smoothing=None)\n\n # smooth the structure factor\n if smoothing is None:\n smoothing = 0.01 * scalar_field.grid.typical_discretization\n sf_smooth = SmoothData1D(k_mag, sf, sigma=smoothing)\n\n # find the maximum\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n\n max_est = k_mag[np.argmax(sf)]\n bracket = np.array([0.2, 1, 5]) * max_est\n logger.debug(f\"Search maximum of structure factor in interval {bracket}\")\n try:\n result = optimize.minimize_scalar(\n lambda x: -sf_smooth(x), bracket=bracket\n )\n except Exception:\n logger.exception(\"Could not determine maximal structure factor\")\n length_scale = np.nan\n else:\n if not result.success:\n logger.warning(\n \"Maximization of structure factor resulted in the following \"\n f\"message: {result.message}\"\n )\n length_scale = 1 / result.x\n\n if full_output:\n return length_scale, sf_smooth\n\n else:\n raise ValueError(\n f\"Method {method} is not defined. Valid values are `structure_factor_mean` \"\n \"and `structure_factor_maximum`\"\n )\n\n # return only the length scale with out any additional information\n return length_scale # type: ignore",
"def field_strength_to_power_flux(field: float) -> float:\n\n power = np.float_power(np.abs(field), 2)\n power *= (0.5 * speed_of_light * epsilon_0)\n\n return power",
"def scalar_potential(field, coord_sys):\n\n # Check whether field is conservative\n if not is_conservative(field):\n raise ValueError(\"Field is not conservative\")\n if field == Vector.zero:\n return S.Zero\n # Express the field exntirely in coord_sys\n # Substitute coordinate variables also\n if not isinstance(coord_sys, CoordSys3D):\n raise TypeError(\"coord_sys must be a CoordSys3D\")\n field = express(field, coord_sys, variables=True)\n dimensions = coord_sys.base_vectors()\n scalars = coord_sys.base_scalars()\n # Calculate scalar potential function\n temp_function = integrate(field.dot(dimensions[0]), scalars[0])\n for i, dim in enumerate(dimensions[1:]):\n partial_diff = diff(temp_function, scalars[i + 1])\n partial_diff = field.dot(dim) - partial_diff\n temp_function += integrate(partial_diff, scalars[i + 1])\n return temp_function",
"def mag(field):\n return np.sqrt(np.sum(field**2, axis=0, keepdims=True))",
"def make_field(self):\n uniaxial = self.u[0]*self.u[1]*self.u[2] != 0\n cubic = self.c1[0]*self.c1[1]*self.c1[2]*self.c2[0]*self.c2[1]*self.c2[2] != 0\n @nb.njit\n def field_func(m):\n heff = self.hext + field.demagnetization(m, self.Nd)\n if uniaxial:\n heff += field.uniaxial_anisotropy(m, self.u, self.hu1, self.hu2)\n if cubic:\n heff += field.cubic_anisotropy(m, self.c1, self.c2, self.c3, self.hc1, self.hc2)\n return heff\n self.field = field_func",
"def filter_field(self, field, frac=0.25):\n dom = field.domain\n logger.info(\"filtering field {} with frac={} using a set-scales approach\".format(field.name,frac))\n orig_scale = field.meta[:]['scale']\n field.set_scales(frac, keep_data=True)\n field['c']\n field['g']\n field.set_scales(orig_scale, keep_data=True)",
"def make_field(self):\n def field_func(m):\n return self.hext + field.demagnetization(m, self.Nd)\n self.field = field_func",
"def __init__(self, field):\n ScalingFunctional.__init__(self, field, 1.0)",
"def make_field(field):\n\n if \"time\" in field:\n return TimeField(field)\n if \"zd\" in field:\n return RadianField(field)\n else:\n return SimpleField(field)",
"def frac_field(self, *gens):\n from sympy.polys.domains import FractionField\n return FractionField(self, *gens)",
"def scale(structure):\n from numpy.linalg import det\n if \"O\" in [atom.type for atom in structure]: spvol = 8.5**3/4e0\n elif \"Se\" in [atom.type for atom in structure]: spvol = 9.5**3/4e0\n elif \"Te\" in [atom.type for atom in structure]: spvol = 10.5**3/4e0\n else: raise ValueError(\"unknown atom.type: %s\" % (atom.type,))\n\n nfu = float(len(structure)/7)*0.5 # 0.5 because 2 f.u. in spinel unit-cell.\n vol = det(structure.cell)\n return (nfu * spvol / vol)**(1e0/3e0)",
"def field(self) -> 'outputs.PreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldField':\n return pulumi.get(self, \"field\")",
"def _get_FIELD_humanized_display(self, field):\n value = getattr(self, field.attname)\n if value is None:\n return\n power = max([i for i in utils.POWERS if value // i > 0 and i > 1])\n value /= power\n template = ''\n template += '{:.%sf}' % field.humanized_decimals\n template += ' ' if field.spaced_display else ''\n template += utils.POWERS[power]\n template += '{!s:s}'\n return template.format(value, field.unit)",
"def sqf_part(self, f):\n domain = self.domain\n\n if domain.is_FiniteField:\n g = self.one\n for f, _ in self.sqf_list(f)[1]:\n g *= f\n\n return g\n\n if not f:\n return f\n\n gcd = f\n for x in self.gens:\n gcd = self.gcd(gcd, f.diff(x))\n sqf = f // gcd\n\n if domain.is_Field:\n return sqf.monic()\n return sqf.primitive()[1]",
"def calc_power(field):\r\n\r\n poynt_in_points = 0.5*numpy.real(field.p * numpy.conj(field.vn))\r\n power = numpy.sum(poynt_in_points)\r\n power *= field.one_pixel_area\r\n return power",
"def compute_magnetic_field(self, coords, params=None, basis=\"rpz\"):\n return self._scalar * self._field.compute_magnetic_field(coords, params, basis)",
"def _structure_factor_wave_number(\n rdf: freud.density.RDF, wave_number: float, num_particles: int\n):\n dr = rdf.R[1] - rdf.R[0]\n integral = dr * np.sum((rdf.RDF - 1) * rdf.R * np.sin(wave_number * rdf.R))\n density = num_particles / rdf.box.volume\n return 1 + 4 * np.pi * density / wave_number * integral",
"def sample(field: Field, geometry: Geometry) -> math.Tensor:\n assert all(dim not in field.shape for dim in geometry.shape.channel)\n if isinstance(field, SampledField) and field.elements.shallow_equals(geometry) and not geometry.shape.channel:\n return field.values\n if geometry.shape.channel:\n sampled = [field._sample(p) for p in geometry.unstack(geometry.shape.channel.name)]\n return math.stack(sampled, geometry.shape.channel)\n else:\n return field._sample(geometry)",
"def scalar(\n self,\n ax=None,\n figsize=None,\n multiplier=None,\n filter_field=None,\n colorbar=True,\n colorbar_label=\"\",\n filename=None,\n symmetric_clim=False,\n **kwargs,\n ):\n if self.field.nvdim > 1:\n raise ValueError(f\"Cannot plot {self.field.nvdim=} field.\")\n\n ax = self._setup_axes(ax, figsize)\n\n multiplier = self._setup_multiplier(multiplier)\n extent = self._extent(multiplier)\n\n values = self.field.array.copy().reshape(self.field.mesh.n)\n\n if filter_field is None:\n filter_field = self.field._valid_as_field\n\n self._filter_values(filter_field, values)\n\n if symmetric_clim and \"clim\" not in kwargs.keys():\n vmin = np.min(values, where=~np.isnan(values), initial=0)\n vmax = np.max(values, where=~np.isnan(values), initial=0)\n vmax_abs = max(abs(vmin), abs(vmax))\n kwargs[\"clim\"] = (-vmax_abs, vmax_abs)\n\n cp = ax.imshow(np.transpose(values), origin=\"lower\", extent=extent, **kwargs)\n\n if colorbar:\n self._add_colorbar(ax, cp, colorbar_label)\n\n self._axis_labels(ax, multiplier)\n\n self._savefig(filename)",
"def _real_field(self):\n try:\n return self.__real_field\n except AttributeError:\n from .real_mpfr import RealField\n self.__real_field = RealField(self._prec)\n return self.__real_field",
"def _field_stat(self, field):\r\n if not field in self.stats:\r\n stat = dq.FieldStatistics(field, distinct_threshold = self.distinct_threshold)\r\n self.stats[field] = stat\r\n else:\r\n stat = self.stats[field]\r\n return stat",
"def compute_magnetic_field(self, coords, params=None, basis=\"rpz\"):\n if params is None:\n params = [None] * len(self._fields)\n if isinstance(params, dict):\n params = [params]\n B = 0\n for i, field in enumerate(self._fields):\n B += field.compute_magnetic_field(coords, params[i % len(params)], basis)\n return B",
"def read_field(self, fieldname):\n if fieldname in ['wind_speed', 'wind_direction']:\n # create a virtual field\n variable = Variable(\n shortname=fieldname,\n description=VIRTUALFIELD_DESCR[fieldname],\n authority=self.get_naming_authority(),\n standardname=VIRTUALFIELD_STDNAME[fieldname]\n )\n field = Field(\n variable,\n OrderedDict([('time', 1),\n ('y', self.get_dimsize('y')),\n ('x', self.get_dimsize('x'))\n ]),\n datatype=numpy.dtype(numpy.float32),\n units=VIRTUALFIELD_UNITS[fieldname]\n )\n field.attach_storage(self.get_field_handler(fieldname))\n else:\n field = NCFile.read_field(self, fieldname)\n return field",
"def reduce_sample(field: Field, geometry: Geometry, dim=channel('vector')) -> math.Tensor:\n if isinstance(field, SampledField) and field.elements.shallow_equals(geometry):\n return field.values\n if geometry.shape.channel: # Reduce this dimension\n assert geometry.shape.channel.rank == 1, \"Only single-dimension reduction supported.\"\n if field.shape.channel.volume > 1:\n assert field.shape.channel.volume == geometry.shape.channel.volume, f\"Cannot sample field with channels {field.shape.channel} at elements with channels {geometry.shape.channel}.\"\n components = unstack(field, field.shape.channel.name)\n sampled = [c._sample(p) for c, p in zip(components, geometry.unstack(geometry.shape.channel.name))]\n else:\n sampled = [field._sample(p) for p in geometry.unstack(geometry.shape.channel.name)]\n dim = dim._with_item_names(geometry.shape.channel.item_names)\n return math.stack(sampled, dim)\n else: # Nothing to reduce\n return field._sample(geometry)",
"def _classify_object_field(field: s_obj.Field[Any]) -> FieldStorage:\n\n ftype = field.type\n shadow_ptr_kind = None\n shadow_ptr_type = None\n fieldtype = FieldType.OTHER\n\n is_array = is_multiprop = False\n if issubclass(ftype, s_obj.MultiPropSet):\n is_multiprop = True\n ftype = ftype.type\n elif (\n issubclass(\n ftype,\n (checked.CheckedList, checked.FrozenCheckedList,\n checked.CheckedSet, checked.FrozenCheckedSet))\n and not issubclass(ftype, s_expr.ExpressionList)\n ):\n is_array = True\n ftype = ftype.type # type: ignore\n\n if issubclass(ftype, s_obj.ObjectCollection):\n ptr_kind = 'multi link'\n ptr_type = 'schema::Object'\n if issubclass(ftype, s_obj.ObjectDict):\n fieldtype = FieldType.OBJ_DICT\n\n elif issubclass(ftype, s_obj.Object):\n ptr_kind = 'link'\n ptr_type = f'schema::{ftype.__name__}'\n\n elif issubclass(ftype, s_expr.Expression):\n shadow_ptr_kind = 'property'\n shadow_ptr_type = 'tuple<text: str, refs: array<uuid>>'\n ptr_kind = 'property'\n ptr_type = 'str'\n fieldtype = FieldType.EXPR\n\n elif issubclass(ftype, s_expr.ExpressionList):\n shadow_ptr_kind = 'property'\n shadow_ptr_type = (\n 'array<tuple<text: str, refs: array<uuid>>>'\n )\n ptr_kind = 'property'\n ptr_type = 'array<str>'\n fieldtype = FieldType.EXPR_LIST\n\n elif issubclass(ftype, s_expr.ExpressionDict):\n shadow_ptr_kind = 'property'\n shadow_ptr_type = '''array<tuple<\n name: str,\n expr: tuple<text: str, refs: array<uuid>>\n >>'''\n ptr_kind = 'property'\n ptr_type = 'array<tuple<name: str, expr: str>>'\n fieldtype = FieldType.EXPR_DICT\n\n elif issubclass(ftype, collections.abc.Mapping):\n ptr_kind = 'property'\n ptr_type = 'json'\n\n elif issubclass(ftype, (str, sn.Name)):\n ptr_kind = 'property'\n ptr_type = 'str'\n\n if field.name == 'name':\n # TODO: consider shadow-reflecting names as tuples\n shadow_ptr_kind = 'property'\n shadow_ptr_type = 'str'\n\n elif issubclass(ftype, bool):\n ptr_kind = 'property'\n ptr_type = 'bool'\n\n elif issubclass(ftype, int):\n ptr_kind = 'property'\n ptr_type = 'int64'\n\n elif issubclass(ftype, uuid.UUID):\n ptr_kind = 'property'\n ptr_type = 'uuid'\n\n elif issubclass(ftype, verutils.Version):\n ptr_kind = 'property'\n ptr_type = '''\n tuple<\n major: std::int64,\n minor: std::int64,\n stage: sys::VersionStage,\n stage_no: std::int64,\n local: array<std::str>,\n >\n '''\n else:\n raise RuntimeError(\n f'no metaschema reflection for field {field.name} of type {ftype}'\n )\n\n if is_multiprop:\n ptr_kind = 'multi property'\n if is_array:\n ptr_type = f'array<{ptr_type}>'\n\n return FieldStorage(\n fieldtype=fieldtype,\n ptrkind=ptr_kind,\n ptrtype=ptr_type,\n shadow_ptrkind=shadow_ptr_kind,\n shadow_ptrtype=shadow_ptr_type,\n )",
"def StructureFactor(ID,f,hkl,z=None):\n ID=goodID(ID)\n i=complex(0,1)\n h=hkl[0]\n k=hkl[1]\n l=hkl[2]\n L=latticeType[ID]\n if L=='fcc':\n F=f*(1+np.exp(-i*np.pi*(k+l))+np.exp(-i*np.pi*(h+l))+np.exp(-i*np.pi*(h+k)))\n elif L=='bcc':\n F=f*(1+np.exp(-i*np.pi*(h+k+l))) \n elif L=='cubic':\n F=f\n elif L=='diamond':\n F=f*(1+np.exp(-i*np.pi*(k+l))+np.exp(-i*np.pi*(h+l))+np.exp(-i*np.pi*(h+k)))*(1+np.exp(-i*2*np.pi*(h/4.0+k/4.0+l/4.0)))\n # elif L=='rhomb':\n # z=latticeParamRhomb[ID]\n # F=f*(1+np.exp(2*i*np.pi*(h+k+l)*z)) \n elif L=='tetr':\n F=f\n elif L=='hcp':\n F=f*(1+np.exp(2*i*np.pi*(h/3.0+2*k/3.0+l/2.0)))\n else:\n raise Exception(f'Unrecognized L: {L}')\n return F",
"def magnetisation(field):\n norm_field = df.Field(field.mesh, dim=1, value=(field.norm.array != 0))\n volume = df.integral(norm_field * df.dV, direction='xyz')\n return df.integral(field * df.dV / volume, direction='xyz')",
"def readOFScalar(case_dir,str_scal,str_time):\n scalarFile = case_dir + \"/\" + str_time + \"/\" + str_scal\n fileCheck(scalarFile) # does the file exists ? Stop if not.\n #\n # Init list\n flagRead = 0\n dataScal = []\n field = 'none'\n #\n # Read File\n for line in fileinput.input(scalarFile):\n words = line.split()\n if words:\n if words[0]=='internalField' and field=='none':\n field = 'internalField'\n if words[0]=='boundaryField' and field=='internalField':\n field = 'boundaryField'\n if flagRead == 0 and field=='internalField':\n if words[0]=='(':\n flagRead = 1\n elif flagRead == 1 and field=='internalField':\n if words[0]==')':\n flagRead = 0\n else:\n datum = float(words[0])\n dataScal.append(datum)\n return dataScal",
"def test_entities__Entity__getField__3(entity_with_field, schemaized_field):\n assert (schemaized_field ==\n entity_with_field.getField(schemaized_field.__name__))",
"def plot_field(mut_sample, field, lm, th=0.75,\n names=['blue', 'green', 'orange', 'purple'], ax=None,\n image=None, grid_mm2=None, n_factors=None, n_wt=2, flip=False, scale=15):\n if image is None:\n image = mut_sample._scaffold_image\n\n if grid_mm2 is None:\n grid_mm2 = (mut_sample.get_img_size(mut_sample.image)[0] \\\n * pixel2um / field.shape[1]) ** 2 / 1e6\n\n if n_factors is None:\n n_factors = field.shape[-1]\n\n f = field.mean(0)\n l = lm.mean(0)\n\n fmap = (f[:, :, :n_factors - 2]).argmax(2)\n fn = (cv.blur(l, (3, 3)) / grid_mm2 < 300)\n if type(th) is not list:\n fn |= (f[:, :, n_factors - 2:]).sum(2) > th\n elif type(th) is list:\n for i, t in enumerate(th):\n fn[(f[:, :, :n_factors - 2]).argmax(2) == i] |= ((f[:, :, n_factors - 2:]).sum(2) > t)[\n (f[:, :, :n_factors - 2]).argmax(2) == i]\n c = [get_cmap(cmaps_global[n])(150) for n in names] + [(1, 1, 1, 1)] * n_wt\n\n img = image\n img = (img / img.max() * 255).astype(np.uint8)\n s = img.shape\n s = tuple([int(x) for x in list(s)[::-1]])\n p35, p90 = np.percentile(img, (35, 90))\n processed_img = exposure.rescale_intensity(img, in_range=(p35, p90))\n\n b = cv.resize(processed_img, s)[::-1, :] / 255.\n b = np.maximum(np.minimum(b, 1), 0)\n Fc = np.array([c[int(i)] for i in fmap.flatten()]).reshape((*fmap.shape, -1)).transpose((1, 0, 2))[::-1, :, :3]\n Fc[fn.T[::-1, :], :] = 1.0\n out = (cv.resize(Fc, s) * b.reshape(*b.shape, 1))\n if flip:\n out = out[::-1, :]\n\n if ax is not None:\n ax.imshow(out)\n ax.plot([s[0] * 0.95,\n s[0] * 0.95 - 2.5e3 / 0.325 / scale],\n [s[1] * (.95),\n s[1] * (.95)], color='white', lw=3)\n ax.set_axis_off()\n else:\n plt.imshow(out)\n plt.plot([s[0] * 0.95,\n s[0] * 0.95 - 2.5e3 / 0.325 / scale],\n [s[1] * (.95),\n s[1] * (.95)], color='white', lw=3)\n plt.axis('off')"
] | [
"0.5794057",
"0.5752112",
"0.575145",
"0.5655662",
"0.56215703",
"0.55155873",
"0.53893906",
"0.53778607",
"0.5346394",
"0.5300327",
"0.5237999",
"0.5237276",
"0.5218443",
"0.5210143",
"0.5189513",
"0.5166773",
"0.51101947",
"0.51071626",
"0.51015085",
"0.50777686",
"0.50722134",
"0.50594217",
"0.5021238",
"0.5016469",
"0.50158995",
"0.50128573",
"0.50114083",
"0.49822798",
"0.49818644",
"0.49810728"
] | 0.72728807 | 0 |
Create and return a stub test. | def CreateStubTest(phases=None, params=None): # pylint: disable=invalid-name
test_metadata = htftest.TestMetadata('foo')
# pylint: disable=protected-access
if params is not None:
test_metadata._parameter_list = (
parameters.TestParameterList(params.parameters))
return htftest.HTFTest(test_metadata, phases or []) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def CreateStubTest(phases=None): # pylint: disable=invalid-name\n test_metadata = phase_data.TestMetadata('foo')\n return phase_data.phase_data(test_metadata, phases or [])",
"def test_stub(self):\n pass",
"def create_stub(cls, proto_py_module, stub_name):\n\n return cls.create_stubs(proto_py_module, stub_name)",
"def test_stub() -> None:\n test_val = 3\n assert test_val == 3",
"def make_test_object(self):\n return self.orm_cls.testing_create()",
"def test_new(self):",
"def test_new(self):",
"def create_instance(test_id, config, args):\n return TestT1Detail(test_id, config, args)",
"def test_create(api: API):\n api.user.create.return_value = 123456\n account = Account(api, \"USERNAME\", \"PASSWORD\")\n api.user.create.assert_called_once()\n assert account.create()",
"def create_test_node():\n node = cmds.createNode(\"unknown\")\n _add_test_attrs_to_node(node)\n return node",
"def create_stubs(cls, proto_py_module, *stub_names):\n\n return cls(proto_py_module, *stub_names).stubs",
"def make_shell_test(name):\n test = Test(name)\n test.add_step(\"run\", step_run, checks=[\n check_retcode_zero,\n create_check_reference_output(name+\".ref\"),\n ], allow_retries=3)\n return test",
"def _new(self):\n return self.lib.iperf_new_test()",
"def test_dummy():",
"def file_factory(test_workspace):\n\n return FileCreator(test_workspace)",
"def test_create_run(self):\n pass",
"def create_test_service(context, **kw):\n service = get_test_service(context, **kw)\n service.create()\n return service",
"def generate_test_method(test_name):\n\n def run_test(self):\n # backup any existing files with our expected output_name\n output_name = \"{}.png\".format(test_name)\n backup_name = output_name + \".backup\"\n if os.path.isfile(output_name):\n os.rename(output_name, backup_name)\n self.addCleanup(cleanup_backup, backup_name, output_name)\n\n # run the test\n ret = subprocess.call(\"python {}.py\".format(test_name), shell=True)\n self.assertEqual(ret, 0)\n\n output_exists = os.path.isfile(output_name)\n if output_exists:\n self.addCleanup(cleanup_output, output_name)\n\n ps_output_name = \"{}.ps\".format(test_name)\n if os.path.isfile(ps_output_name):\n # some tests may also generate postscript files which need to be deleted\n self.addCleanup(cleanup_output, ps_output_name)\n\n self.assertTrue(output_exists)\n\n return run_test",
"def test_dummy_test():\n pass",
"def test_create(self):\n pass",
"def create_mock_api_factory(cls):\n mock_api, mock_scheduler_client = cls.create_mock_api()\n mock_api_factory = Mock()\n mock_api_factory.return_value = mock_api\n return mock_api_factory, mock_scheduler_client",
"def beta_create_GNMITest_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):\n request_serializers = {\n ('gnmitest.GNMITest', 'Run'): github_dot_com_dot_openconfig_dot_gnmitest_dot_proto_dot_suite_dot_suite__pb2.Suite.SerializeToString,\n }\n response_deserializers = {\n ('gnmitest.GNMITest', 'Run'): github_dot_com_dot_openconfig_dot_gnmitest_dot_proto_dot_report_dot_report__pb2.Report.FromString,\n }\n cardinalities = {\n 'Run': cardinality.Cardinality.UNARY_UNARY,\n }\n stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)\n return beta_implementations.dynamic_stub(channel, 'gnmitest.GNMITest', cardinalities, options=stub_options)",
"def test_create10(self):\n pass",
"def _create_stub(target, port):\n channel = gnmi_pb2_grpc.grpc.insecure_channel(target + ':' + port)\n return gnmi_pb2_grpc.gNMIStub(channel)",
"def create_access_test(fullpath):\n try:\n verify_test_exists(fullpath)\n except:\n add_test(fullpath)\n access_test(fullpath)",
"def test_create(session, client, jwt, desc, json_data, roles, status, has_account):\n current_app.config.update(PAYMENT_SVC_URL=MOCK_PAY_URL)\n current_app.config.update(AUTH_SVC_URL=MOCK_URL_NO_KEY)\n headers = None\n # setup\n if has_account and BCOL_HELP in roles:\n headers = create_header_account(jwt, roles, 'test-user', BCOL_HELP)\n elif has_account and GOV_ACCOUNT_ROLE in roles:\n headers = create_header_account(jwt, roles, 'test-user', '1234')\n elif has_account:\n headers = create_header_account(jwt, roles)\n else:\n headers = create_header(jwt, roles)\n\n # test\n response = client.post('/api/v1/financing-statements',\n json=json_data,\n headers=headers,\n content_type='application/json')\n\n # check\n assert response.status_code == status\n if response.status_code == HTTPStatus.CREATED:\n registration: Registration = Registration.find_by_registration_number(response.json['baseRegistrationNumber'],\n 'PS12345', True)\n assert registration.verification_report",
"def createMakingTest(tx, query, personId, testId, date, hour, result):\n tx.run(query, personId=personId, testId=testId, date=date, hour=hour, result=result)",
"def create_test_goal(context, **kw):\n goal = get_test_goal(context, **kw)\n goal.create()\n return goal",
"def __new__(cls, name, func_call, expect_dir=None, expect_base=None,\n ext='json', covers=None, breakpoints=None, break_funcs=()):\n breakpoints = breakpoints or []\n if not breakpoints or break_funcs:\n for f in break_funcs or (func_call.func,):\n if hasattr(f, 'im_func'):\n f = f.im_func\n breakpoints.append((f.func_code.co_filename,\n f.func_code.co_firstlineno,\n f.func_code.co_name))\n\n expect_dir = expect_dir.rstrip('/')\n return super(Test, cls).__new__(cls, name, func_call, expect_dir,\n expect_base, ext, covers, breakpoints)",
"def Generatable(cls):\n if hasattr(cls, 'generate_tests') and callable(cls.generate_tests):\n def create_test_func(name, test_func):\n setattr(cls, 'test_' + name.replace(' ', '_').lower(), test_func)\n cls.generate_tests(create_test_func)\n return cls"
] | [
"0.7049483",
"0.68726027",
"0.65086746",
"0.6226904",
"0.59744567",
"0.5875409",
"0.5875409",
"0.5869115",
"0.5800327",
"0.57175136",
"0.5705263",
"0.5691941",
"0.5661378",
"0.5658473",
"0.5648882",
"0.5639574",
"0.5630987",
"0.5609541",
"0.55923575",
"0.55062956",
"0.5492857",
"0.54765517",
"0.5466594",
"0.54480326",
"0.5427351",
"0.5409014",
"0.5395724",
"0.5383768",
"0.53359103",
"0.53072935"
] | 0.74640894 | 0 |
Logic which should be executed for the given 'rsm_ctx'. | def handle(self, rsm_ctx):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def can_handle(self, rsm_ctx):\n return rsm_ctx.instance.type == NODE_TYPE_RESULT",
"def handle(self, rsm_ctx):\n rsm_ctx.log(\n 'info',\n 'Starting executing for \"list\" operation for get usage ...'\n )\n\n execution_id = rsm_ctx.run_execution(wait=False)\n rsm_ctx.log(\n 'info',\n 'Execution started with ID: {} ...'.format(execution_id)\n )",
"def can_handle(self, rsm_ctx):\n return False",
"def can_handle(self, rsm_ctx):\n return not rsm_ctx.instance.type",
"def handle(self, rsm_ctx):\n rsm_ctx.log('info', 'Executing \"list\" operation for get usage ...')\n\n runtime_properties = rsm_ctx.run_execution()\n rsm_ctx.log(\n 'info',\n 'Got {} runtime_properties after execution',\n runtime_properties.keys()\n )\n\n self._process_runtime_properties(\n rsm_ctx,\n runtime_properties,\n self.VALUE_TYPE_USAGE\n )",
"def handle(self, rsm_ctx):\n runtime_properties = rsm_ctx.get_execution_result()\n\n rsm_ctx.log(\n 'info',\n 'Got {} runtime_properties after execution',\n runtime_properties.keys()\n )\n\n self._process_runtime_properties(\n rsm_ctx,\n runtime_properties,\n self.VALUE_TYPE_USAGE\n )",
"def handle(self, rsm_ctx):\n rsm_ctx.log(\n 'info',\n 'Node instance has type with is not supported by '\n 'Resource Management Plugin. Skipping'\n )",
"def handle(self, rsm_ctx):\n rsm_ctx.log(\n 'info',\n 'Dumping gathered data to runtime_properties of {} node instance',\n rsm_ctx.instance.id\n )\n\n rsm_ctx.add_result_instance_id()\n rsm_ctx.set_runtime_properties({\n 'data': rsm_ctx.dump()\n })",
"def match(self, ctx):\n pass",
"def __call__(self, context):\n msg = context.latest_msg()\n # deal with some common cases\n\n # 调侃\n if msg.intent == 'tune':\n return self.utter_default, self\n\n self.on_process_message(msg)\n\n self.on_enter_state(context)\n\n ac, st = self.run(context)\n\n if st == self:\n self.repeat_times += 1\n else:\n self.on_finish_state(context)\n\n if self.repeat_times > 2:\n ac, st = self.turn_to_manual_custom_service(context), StateFinish()\n\n return ac, st",
"def can_handle(self, rsm_ctx):\n return rsm_ctx.instance.type == NODE_TYPE_PROJECT",
"async def private(self, ctx):\n pass",
"def can_handle(self, rsm_ctx):\n return rsm_ctx.instance.type == NODE_TYPE_USAGE",
"def can_handle(self, rsm_ctx):\n return rsm_ctx.instance.type == NODE_TYPE_USAGE",
"def can_handle(self, rsm_ctx):\n return rsm_ctx.instance.type == NODE_TYPE_USAGE",
"def _execute(self, model_obj):",
"def apply(ctx):\n pass",
"def _doRun(self, model: Model):\n raise Exception(\"Not implemented\")",
"def handle(self, rsm_ctx):\n self._process_runtime_properties(\n rsm_ctx,\n rsm_ctx.instance.runtime_properties,\n self.VALUE_TYPE_QUOTA\n )",
"def _run_scenario(self, cls, method_name, context, args, config):",
"def run_game_logic(self):\n pass",
"def run(ctx):\n pass",
"def _evaluate_workflow_final_context(self, cause_task_ex):\n raise NotImplementedError",
"def run(self, in_op):\n raise NotImplementedError",
"def handle(self, rsm_ctx):\n rsm_ctx.log('info', 'Processing of project started')\n rsm_ctx.resolve_project()",
"def _do_action(self):\n pass",
"def _do_action(self):\n pass",
"async def roletools(self, ctx: Context) -> None:",
"def execute(self, context: Any) -> Any:\n pass",
"def _DoCommonRequestProcessing(self, request, mr):\n with mr.profiler.Phase('basic processing'):\n self._CheckForMovedProject(mr, request)\n self.AssertBasePermission(mr)"
] | [
"0.61911374",
"0.6145054",
"0.5788437",
"0.57324094",
"0.56074697",
"0.5554602",
"0.5442962",
"0.5299849",
"0.5204402",
"0.5140094",
"0.5080645",
"0.5065991",
"0.5039015",
"0.5039015",
"0.5039015",
"0.5005746",
"0.49829862",
"0.49768415",
"0.49702245",
"0.4969788",
"0.49602613",
"0.4953117",
"0.4917949",
"0.4916256",
"0.49066442",
"0.489",
"0.489",
"0.48693666",
"0.48480958",
"0.4841813"
] | 0.7899221 | 0 |
Check whether the handler supports the 'rsm_ctx' type. The instance type should be None. | def can_handle(self, rsm_ctx):
return not rsm_ctx.instance.type | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def can_handle(self, rsm_ctx):\n return False",
"def can_handle(self, rsm_ctx):\n return rsm_ctx.instance.type == NODE_TYPE_RESULT",
"def handle(self, rsm_ctx):\n rsm_ctx.log(\n 'info',\n 'Node instance has type with is not supported by '\n 'Resource Management Plugin. Skipping'\n )",
"def can_handle(self, rsm_ctx):\n return rsm_ctx.instance.type == NODE_TYPE_USAGE",
"def can_handle(self, rsm_ctx):\n return rsm_ctx.instance.type == NODE_TYPE_USAGE",
"def can_handle(self, rsm_ctx):\n return rsm_ctx.instance.type == NODE_TYPE_USAGE",
"def can_handle(self, rsm_ctx):\n return rsm_ctx.instance.type == NODE_TYPE_PROJECT",
"def can_handle(self, rsm_ctx):\n return rsm_ctx.instance.type == NODE_TYPE_QUOTA",
"def handle(self, rsm_ctx):\n pass",
"def can_handle(self, rsm_ctx):\n return super(OpenstackQuotaHandler, self).can_handle(rsm_ctx) and \\\n SYSTEM_NAME_OPENSTACK in rsm_ctx.instance.system_name",
"def check_kernel(cls):\n pass",
"def test_type(self):\n assert is_consistent_type(Context, \"Context\", TLSv1_METHOD)",
"def check_selinux_status(self):\n\n raise NotImplementedError()",
"def CheckType(self, *args, **kwargs):\n pass",
"def is_secure_context(self):\n raise exceptions.NotImplementedError()",
"def __instancecheck__(self, instance):\n\n if isinstance(instance, ObjCInstance):\n return bool(instance.isKindOfClass(self))\n else:\n return False",
"def has_request_context():\n from .application import Nereid\n\n return base_has_request_context() and \\\n isinstance(current_app._get_current_object(), Nereid)",
"def verify_type(self, obj):\n return isinstance(obj, self.type_)",
"def is_applicable(self, context: Any) -> bool:\n pass",
"async def checktype(self, ctx:commands.Context):\r\n\r\n t = await self.GetChannelType(ctx.guild, ctx.channel.id)\r\n if t == 'none':\r\n await ctx.send(\r\n f'<#{ctx.channel.id}> is a normal channel (use `register <channel type>` to make this a specialized channel)')\r\n else:\r\n await ctx.send(f'<#{ctx.channel.id}> is a {t}')",
"def poll(cls, context):\n\n tex = context.texture\n if not tex:\n return False\n\n if context.texture.luxrender_texture.type == 'BLENDER':\n return tex and \\\n (context.scene.render.engine in cls.COMPAT_ENGINES) and \\\n context.texture.type in cls.BL_COMPAT\n else:\n return tex and \\\n (context.scene.render.engine in cls.COMPAT_ENGINES) and \\\n context.texture.luxrender_texture.type in cls.LUX_COMPAT",
"def handle(self, rsm_ctx):\n self._process_runtime_properties(\n rsm_ctx,\n rsm_ctx.instance.runtime_properties,\n self.VALUE_TYPE_QUOTA\n )",
"def _check_required_if_provider(self):\n return",
"def check_type(self):\n return True",
"def _isinstance(self, obj, raise_error=True):\n rv = isinstance(obj, self.__model__)\n if not rv and raise_error:\n raise ValueError('%s is not of type %s' % (obj, self.__model__))\n return rv",
"def _check_configured(cls):\r\n if not cls._CONFIGURED:\r\n raise RuntimeError('Registry not configured')",
"def check(cls, control_instance):\n pass",
"def _validateKey(self, key, cls = None):\n\n key_class_types = [self._BaseKey__class, self._LocalKey__class,\n self._MsgKey__class, ErrorMsgManager]\n\n if cls:\n if inspect.isclass(cls) and cls in key_class_types:\n classes = [cls]\n else:\n return None\n else:\n classes = key_class_types\n return any([isinstance(key, cls) for cls in classes])",
"def __get_verify_mode(self):\n ...",
"def __instancecheck__(self, instance):\n\n if isinstance(instance, ObjCInstance):\n return bool(instance.conformsToProtocol(self))\n else:\n return False"
] | [
"0.6648118",
"0.6646733",
"0.63854903",
"0.62018675",
"0.62018675",
"0.62018675",
"0.6113427",
"0.60652363",
"0.5710701",
"0.5709683",
"0.5318473",
"0.5054878",
"0.4966977",
"0.4897182",
"0.4872651",
"0.48110458",
"0.47685593",
"0.47521907",
"0.47270998",
"0.4708487",
"0.47064868",
"0.47044092",
"0.46908468",
"0.4688083",
"0.46793523",
"0.46714193",
"0.46630493",
"0.46473497",
"0.4641149",
"0.46277797"
] | 0.7572694 | 0 |
Logic which should be executed for the given 'rsm_ctx'. Writes a log message that the type is unsupported. | def handle(self, rsm_ctx):
rsm_ctx.log(
'info',
'Node instance has type with is not supported by '
'Resource Management Plugin. Skipping'
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def can_handle(self, rsm_ctx):\n return not rsm_ctx.instance.type",
"def can_handle(self, rsm_ctx):\n return rsm_ctx.instance.type == NODE_TYPE_RESULT",
"def handle(self, rsm_ctx):\n pass",
"def can_handle(self, rsm_ctx):\n return False",
"def _process_msg(cls, msg):\n raise NotImplementedError",
"def check_r_type(r):\n if type(r) is str:\n raise TypeError('Get Error message.')",
"def can_handle(self, rsm_ctx):\n return rsm_ctx.instance.type == NODE_TYPE_USAGE",
"def can_handle(self, rsm_ctx):\n return rsm_ctx.instance.type == NODE_TYPE_USAGE",
"def can_handle(self, rsm_ctx):\n return rsm_ctx.instance.type == NODE_TYPE_USAGE",
"def check_trace_mode(device_type, trace_mode):\n if trace_mode == tensor_tracer_flags.TRACE_MODE_FULL_TENSOR_SUMMARY:\n if device_type != _DEVICE_TYPE_TPU:\n raise ValueError('Device_type \"%s\" is not yet supported for '\n 'trace mode \"%s\"' % (device_type, trace_mode))",
"def test_unsupported_action(self):\r\n self.xmodule.verify_oauth_body_sign = Mock()\r\n request = Request(self.environ)\r\n request.body = self.get_request_body({'action': 'wrongAction'})\r\n response = self.xmodule.grade_handler(request, '')\r\n real_response = self.get_response_values(response)\r\n expected_response = {\r\n 'action': None,\r\n 'code_major': 'unsupported',\r\n 'description': 'Target does not support the requested operation.',\r\n 'messageIdentifier': self.DEFAULTS['messageIdentifier'],\r\n }\r\n self.assertEqual(response.status_code, 200)\r\n self.assertDictEqual(expected_response, real_response)",
"def test_unsupported_action(self):\n self.xmodule.verify_oauth_body_sign = Mock()\n request = Request(self.environ)\n request.body = self.get_request_body({'action': 'wrongAction'})\n response = self.xmodule.grade_handler(request, '')\n real_response = self.get_response_values(response)\n expected_response = {\n 'action': None,\n 'code_major': 'unsupported',\n 'description': 'Target does not support the requested operation.',\n 'messageIdentifier': self.defaults['messageIdentifier'],\n }\n assert response.status_code == 200\n self.assertDictEqual(expected_response, real_response)",
"def errFunc(runType):\n logger.error('Execution type not recognized! {}'.format(runType))\n raise InvalidExecutionType('{} is not a valid command'.format(runType))",
"def main(ctx, **kwargs):\n ctx.meta['decode-errors'] = kwargs['errors'] or 'strict'\n ctx.meta['output-logmsg'] = kwargs['logmsg'] or 'normal'",
"def check_restype(restype, exc_message):\n if restype != 'OK':\n raise IMAPClientError(exc_message)",
"def check_parameters(self):\n\n if self.process not in [\"Like\", \"Like-and-follow\"]:\n raiser('process')\n\n if \"type\" not in self.duration or \"value\" not in self.duration:\n raiser('duration(type or value)')\n else:\n typ = self.duration['type']\n val = self.duration['value']\n if self.process == \"Like\":\n if typ not in ['by_time', 'by_likes']:\n raiser('type')\n\n if \"like\" not in self.limits_per_hour:\n raiser('limitsPerHour(like)')\n else:\n try:\n self.limits_per_hour['like'] = float(self.limits_per_hour['like'])\n except ValueError:\n raiser('like')\n elif self.process == \"Like-and-follow\":\n if typ not in ['by_time', 'by_users']:\n raiser('type')\n\n if \"like\" not in self.limits_per_hour or \"follow\" not in self.limits_per_hour \\\n or \"unfollow\" not in self.limits_per_hour:\n raiser('limitsPerHour(like or follow or unfollow)')\n else:\n for i in [\"like\", \"follow\", \"unfollow\"]:\n try:\n self.limits_per_hour[i] = float(self.limits_per_hour[i])\n except ValueError:\n raiser(i)\n try:\n self.duration['value'] = float(val)\n except ValueError:\n raiser('value')\n\n if not isinstance(self.search_hashtags, list):\n raiser('hashtags')\n\n if not isinstance(self.white_list, list):\n raiser('whiteList')",
"def is_of_type(cmd):\r\n raise NotImplementedError()",
"def write(self, msg):\n frame = inspect.stack()[1]\n\n if type(msg) == str:\n msg = formatMesg(msg, COMMENT, frame, Dispatcher.commentFormat)\n self.logger.log(COMMENT, msg)\n elif type(msg) == dict:\n status = msg['status']\n if status == 'pass':\n msg['value'] = formatMesg(msg['value'], INFO, frame, Dispatcher.infoFormat)\n self.logger.info(msg['value'])\n elif status == 'warning':\n msg['value'] = formatMesg(msg['value'], WARNING, frame, Dispatcher.warningFormat)\n self.logger.warning(msg['value'])\n elif status == 'fail':\n msg['value'] = formatMesg(msg['value'], ERROR, frame, Dispatcher.errorFormat)\n self.logger.error(msg['value'])\n else:\n msg['value'] = formatMesg(msg['value'] + \"Status: UNKNOWN\", ERROR, frame, Dispatcher.errorFormat)\n self.logger.warning(msg['value'])\n elif type(msg) == ReturnCode:\n if msg:\n msg = formatMesg(str(msg), INFO, frame, Dispatcher.infoFormat)\n self.logger.info(msg)\n elif not msg:\n msg = formatMesg(str(msg), ERROR, frame, Dispatcher.errorFormat)\n self.logger.error(msg)\n else:\n print str(type(msg))",
"def handle_exception(exc_type, exception, traceback):\n report(UNKNOWN, \"unhandled exception: %s\" % (exception,))",
"def error(ctx, flow):\n ctx.log(\"error\")",
"def _log_error(self, err_msg):\n if self._on_error_action == \"raise\":\n raise InvalidDatasetError(err_msg)\n else:\n logger.warning(err_msg)",
"def _raise_performing_request_error(self, *args, **kwargs):",
"def CheckType(self, *args, **kwargs):\n pass",
"def log(self, level, msg, *args, **kwargs):\n # we override this instead of process() because we need to change the log level\n # note that `self.extra` does NOT replace kwargs['extra']\n if self.isEnabledFor(level):\n task = cast(TaskView, self.extra)\n # e.g. Task configure for test-node (reason: missing)\n task_id = f\"{task.configSpec.operation} for {task.target.name}\"\n if task.reason:\n task_id += f\" (reason: {task.reason})\"\n if task._rendering:\n msg = f\"Rendering task {task_id} (errors expected): {msg}\"\n if level >= Levels.VERBOSE:\n level = Levels.VERBOSE\n else:\n msg = f\"Task {task_id}: {msg}\"\n self.logger.log(level, msg, *args, **kwargs)",
"def check_type(self):\n if self.action < 0 or self.action >= len(_action_args_dict):\n raise GameActionError('Invalid action type ({0})'.format(self.action))",
"def handle(self, rsm_ctx):\n runtime_properties = rsm_ctx.get_execution_result()\n\n rsm_ctx.log(\n 'info',\n 'Got {} runtime_properties after execution',\n runtime_properties.keys()\n )\n\n self._process_runtime_properties(\n rsm_ctx,\n runtime_properties,\n self.VALUE_TYPE_USAGE\n )",
"async def _noop_error_handler(ctx: \"RequestContext\") -> None:",
"async def on_handle_sent_message_error(self, guild_id: int, op: str, payload: Dict[str, Any],\n exc: Exception) -> None:\n log.error(f\"uncaught error {exc} in {self} when handling sent message {op} for guild {guild_id}: {exc}\\n\\n\"\n f\"Payload: {payload}\")",
"def _unhandled(self, context, message, reason):\r\n # TODO: call host's method instead\r\n self._host.unhandled.append((context.str, message.serialize(), reason))\r\n self._host.expected[context.str] = None\r\n eprint(\"{}: Command {} can't be handled due to {}\".format(self._host.name, message.serialize(), reason))",
"def isLogEnabled(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')"
] | [
"0.6189271",
"0.5799024",
"0.5745125",
"0.5314854",
"0.5146376",
"0.51199263",
"0.48813412",
"0.48813412",
"0.48813412",
"0.48692715",
"0.4823746",
"0.48163384",
"0.4791396",
"0.4765693",
"0.47391534",
"0.47356078",
"0.47062707",
"0.46968287",
"0.46908936",
"0.46611562",
"0.46587932",
"0.4646458",
"0.46459144",
"0.46230397",
"0.46206895",
"0.46198353",
"0.45978352",
"0.45951256",
"0.45891282",
"0.45861942"
] | 0.690277 | 0 |
Check whether the handler supports the 'rsm_ctx' type. The instance type should be NODE_TYPE_PROJECT. | def can_handle(self, rsm_ctx):
return rsm_ctx.instance.type == NODE_TYPE_PROJECT | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def handle(self, rsm_ctx):\n rsm_ctx.log(\n 'info',\n 'Node instance has type with is not supported by '\n 'Resource Management Plugin. Skipping'\n )",
"def can_handle(self, rsm_ctx):\n return not rsm_ctx.instance.type",
"def can_handle(self, rsm_ctx):\n return rsm_ctx.instance.type == NODE_TYPE_RESULT",
"def can_handle(self, rsm_ctx):\n return rsm_ctx.instance.type == NODE_TYPE_QUOTA",
"def can_handle(self, rsm_ctx):\n return rsm_ctx.instance.type == NODE_TYPE_USAGE",
"def can_handle(self, rsm_ctx):\n return rsm_ctx.instance.type == NODE_TYPE_USAGE",
"def can_handle(self, rsm_ctx):\n return rsm_ctx.instance.type == NODE_TYPE_USAGE",
"def can_handle(self, rsm_ctx):\n return False",
"def is_node_support(self, node):\n return self.node_attribute(key=node, name=\"type\") == \"support\"",
"def can_handle(self, rsm_ctx):\n return super(OpenstackQuotaHandler, self).can_handle(rsm_ctx) and \\\n SYSTEM_NAME_OPENSTACK in rsm_ctx.instance.system_name",
"def check(self):\n self.isNodes = False\n self.isFixable = False\n nodeType = self.get_parser.get('SETTINGS', 'settingsinfonode')\n self.setStatus('OK')\n if not len(pm.ls(type=nodeType)):\n self.setStatus('WARNING')\n self.setErrorMessage('No %s node found in the scene.' % nodeType)\n return False, ''\n elif len(pm.ls(type=nodeType)) > 1:\n self.setStatus('ERROR')\n self.setErrorMessage('More than 1 %s node found in the scene.' % nodeType)\n return False, ''\n return True, pm.ls(type=nodeType)[0]",
"def check_tree_type(tree):\n return tree.type in ref",
"def check(self):\n if pm.objExists(\"top_C_001_CTRL\"):\n self.status = \"OK\"\n else:\n self.status = \"ERROR\"\n self.addError(\"No node with the name top_C_001_CTRL\")\n self.errorMessage = \"No top Controler\"",
"def check_global_request(self, kind, msg):\n return False",
"def check(self):\n self.isNodes = True\n self.isFixable = False\n defaults = ['persp', 'top', 'front', 'side']\n project_defaults = ['__SUBSET__', '__SET__', '__CAMERA__', '__CHARS__', '__PROPS__']\n\n errorNodes = list()\n for each in pm.ls(assemblies=1):\n if str(each) in defaults:\n continue\n if str(each) in project_defaults:\n continue\n errorNodes.append(str(each))\n self.setStatus('OK')\n if len(errorNodes) > 0:\n self.setStatus('WARNING')\n self.errorNodes = errorNodes\n self.errorMessage = '%s numbers of extra root nodes found in the scene.' % str(len(self.errorNodes))",
"def handle(self, rsm_ctx):\n pass",
"def do_check_model(**kwargs):\n # pushes an XCom without a specific target, just by returning it\n mle = MLEngineHook()\n model_name = kwargs['dag_run'].conf.get('model_name')\n # return bool(mle.get_model(PROJECT, MODEL_DNN_NAME))\n project = mle.get_model(PROJECT, model_name)\n kwargs['ti'].xcom_push(key='is_project', value=bool(project))",
"def check_kernel(cls):\n pass",
"def global_check(self):\n return None",
"def check(self, context):\r\n return context.config.preset is not None",
"def has_request_context():\n from .application import Nereid\n\n return base_has_request_context() and \\\n isinstance(current_app._get_current_object(), Nereid)",
"def do_check(self):\n res = self.entity.do_check(self.context)\n if res:\n return self.RES_OK, 'Node check succeeded.'\n else:\n return self.RES_ERROR, 'Node check failed.'",
"def test_get_node_status(self):\n pass",
"def test_type(self):\n assert is_consistent_type(Context, \"Context\", TLSv1_METHOD)",
"def _check_config(self):",
"async def checktype(self, ctx:commands.Context):\r\n\r\n t = await self.GetChannelType(ctx.guild, ctx.channel.id)\r\n if t == 'none':\r\n await ctx.send(\r\n f'<#{ctx.channel.id}> is a normal channel (use `register <channel type>` to make this a specialized channel)')\r\n else:\r\n await ctx.send(f'<#{ctx.channel.id}> is a {t}')",
"def check(self, mode, values=None):\n res_ids = {}\n if self._ids:\n self._cr.execute(\n \"\"\"SELECT DISTINCT res_type, res_id FROM\n workflow_task WHERE id = ANY (%s)\"\"\", (list(self._ids),))\n for rmod, rid in self._cr.fetchall():\n res_ids.setdefault(rmod, set()).add(rid)\n if values:\n if values.get('res_type') and values.get('res_id'):\n res_ids.setdefault(values['res_type'], set())\\\n .add(values['res_id'])\n\n for model, mids in res_ids.items():\n existing_ids = self.pool[model].exists(self._cr, self._uid, mids)\n self.check_base_security(model, existing_ids, mode)\n if not self._uid == SUPERUSER_ID and\\\n not self.env['res.users'].has_group('base.group_user'):\n raise exceptions.AccessError(\n _(\"Sorry, you are not allowed to access this document.\"))",
"def check_supported_features(self):",
"def node_is_pytest_context_manager(node: ast.AST) -> bool:\n return isinstance(node, ast.With) and bool(cm_exp.match(get_first_token(node).line))",
"def can_reevaluate(self, node):\n return isinstance(node, (ast.Name, ast.Num, ast.Str)) or \\\n (six.PY3 and isinstance(node, ast.Bytes)) or \\\n (ast_has_name_constant and isinstance(node, ast.NameConstant))"
] | [
"0.6362641",
"0.61811864",
"0.61003655",
"0.57622105",
"0.5663",
"0.5663",
"0.5663",
"0.5303613",
"0.50670636",
"0.49746954",
"0.49681306",
"0.48913604",
"0.47937822",
"0.47625598",
"0.47432458",
"0.46864262",
"0.4640284",
"0.46303535",
"0.46094924",
"0.45498383",
"0.45277175",
"0.45137876",
"0.45065567",
"0.44957367",
"0.44723064",
"0.4469611",
"0.44661543",
"0.44265315",
"0.4421201",
"0.4416316"
] | 0.76011074 | 0 |
Logic which should be executed for the given 'rsm_ctx'. Runs resolve_project on 'rsm_ctx'. | def handle(self, rsm_ctx):
rsm_ctx.log('info', 'Processing of project started')
rsm_ctx.resolve_project() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def eval(hw, r):\n\n # Check that the hw path exists and is a directory. If so, try\n # to determine if the student has committed a build directory.\n if os.path.isdir(hw):\n find_build_dir(hw, r)\n return True\n else:\n # Diagnose the error and try to locate the actual\n # project directory.\n r.error(\"project directory '\" + hw + \"' is missing\")\n find_project(hw, r)\n return False",
"def handle(self, rsm_ctx):\n pass",
"def can_handle(self, rsm_ctx):\n return rsm_ctx.instance.type == NODE_TYPE_PROJECT",
"def do_resolve(self,args):\n try:\n for solution in self.resolve_all(args):\n self.print_solution(solution)\n except:\n traceback.print_exc(file=sys.stdout)",
"async def _execute(\n self,\n context: AppLoggedInUseCaseContext,\n args: PersonLoadSettingsArgs,\n ) -> PersonLoadSettingsResult:\n workspace = context.workspace\n\n async with self._storage_engine.get_unit_of_work() as uow:\n person_collection = await uow.person_collection_repository.load_by_parent(\n workspace.ref_id,\n )\n catch_up_project = await uow.project_repository.load_by_id(\n person_collection.catch_up_project_ref_id,\n )\n\n return PersonLoadSettingsResult(catch_up_project=catch_up_project)",
"def handle(self, rsm_ctx):\n rsm_ctx.log(\n 'info',\n 'Starting executing for \"list\" operation for get usage ...'\n )\n\n execution_id = rsm_ctx.run_execution(wait=False)\n rsm_ctx.log(\n 'info',\n 'Execution started with ID: {} ...'.format(execution_id)\n )",
"def __call__(self, context=None):\n if context is None:\n context = self.context\n return self.entity.resolve(context)",
"def run():\n from cgl.plugins.blender.tasks.rig import parent_mdl_to_rig\n parent_mdl_to_rig()",
"def _resolve(self):\n pass",
"def project():",
"def project():",
"def project():",
"def run(ctx):\n pass",
"def __call__(self, ctx: ResolutionContext) -> Coroutine[None, None, RT]:",
"def process_project(self, project_name):\n self.logging.debug('Retrieving project %s..', project_name)\n\n try:\n project = self.get_lp_client().projects[project_name]\n except KeyError:\n self.logging.error(\n \"Project %s wasn't found. Skipped..\",\n project_name\n )\n else:\n if project:\n self.logging.debug(\n 'Retrieving active milestone %s..',\n self.get_new_milestone_name()\n )\n\n new_milestone = project.getMilestone(\n name=self.get_new_milestone_name()\n )\n self.get_stats()[project.name] = {}\n\n for old_milestone_name in self.get_old_milestone_names():\n if self.is_limit_achived():\n break\n\n self.process_milestone_on_project(\n project, old_milestone_name, new_milestone\n )\n\n else:\n self.logging.debug(\n \"Project %s wasn't found. Skipped..\",\n project_name\n )",
"def run(self, targets, context={}, force=False):\n for target in targets:\n self._resolve(target, context, force)\n return context",
"def do_p(self, arg):\n self.do_project(arg)",
"async def _perform_mutation(\n self,\n progress_reporter: ProgressReporter,\n context: AppLoggedInUseCaseContext,\n args: ChoreChangeProjectArgs,\n ) -> None:\n user = context.user\n workspace = context.workspace\n\n async with self._domain_storage_engine.get_unit_of_work() as uow:\n chore = await uow.chore_repository.load_by_id(args.ref_id)\n\n inbox_task_collection = (\n await uow.inbox_task_collection_repository.load_by_parent(\n workspace.ref_id,\n )\n )\n all_inbox_tasks = await uow.inbox_task_repository.find_all_with_filters(\n parent_ref_id=inbox_task_collection.ref_id,\n allow_archived=True,\n filter_chore_ref_ids=[args.ref_id],\n )\n\n for inbox_task in all_inbox_tasks:\n schedule = schedules.get_schedule(\n chore.gen_params.period,\n chore.name,\n cast(Timestamp, inbox_task.recurring_gen_right_now),\n user.timezone,\n chore.skip_rule,\n chore.gen_params.actionable_from_day,\n chore.gen_params.actionable_from_month,\n chore.gen_params.due_at_time,\n chore.gen_params.due_at_day,\n chore.gen_params.due_at_month,\n )\n\n inbox_task = inbox_task.update_link_to_chore(\n project_ref_id=args.project_ref_id\n or workspace.default_project_ref_id,\n name=schedule.full_name,\n timeline=schedule.timeline,\n actionable_date=schedule.actionable_date,\n due_date=schedule.due_time,\n eisen=chore.gen_params.eisen,\n difficulty=chore.gen_params.difficulty,\n source=EventSource.CLI,\n modification_time=self._time_provider.get_current_time(),\n )\n await uow.inbox_task_repository.save(inbox_task)\n await progress_reporter.mark_updated(inbox_task)\n\n chore = chore.change_project(\n project_ref_id=args.project_ref_id or workspace.default_project_ref_id,\n source=EventSource.CLI,\n modification_time=self._time_provider.get_current_time(),\n )\n await uow.chore_repository.save(chore)\n await progress_reporter.mark_updated(chore)",
"def project_run(\n project_dir: Path,\n subcommand: str,\n *,\n overrides: Dict[str, Any] = SimpleFrozenDict(),\n force: bool = False,\n dry: bool = False,\n capture: bool = False,\n skip_requirements_check: bool = False,\n) -> None:\n config = load_project_config(project_dir, overrides=overrides)\n commands = {cmd[\"name\"]: cmd for cmd in config.get(\"commands\", [])}\n workflows = config.get(\"workflows\", {})\n validate_subcommand(list(commands.keys()), list(workflows.keys()), subcommand)\n\n req_path = project_dir / \"requirements.txt\"\n if not skip_requirements_check:\n if config.get(\"check_requirements\", True) and os.path.exists(req_path):\n with req_path.open() as requirements_file:\n _check_requirements([req.strip() for req in requirements_file])\n\n if subcommand in workflows:\n msg.info(f\"Running workflow '{subcommand}'\")\n for cmd in workflows[subcommand]:\n project_run(\n project_dir,\n cmd,\n overrides=overrides,\n force=force,\n dry=dry,\n capture=capture,\n skip_requirements_check=True,\n )\n else:\n cmd = commands[subcommand]\n for dep in cmd.get(\"deps\", []):\n if not (project_dir / dep).exists():\n err = f\"Missing dependency specified by command '{subcommand}': {dep}\"\n err_help = \"Maybe you forgot to run the 'project assets' command or a previous step?\"\n err_exits = 1 if not dry else None\n msg.fail(err, err_help, exits=err_exits)\n check_spacy_commit = check_bool_env_var(ENV_VARS.PROJECT_USE_GIT_VERSION)\n with working_dir(project_dir) as current_dir:\n msg.divider(subcommand)\n rerun = check_rerun(current_dir, cmd, check_spacy_commit=check_spacy_commit)\n if not rerun and not force:\n msg.info(f\"Skipping '{cmd['name']}': nothing changed\")\n else:\n run_commands(cmd[\"script\"], dry=dry, capture=capture)\n if not dry:\n update_lockfile(current_dir, cmd)",
"def solve(ctx):\n my_solver(ctx.obj['filename'])",
"def run_project_parser(self):\n\n # get Ansible project structure\n self.__get_ansible_project_content()\n self.__generate_graph('project', self.__project_content)\n\n # get Ansible roles\n self.__get_ansible_roles_content()\n self.__generate_graph('roles', self.__role_content)",
"def solve(bv: BinaryView):\n\n if (\n \"EVM\" in [x.name for x in list(Architecture)]\n and bv.arch == Architecture[\"EVM\"]\n and bv.session_data.mui_evm_source is not None\n ):\n # set default workspace url\n\n workspace_url = settings.get_string(f\"{BINJA_EVM_RUN_SETTINGS_PREFIX}workspace_url\", bv)\n if workspace_url == \"\":\n\n random_dir_name = \"\".join(random.choices(string.ascii_uppercase + string.digits, k=10))\n workspace_url = str(\n Path(\n bv.session_data.mui_evm_source.parent.resolve(),\n random_dir_name,\n )\n )\n settings.set_string(\n f\"{BINJA_EVM_RUN_SETTINGS_PREFIX}workspace_url\",\n workspace_url,\n view=bv,\n scope=SettingsScope.SettingsResourceScope,\n )\n\n dialog = RunDialog(\n DockHandler.getActiveDockHandler().parent(), bv, BINJA_EVM_RUN_SETTINGS_PREFIX\n )\n\n if dialog.exec() == QDialog.Accepted:\n bv.session_data.mui_is_running = True\n s = ManticoreEVMRunner(bv.session_data.mui_evm_source, bv)\n s.start()\n\n else:\n if len(bv.session_data.mui_find) == 0 and len(bv.session_data.mui_custom_hooks.keys()) == 0:\n show_message_box(\n \"Manticore Solve\",\n \"You have not specified a goal instruction or custom hook.\\n\\n\"\n + 'Please right click on the goal instruction and select \"Find Path to This Instruction\" to '\n + \"continue.\",\n MessageBoxButtonSet.OKButtonSet,\n MessageBoxIcon.ErrorIcon,\n )\n return\n\n dialog = RunDialog(\n DockHandler.getActiveDockHandler().parent(), bv, BINJA_NATIVE_RUN_SETTINGS_PREFIX\n )\n\n if dialog.exec() == QDialog.Accepted:\n # Start a solver thread for the path associated with the view\n bv.session_data.mui_is_running = True\n s = ManticoreNativeRunner(bv.session_data.mui_find, bv.session_data.mui_avoid, bv)\n s.start()",
"def run(fips_dir, proj_dir, args) :\n if len(args) > 0 :\n proj_name = args[0]\n proj_dir = util.get_project_dir(fips_dir, proj_name)\n dep.fetch_imports(fips_dir, proj_dir)",
"def execute(self):\n res = self.resolveInputVariables()\n if not res['OK']:\n LOG.error(\"Failed to resolve input variables:\", res['Message'])\n return res\n return S_OK()",
"def _run_env(self):\n raise NotImplementedError()",
"def _ensure_project(self, c_params: CommonParams) -> Squonk2AgentRv:\n assert c_params\n assert isinstance(c_params, CommonParams)\n \n target_access_string = self._get_target_access_string(c_params.access_id)\n assert target_access_string\n\n # A Squonk2Unit must exist for the Target Access String.\n rv: Squonk2AgentRv = self._ensure_unit(target_access_string)\n if not rv.success:\n return rv\n unit: Squonk2Unit = rv.msg\n\n user_name: str = self._get_user_name(c_params.user_id)\n target_title: str = self._get_target_title(c_params.target_id)\n assert user_name\n assert target_title\n\n _, name_full = self._build_product_name(user_name, target_title)\n sq2_project: Optional[Squonk2Project] = Squonk2Project.objects.filter(name=name_full).first()\n if not sq2_project:\n msg = f'No existing Squonk2Project for \"{name_full}\"'\n _LOGGER.info(msg)\n # Need to call upon Squonk2 to create a 'Product'\n # (and corresponding 'Product').\n rv = self._create_product_and_project(unit, user_name, target_title, c_params)\n if not rv.success:\n msg = f'Failed creating AS Product or DM Project ({rv.msg})'\n _LOGGER.error(msg)\n return rv\n\n # Now record these new remote objects in a new\n # Squonk2Project record. As it's worked we're given\n # a dictionary with keys \"sq2_project_uuid\" and \"sq2_product_uuid\"\n sq2_project = Squonk2Project(uuid=rv.msg['sq2_project_uuid'],\n name=name_full,\n product_uuid=rv.msg['sq2_product_uuid'],\n unit_id=unit.id)\n sq2_project.save()\n msg = f'Created NEW Squonk2Project for {sq2_project.uuid} \"{name_full}\"'\n _LOGGER.info(msg)\n else:\n msg = f'Squonk2Project for {sq2_project.uuid} \"{name_full}\" already exists - nothing to do'\n _LOGGER.debug(msg)\n\n return Squonk2AgentRv(success=True, msg=sq2_project)",
"def on_prepare(dts, acg, xact, xact_info, ks_path, msg, scratch):\n\n action = xact_info.query_action\n xpath = ks_path.to_xpath(RwProjectManoYang.get_schema())\n self._log.debug(\"Project xpath: {}\".format(xpath))\n name = ManoProject.from_xpath(xpath, self._log)\n\n self._log.debug(\"Project %s on_prepare config received (action: %s): %s\",\n name, xact_info.query_action, msg)\n\n if action == rwdts.QueryAction.CREATE:\n if name in self.projects:\n self._log.debug(\"Project {} already exists. Ignore request\".\n format(name))\n else:\n yield from self._callbacks.on_add_prepare(name)\n scratch[\"projects\"][\"added\"].append((name, msg))\n\n elif action == rwdts.QueryAction.UPDATE:\n if name in self.projects:\n scratch[\"projects\"][\"updated\"].append((name, msg))\n else:\n self._log.debug(\"Project {}: Invoking on_prepare add request\".\n format(name))\n yield from self._callbacks.on_add_prepare(name)\n scratch[\"projects\"][\"added\"].append((name, msg))\n\n\n elif action == rwdts.QueryAction.DELETE:\n # Check if the entire project got deleted\n fref = ProtobufC.FieldReference.alloc()\n fref.goto_whole_message(msg.to_pbcm())\n if fref.is_field_deleted():\n if name in self.projects:\n rc, delete_msg = yield from self._callbacks.on_delete_prepare(name)\n if not rc:\n self._log.error(\"Project {} should not be deleted. Reason : {}\".\n format(name, delete_msg))\n\n xact_info.send_error_xpath(RwTypes.RwStatus.FAILURE,\n ProjectDtsHandler.XPATH,\n delete_msg)\n\n xact_info.respond_xpath(rwdts.XactRspCode.NACK)\n return\n\n scratch[\"projects\"][\"deleted\"].append(name)\n else:\n self._log.warning(\"Delete on unknown project: {}\".\n format(name))\n else:\n self._log.error(\"Action (%s) NOT SUPPORTED\", action)\n xact_info.respond_xpath(rwdts.XactRspCode.NACK)\n return\n xact_info.respond_xpath(rwdts.XactRspCode.ACK)",
"def run() -> ():\n if len(sys.argv) > 1:\n show_help()\n errs = get_cargo_input()\n main_stack = make_proj_stack(errs)\n while len(main_stack) > 0:\n file_stack = make_file_stack(main_stack)\n overwrite(file_stack)\n\n # FIXME",
"def do_run(self, args):\n logger.debug(\"do_run() was called\")\n\t\n parser = CrispyArgumentParser(description=self.do_run.__doc__, prog=\"run\")\n parser.add_argument(\"module\", metavar=\"<module>\", help=\"module name\")\n parser.add_argument(\"session_id\", metavar=\"<session id>\", help=\"session to run on\")\n parser.add_argument(\"arguments\", nargs=argparse.REMAINDER, metavar=\"<arguments>\", help=\"module arguments\")\n \n try:\n pargs = parser.parse_args(shlex.split(args))\n except MyParserException as e:\n print e\n return\n\n try:\n target = self.srv.get_client(int(pargs.session_id))\n except Exception as e:\n fprint.error(\"Session id should be an integer.\")\n return\n\n if not target:\n fprint.error(\"Improper session id.\")\n return\n\n try:\n mod = self.srv.get_module(pargs.module)(target)\n except Exception as me:\n fprint.error(\"Error loading \\\"{}\\\" module: {}\".format(pargs.module, me))\n return\n\n try:\n margs = mod.check_args(pargs.arguments)\n except MyParserException as e:\n print e\n return\n\n try:\n target.run_module(mod, margs)\n except Exception as e:\n fprint.error(\"Error running module: {}\".format(e))\n return",
"def execute(context, log):\n\n try:\n\n # Don't run if there were errors or if this is a dry run\n ok_to_run = True\n dry = False\n ret = [] # return codes from all runs\n return_code = 1 # assume the worst\n\n if len(context.gear_dict['errors']) > 0:\n ok_to_run = False\n ret.append(1)\n log.info('Commands were NOT run because of previous errors.')\n\n elif context.config['gear-dry-run']:\n dry = True\n e = 'gear-dry-run is set: Commands were NOT run.'\n log.warning(e)\n context.gear_dict['warnings'].append(e)\n utils.dry_run.pretend_it_ran(context)\n\n if ok_to_run:\n\n # Create output directory\n log.info('Creating ' + context.gear_dict['output_analysisid_dir'])\n out = context.gear_dict['output_analysisid_dir']\n if not os.path.exists(out):\n os.makedirs(out)\n\n # ---------------------------------- #\n # The longitudinal pipeline, huzzah! #\n # ---------------------------------- #\n\n options = ' -openmp ' + context.gear_dict['cpu_count'] # zoom zomm\n\n if '3T' in context.config:\n options += ' -3T'\n\n subjects_dir = '/opt/freesurfer/subjects/'\n output_dir = context.gear_dict['output_analysisid_dir']\n\n # first link averages\n fst_links_to_make = [\"fsaverage\", \"lh.EC_average\",\"rh.EC_average\"]\n for fst in fst_links_to_make:\n targ = os.path.join(subjects_dir, fst)\n link = os.path.join(output_dir, fst)\n if not os.path.exists(link):\n log.info('Linking ' + targ + ' -> ' + link)\n os.symlink(os.path.join(subjects_dir, fst),\n os.path.join(output_dir, fst))\n else:\n log.info('Link exists ' + link)\n\n # Run cross-sectional analysis on each nifti\n # study is freesurfer's SUBJECTS_DIR\n scrnum = context.gear_dict['subject_code_safe']\n num_niftis = str(len(context.gear_dict['niftis']))\n\n for nn, nifti in enumerate(context.gear_dict['niftis']):\n\n if field_strength_close_enough(\n context.gear_dict['field_strength'][nn], 3):\n if ' -3T' not in options:\n options += ' -3T'\n\n subject_dir = scrnum + \"-\" + context.gear_dict['visits'][nn]\n\n update_gear_status('longitudinal-step', 'cross-sectional ' + \\\n subject_dir + ' (' + str(nn + 1) + ' of ' + num_niftis + \\\n ') \"' + context.gear_dict['file_names'][nn] + '\" ' + \\\n context.gear_dict['createds'][nn])\n\n cmd = 'recon-all -s ' + subject_dir + \\\n ' -i ' + nifti + ' -all -qcache' + options\n if dry:\n log.info('Not running: ' + cmd)\n else:\n log.info('Running: ' + cmd)\n ret.append(utils.system.run(context, cmd))\n\n set_recon_all_status(subject_dir)\n\n set_recon_all_status(subject_dir)\n\n # Create template\n cmd = 'recon-all -base BASE '\n\n update_gear_status('longitudinal-step', 'Create template')\n\n for nn, nifti in enumerate(context.gear_dict['niftis']):\n\n subject_dir = scrnum + \"-\" + context.gear_dict['visits'][nn]\n\n cmd += '-tp ' + subject_dir + ' '\n\n cmd += '-all' + options\n if dry:\n log.info('Not running: ' + cmd)\n else:\n log.info('Running: ' + cmd)\n ret.append(utils.system.run(context, cmd))\n\n set_recon_all_status('BASE')\n\n # Run longitudinal on each time point\n\n for nn, nifti in enumerate(context.gear_dict['niftis']):\n\n subject_dir = scrnum + \"-\" + context.gear_dict['visits'][nn]\n\n update_gear_status('longitudinal-step', 'longitudinal ' +\n subject_dir + ' (' + str(nn + 1) + ' of ' + num_niftis + \\\n ') \"' + context.gear_dict['file_names'][nn] + '\" ' + \\\n context.gear_dict['createds'][nn])\n\n cmd = 'recon-all -long ' + subject_dir + ' BASE -all' + options\n if dry:\n log.info('Not running: ' + cmd)\n else:\n log.info('Running: ' + cmd)\n 
ret.append(utils.system.run(context, cmd))\n\n set_recon_all_status(subject_dir + '.long.BASE')\n\n update_gear_status('longitudinal-step', 'all steps completed')\n\n # run asegstats2table and aparcstats2table to create tables from\n # aseg.stats and ?h.aparc.stats. Then modify the results.\n # freesurfer_tables.pl\n os.chdir(out)\n cmd = '/flywheel/v0/freesurfer_tables.pl .'\n log.info('Running: ' + cmd)\n ret.append(utils.system.run(context, cmd))\n\n log.info('Return codes: ' + repr(ret))\n\n if all(rr == 0 for rr in ret):\n log.info('Command successfully executed!')\n return_code = 0\n\n else:\n log.info('Command failed.')\n return_code = 1\n\n except Exception as e:\n context.gear_dict['errors'].append(e)\n log.critical(e)\n log.exception('Unable to execute command.')\n\n finally:\n\n # Copy summary csv files to top-level output\n files = glob.glob(context.gear_dict['output_analysisid_dir'] + \\\n '/tables/*')\n for ff in files:\n shutil.copy(ff,context.output_dir)\n\n if context.config['remove_subjects_dir']:\n # Remove all of Freesurfer's subject directories\n paths = glob.glob(context.gear_dict['output_analysisid_dir'] + '/*')\n for path in paths:\n if os.path.basename(path) != 'tables':\n if os.path.islink(path):\n os.unlink(path)\n log.debug('removing link \"' + path + '\"')\n elif os.path.isdir(path):\n log.debug('removing subject directory \"' + path + '\"')\n shutil.rmtree(path)\n\n # Default config: zip entire output/<analysis_id> folder\n if os.path.exists(context.gear_dict['output_analysisid_dir']):\n if context.config['gear-zip-output']:\n\n zip_output(context)\n\n path = context.output_dir + '/' + context.destination['id']\n log.debug('removing output directory \"' + path + '\"')\n shutil.rmtree(path)\n\n else:\n log.info('NOT zipping output directory \"' +\n context.gear_dict['output_analysisid_dir'] + '\"')\n\n else:\n log.info('Output directory does not exist so it cannot be removed')\n\n if len(context.gear_dict['warnings']) > 0 :\n msg = 'Previous warnings:\\n'\n for err in context.gear_dict['warnings']:\n if str(type(err)).split(\"'\")[1] == 'str':\n # show string\n msg += ' Warning: ' + str(err) + '\\n'\n else: # show type (of warning) and warning message\n msg += ' ' + str(type(err)).split(\"'\")[1] + ': ' + \\\n str(err) + '\\n'\n log.info(msg)\n\n if len(context.gear_dict['errors']) > 0 :\n msg = 'Previous errors:\\n'\n for err in context.gear_dict['errors']:\n if str(type(err)).split(\"'\")[1] == 'str':\n # show string\n msg += ' Error msg: ' + str(err) + '\\n'\n else: # show type (of error) and error message\n msg += ' ' + str(type(err)).split(\"'\")[1] + ': ' + \\\n str(err) + '\\n'\n log.info(msg)\n return_code = 1\n\n log.info('Gear is done. Returning '+str(return_code))\n os.sys.exit(return_code)"
] | [
"0.56092745",
"0.5602606",
"0.555728",
"0.55155736",
"0.5420812",
"0.5266047",
"0.522466",
"0.5147111",
"0.514152",
"0.5106763",
"0.5106763",
"0.5106763",
"0.5088506",
"0.5057311",
"0.5047965",
"0.5040335",
"0.5024697",
"0.5017567",
"0.4963319",
"0.49532133",
"0.49410152",
"0.48716807",
"0.48446655",
"0.48428103",
"0.48087698",
"0.4805968",
"0.47912377",
"0.47853225",
"0.4745428",
"0.47451842"
] | 0.76606447 | 0 |
Set value by resource management context instance | def _set_value(rsm_ctx, value, value_type, resource_name=None):
value_dict = {value_type: value}
if resource_name:
value_dict['resource_name'] = resource_name
rsm_ctx.log('debug', 'Setting {}', value_dict)
rsm_ctx.set_value(**value_dict) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_resource_data(self, resource, meta):",
"def update_context(self, ctx):\r\n assert isinstance(ctx, dict)\r\n ctx[str(self.context_id)] = self.value",
"def set_attribute(self, context: ResourceCommandContext, obj_ref: str, attr_name: str, attr_value: str) -> None:\n self.handler.set_attribute(obj_ref, attr_name, attr_value)",
"def __get__(self, instance, owner):\r\n self.resource_meta = instance\r\n return self",
"def process_property(self, resources, resource, model, prop, context):\n pass",
"def set_res_value(name, value):\n pass",
"def setContext(self, context: Any, /) -> Any:\n ...",
"def resource(self, resource):\n self._resource = resource",
"def resource(self, resource):\n self._resource = resource",
"def context(self, context):\n self._context = context",
"def process_parameter(self, resources, resource, api, operation, parameter,\n context):\n pass",
"def set(self, request, _object):\n\n value = request._get_parameter_value(self)\n value.object = _object",
"def context(self, context):\n\n self._context = context",
"def resource(self, resource):\n\n self._resource = resource",
"def _set_default(name, value, context):\n if name not in context:\n context[name] = value",
"def setParameter(self, name, value):",
"def set_resource_data(self, resource, meta):\n super().set_resource_data(resource, meta)\n self._set_resource_temperature(resource)\n self._set_egs_plant_design_temperature()\n self._set_nameplate_to_match_resource_potential(resource)\n self._set_resource_potential_to_match_gross_output()\n self._set_costs()",
"def set(self, key, value):\n self.context.set(self.prefix+'.'+key, value)",
"def __setattr__(self, name, value):\n if name in [\"sampling_function\", \"env\", \"fit_dist\", \"reset\"]:\n object.__setattr__(self, name, value)\n else:\n setattr(self.env, name, value)",
"def set_context(self, context: Context):\n self.context = context",
"def set_context(self, context: Context):\n self.context = context",
"def set(self, key, value):\n task = Task.current_task()\n try:\n context = task._context\n except AttributeError:\n task._context = context = {}\n context[key] = value",
"def set_param(self, name, value, *, distrib=None, ref=None):\n raise NotImplementedError",
"def __setattr__(self, name, value):\n self.set(**{name: value})",
"def updateResource(self, authenticationToken, resource):\r\n pass",
"def __set__(self, instance, value):\n instance._values[self.name] = self.process(value)",
"def set_value ( self, object, value ):\n target, name = self.target_name( object )\n setattr( target, name, value )",
"def setTemplateParameter(self,name,value):\n self.tplparam[name] = value",
"def __set__(self, obj, value):\n\n return setattr(obj, '_' + self.name, value)",
"def __setattr__(self, key, value):\n return setattr(self.__dict__['_obj'], key, value)"
] | [
"0.7004103",
"0.64217883",
"0.63418335",
"0.63214225",
"0.63117516",
"0.62844723",
"0.6276568",
"0.6202078",
"0.6202078",
"0.6191413",
"0.61662775",
"0.6094014",
"0.6078255",
"0.6006722",
"0.60014075",
"0.5977802",
"0.59605306",
"0.5909131",
"0.5898597",
"0.5889289",
"0.5889289",
"0.58676046",
"0.58350754",
"0.5812761",
"0.5794312",
"0.57911503",
"0.5751143",
"0.5750471",
"0.57180786",
"0.5715042"
] | 0.6990149 | 1 |
Check support 'rsm_ctx' type by handler. Instance should be NODE_TYPE_QUOTA. | def can_handle(self, rsm_ctx):
return rsm_ctx.instance.type == NODE_TYPE_QUOTA | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def can_handle(self, rsm_ctx):\n return not rsm_ctx.instance.type",
"def can_handle(self, rsm_ctx):\n return rsm_ctx.instance.type == NODE_TYPE_USAGE",
"def can_handle(self, rsm_ctx):\n return rsm_ctx.instance.type == NODE_TYPE_USAGE",
"def can_handle(self, rsm_ctx):\n return rsm_ctx.instance.type == NODE_TYPE_USAGE",
"def can_handle(self, rsm_ctx):\n return super(OpenstackQuotaHandler, self).can_handle(rsm_ctx) and \\\n SYSTEM_NAME_OPENSTACK in rsm_ctx.instance.system_name",
"def handle(self, rsm_ctx):\n rsm_ctx.log(\n 'info',\n 'Node instance has type with is not supported by '\n 'Resource Management Plugin. Skipping'\n )",
"def can_handle(self, rsm_ctx):\n return rsm_ctx.instance.type == NODE_TYPE_RESULT",
"def can_handle(self, rsm_ctx):\n return rsm_ctx.instance.type == NODE_TYPE_PROJECT",
"def can_handle(self, rsm_ctx):\n return False",
"def handle(self, rsm_ctx):\n self._process_runtime_properties(\n rsm_ctx,\n rsm_ctx.instance.runtime_properties,\n self.VALUE_TYPE_QUOTA\n )",
"def check_selinux_status(self):\n\n raise NotImplementedError()",
"def check_tree_type(tree):\n return tree.type in ref",
"def test_read_cluster_resource_quota_status(self):\n pass",
"def check_global_request(self, kind, msg):\n return False",
"def is_node_support(self, node):\n return self.node_attribute(key=node, name=\"type\") == \"support\"",
"def test_type(self):\n assert is_consistent_type(Context, \"Context\", TLSv1_METHOD)",
"async def permission_valid_check(cls):\n pass",
"def test_patch_cluster_resource_quota_status(self):\n pass",
"def check_kernel(cls):\n pass",
"def handle(self, rsm_ctx):\n pass",
"def global_quota(self) -> bool:\n return pulumi.get(self, \"global_quota\")",
"def check_is_admin(context):\n init()\n\n #the target is user-self\n credentials = context.to_dict()\n target = credentials\n\n return policy.check('context_is_admin', target, credentials)",
"async def check_quota(self, provider):\n # Load the spec-specific configuration if it has been overridden\n repo_config = provider.repo_config(self.settings)\n\n launch_quota = self.settings[\"launch_quota\"]\n try:\n return await launch_quota.check_repo_quota(\n self.image_name, repo_config, self.repo_url\n )\n except LaunchQuotaExceeded as e:\n LAUNCH_COUNT.labels(\n status=e.status,\n **self.repo_metric_labels,\n ).inc()\n await self.fail(e.message)\n raise",
"async def checktype(self, ctx:commands.Context):\r\n\r\n t = await self.GetChannelType(ctx.guild, ctx.channel.id)\r\n if t == 'none':\r\n await ctx.send(\r\n f'<#{ctx.channel.id}> is a normal channel (use `register <channel type>` to make this a specialized channel)')\r\n else:\r\n await ctx.send(f'<#{ctx.channel.id}> is a {t}')",
"def authorize_quota_class_context(context, class_name):\n if is_user_context(context):\n if not context.quota_class:\n raise exception.NotAuthorized()\n elif context.quota_class != class_name:\n raise exception.NotAuthorized()",
"def authorize_quota_class_context(context, class_name):\n if is_user_context(context):\n if not context.quota_class:\n raise exception.NotAuthorized()\n elif context.quota_class != class_name:\n raise exception.NotAuthorized()",
"def enable_hierarchical_resource_quota(self) -> Optional[bool]:\n return pulumi.get(self, \"enable_hierarchical_resource_quota\")",
"def CheckType(self, *args, **kwargs):\n pass",
"def node_type_validator(field, presentation, context, node_value, node_obj):\n the_child_nodetypes = []\n the_parent_capability_type_name = _get_requirement_in_type(context, presentation).\\\n capability\n the_parent_node_type_name = _get_requirement_in_type(context, presentation).node\n\n node_type = get_type_by_name(context, node_value, 'node_types')\n if node_type is None:\n context.validation.report(\n '\"%s\" refers to an unknown node type in \"%s\"'\n % (presentation._name, presentation._container._fullname),\n locator=presentation._get_child_locator(field.name),\\\n level=Issue.BETWEEN_FIELDS)\n return\n\n if the_parent_node_type_name:\n if not _is_parent(context, node_obj, the_parent_node_type_name, 'node_types'):\n context.validation.report(\n '\"%s\" refers to an unknown/inappropriate node type in \"%s\"'\n % (presentation._name, presentation._container._fullname),\n locator=presentation._get_child_locator(field.name),\\\n level=Issue.BETWEEN_FIELDS)\n return\n\n for the_node_type in context.presentation.presenter.service_template.node_types.\\\n iteritems():\n if the_node_type[1]._get_capabilities(context):\n the_capabilities = the_node_type[1]._get_capabilities(context)\n for the_capability in the_capabilities.iteritems():\n if _is_parent(context, the_capability[1]._get_type(context),\\\n the_parent_capability_type_name, 'capability_types'):\n the_child_nodetypes.append(the_node_type)\n\n for the_child_node_type in the_child_nodetypes:\n if _is_parent(context, the_child_node_type[1], node_obj._name, 'node_types'):\n return\n\n context.validation.report(\n '\"%s\" refers to a node type that does not match the capability requirement in \"%s\"'\n % (presentation._name, presentation._container._fullname),\n locator=presentation._get_child_locator(field.name), level=Issue.BETWEEN_FIELDS)\n return",
"def test_replace_cluster_resource_quota_status(self):\n pass"
] | [
"0.6811578",
"0.6618091",
"0.6618091",
"0.6618091",
"0.65914714",
"0.63905466",
"0.633495",
"0.6178352",
"0.5966573",
"0.57266796",
"0.52386665",
"0.5188582",
"0.5182612",
"0.48737982",
"0.4849607",
"0.48398957",
"0.48367012",
"0.48311907",
"0.4817003",
"0.4806151",
"0.48005065",
"0.4788951",
"0.47245663",
"0.4714898",
"0.4712836",
"0.4712836",
"0.4683927",
"0.46554285",
"0.46462247",
"0.46059352"
] | 0.7906162 | 0 |
Logic which should be executed for given 'rsm_ctx'. Process quota state from properties and run set_value on 'rsm_ctx'. | def handle(self, rsm_ctx):
self._process_runtime_properties(
rsm_ctx,
rsm_ctx.instance.runtime_properties,
self.VALUE_TYPE_QUOTA
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def handle(self, rsm_ctx):\n pass",
"def handle(self, rsm_ctx):\n runtime_properties = rsm_ctx.get_execution_result()\n\n rsm_ctx.log(\n 'info',\n 'Got {} runtime_properties after execution',\n runtime_properties.keys()\n )\n\n self._process_runtime_properties(\n rsm_ctx,\n runtime_properties,\n self.VALUE_TYPE_USAGE\n )",
"def modify_quota_config(self, quota_config_obj, quota_config_params):\n\n if quota_config_params:\n soft_limit = quota_config_params['default_soft_limit']\n hard_limit = quota_config_params['default_hard_limit']\n is_user_quota_enabled = quota_config_params['is_user_quota_enabled']\n quota_policy = quota_config_params['quota_policy']\n grace_period = quota_config_params['grace_period']\n cap_unit = quota_config_params['cap_unit']\n gp_unit = quota_config_params['grace_period_unit']\n\n if soft_limit:\n soft_limit_in_bytes = utils.get_size_bytes(soft_limit, cap_unit)\n else:\n soft_limit_in_bytes = quota_config_obj.default_soft_limit\n\n if hard_limit:\n hard_limit_in_bytes = utils.get_size_bytes(hard_limit, cap_unit)\n else:\n hard_limit_in_bytes = quota_config_obj.default_hard_limit\n\n if grace_period:\n grace_period_in_sec = get_time_in_seconds(grace_period, gp_unit)\n else:\n grace_period_in_sec = quota_config_obj.grace_period\n\n policy_enum = None\n policy_enum_val = None\n if quota_policy:\n if utils.QuotaPolicyEnum[quota_policy]:\n policy_enum = utils.QuotaPolicyEnum[quota_policy]\n policy_enum_val = \\\n utils.QuotaPolicyEnum[quota_policy]._get_properties()['value']\n else:\n errormsg = \"Invalid choice {0} for quota policy\".format(\n quota_policy)\n LOG.error(errormsg)\n self.module.fail_json(msg=errormsg)\n\n # Verify if modify is required. If not required, return False\n if quota_config_obj.default_hard_limit == hard_limit_in_bytes and \\\n quota_config_obj.default_soft_limit == soft_limit_in_bytes and \\\n quota_config_obj.grace_period == grace_period_in_sec and \\\n ((quota_policy is not None and\n quota_config_obj.quota_policy == policy_enum) or\n quota_policy is None) and \\\n (is_user_quota_enabled is None or\n (is_user_quota_enabled is not None and\n is_user_quota_enabled == quota_config_obj.is_user_quota_enabled)):\n return False\n\n try:\n resp = self.unity_conn.modify_quota_config(\n quota_config_id=quota_config_obj.id,\n grace_period=grace_period_in_sec,\n default_hard_limit=hard_limit_in_bytes,\n default_soft_limit=soft_limit_in_bytes,\n is_user_quota_enabled=is_user_quota_enabled,\n quota_policy=policy_enum_val)\n LOG.info(\"Successfully modified the quota config with response %s\", resp)\n return True\n\n except Exception as e:\n errormsg = \"Failed to modify quota config for filesystem {0} \" \\\n \" with error {1}\".format(quota_config_obj.filesystem.id, str(e))\n LOG.error(errormsg)\n self.module.fail_json(msg=errormsg)",
"def handle(self, rsm_ctx):\n rsm_ctx.log('info', 'Executing \"list\" operation for get usage ...')\n\n runtime_properties = rsm_ctx.run_execution()\n rsm_ctx.log(\n 'info',\n 'Got {} runtime_properties after execution',\n runtime_properties.keys()\n )\n\n self._process_runtime_properties(\n rsm_ctx,\n runtime_properties,\n self.VALUE_TYPE_USAGE\n )",
"def test_patch_cluster_resource_quota_status(self):\n pass",
"def can_handle(self, rsm_ctx):\n return rsm_ctx.instance.type == NODE_TYPE_QUOTA",
"def test_replace_cluster_resource_quota_status(self):\n pass",
"def test_admin_set_quota_one_quota_positive(self):\n self.client.admin_set_quotas(role=\"usr-sys-admin-test\", read_quota=250)\n time.sleep(1)\n roles = self.client.admin_get_role(\"usr-sys-admin-test\")\n assert roles == {\n \"privileges\": [{\"ns\": \"\", \"set\": \"\", \"code\": 0}, {\"ns\": \"\", \"set\": \"\", \"code\": 1}],\n \"whitelist\": [],\n \"read_quota\": 250,\n \"write_quota\": 4500,\n }",
"def test_admin_set_quota_positive_with_policy(self):\n self.client.admin_set_quotas(\n role=\"usr-sys-admin-test\", read_quota=250, write_quota=300, policy={\"timeout\": 180000}\n )\n time.sleep(1)\n roles = self.client.admin_get_role(\"usr-sys-admin-test\")\n assert roles == {\n \"privileges\": [{\"ns\": \"\", \"set\": \"\", \"code\": 0}, {\"ns\": \"\", \"set\": \"\", \"code\": 1}],\n \"whitelist\": [],\n \"read_quota\": 250,\n \"write_quota\": 300,\n }",
"def _init_global_value_by_governance_score(self):\n context: 'IconScoreContext' = self._context_factory.create(IconScoreContextType.QUERY)\n # Clarifies that This Context does not count steps\n context.step_counter = None\n\n try:\n self._push_context(context)\n # Gets the governance SCORE\n governance_score: 'Governance' = context.get_icon_score(GOVERNANCE_SCORE_ADDRESS)\n if governance_score is None:\n raise ServerErrorException(f'governance_score is None')\n\n # Gets the step price if the fee flag is on\n # and set to the counter factory\n if context.is_service_flag_on(IconServiceFlag.fee):\n step_price = governance_score.getStepPrice()\n else:\n step_price = 0\n\n self._step_counter_factory.set_step_price(step_price)\n\n # Gets the step costs and set to the counter factory\n step_costs = governance_score.getStepCosts()\n\n for key, value in step_costs.items():\n try:\n self._step_counter_factory.set_step_cost(\n StepType(key), value)\n except ValueError:\n # Pass the unknown step type\n pass\n\n # Gets the max step limit and keep into the counter factory\n self._step_counter_factory.set_max_step_limit(\n IconScoreContextType.INVOKE,\n governance_score.getMaxStepLimit(\"invoke\"))\n self._step_counter_factory.set_max_step_limit(\n IconScoreContextType.QUERY,\n governance_score.getMaxStepLimit(\"query\"))\n\n finally:\n self._pop_context()\n\n self._context_factory.destroy(context)",
"def reconfigure_nova_quota(self):\n self.show_step(1, initialize=True)\n self.env.revert_snapshot(\"basic_env_for_reconfiguration\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles(\n cluster_id, ['controller'])\n\n self.show_step(2)\n config = utils.get_config_template('nova_quota')\n structured_config = get_structured_config_dict(config)\n self.fuel_web.client.upload_configuration(config,\n cluster_id,\n role=\"controller\")\n\n self.show_step(3)\n uptimes = self.get_service_uptime(controllers, 'nova-api')\n\n self.show_step(4)\n task = self.fuel_web.client.apply_configuration(cluster_id,\n role=\"controller\")\n\n self.show_step(5)\n self.fuel_web.assert_task_success(task, timeout=900, interval=5)\n\n self.show_step(6)\n self.check_service_was_restarted(controllers, uptimes, 'nova-api')\n\n self.show_step(7)\n self.check_config_on_remote(controllers, structured_config)\n\n self.show_step(8)\n self.show_step(9)\n os_conn = os_actions.OpenStackActions(\n self.fuel_web.get_public_vip(cluster_id))\n\n self.check_nova_quota(os_conn, cluster_id)\n\n self.env.make_snapshot(\"reconfigure_nova_quota\")",
"def getquota(self, mailbox):\n if \"QUOTA\" not in self.capabilities:\n self.quota_limit = self.quota_current = None\n return\n try:\n data = self._cmd(\"GETQUOTAROOT\", self._encode_mbox_name(mailbox),\n responses=[\"QUOTAROOT\", \"QUOTA\"])\n except ImapError:\n data = None\n finally:\n if data is None:\n self.quota_limit = self.quota_current = None\n return\n\n quotadef = data[1][0].decode()\n m = re.search(r\"\\(STORAGE (\\d+) (\\d+)\\)\", quotadef)\n if not m:\n print(\"Problem while parsing quota def\")\n return\n self.quota_limit = int(m.group(2))\n self.quota_current = int(m.group(1))\n try:\n self.quota_usage = (\n int(float(self.quota_current) / float(self.quota_limit) * 100)\n )\n except TypeError:\n self.quota_usage = -1",
"def test_admin_set_quota_no_quotas_positive(self):\n self.client.admin_set_quotas(\n role=\"usr-sys-admin-test\",\n )\n time.sleep(1)\n roles = self.client.admin_get_role(\"usr-sys-admin-test\")\n assert roles == {\n \"privileges\": [{\"ns\": \"\", \"set\": \"\", \"code\": 0}, {\"ns\": \"\", \"set\": \"\", \"code\": 1}],\n \"whitelist\": [],\n \"read_quota\": 0,\n \"write_quota\": 4500,\n }",
"def set_quota(self, tenant_id):\n # Get the admin tenant's id.\n\n _url = \"http://\" + self.host_ip + \":8774/v2/\" + \\\n self.cloud_admin_info['project_id'] + \"/os-quota-sets/\" + tenant_id\n _headers = {'Content-type': 'application/json',\n 'x-auth-token': self.cloud_admin_info['token_project']}\n _body = {\"quota_set\": {\n \"cores\": 80,\n \"floating_ips\": 40,\n \"instances\": 100,\n \"ram\": 512000}}\n response = self.request(\"PUT\", _url, _headers, json.dumps(_body))\n if response is None:\n LOG_OBJ.error(\"No response from server while setting the quota\"\n \" for tenant: %s\" % tenant_id)\n return\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Modifying quota Failed with status %s \" %\n response.status)\n return response.status\n output = json.loads(response.data)\n LOG_OBJ.info(\"Tenant Quota Modified. Details : %s \" % output)\n\n return True",
"def _set_value(rsm_ctx, value, value_type, resource_name=None):\n value_dict = {value_type: value}\n\n if resource_name:\n value_dict['resource_name'] = resource_name\n\n rsm_ctx.log('debug', 'Setting {}', value_dict)\n rsm_ctx.set_value(**value_dict)",
"def can_handle(self, rsm_ctx):\n return super(OpenstackQuotaHandler, self).can_handle(rsm_ctx) and \\\n SYSTEM_NAME_OPENSTACK in rsm_ctx.instance.system_name",
"def test_read_cluster_resource_quota_status(self):\n pass",
"def perform_module_operation(self):\n filesystem_name = self.module.params['filesystem_name']\n filesystem_id = self.module.params['filesystem_id']\n nas_server_name = self.module.params['nas_server_name']\n nas_server_id = self.module.params['nas_server_id']\n pool_name = self.module.params['pool_name']\n pool_id = self.module.params['pool_id']\n size = self.module.params['size']\n cap_unit = self.module.params['cap_unit']\n quota_config = self.module.params['quota_config']\n state = self.module.params['state']\n snap_schedule_name = self.module.params['snap_schedule_name']\n snap_schedule_id = self.module.params['snap_schedule_id']\n\n # result is a dictionary to contain end state and FileSystem details\n changed = False\n result = dict(\n changed=False,\n filesystem_details=None\n )\n\n to_modify_dict = None\n filesystem_details = None\n quota_config_obj = None\n\n self.validate_input_string()\n\n if size is not None and size == 0:\n self.module.fail_json(msg=\"Size can not be 0 (Zero)\")\n\n if size and not cap_unit:\n cap_unit = 'GB'\n\n if quota_config:\n if (quota_config['default_hard_limit'] is not None\n or quota_config['default_soft_limit'] is not None) and \\\n not quota_config['cap_unit']:\n quota_config['cap_unit'] = 'GB'\n\n if quota_config['grace_period'] is not None \\\n and quota_config['grace_period_unit'] is None:\n quota_config['grace_period_unit'] = 'days'\n\n if quota_config['grace_period'] is not None \\\n and quota_config['grace_period'] <= 0:\n self.module.fail_json(msg=\"Invalid grace_period provided. \"\n \"Must be greater than 0.\")\n\n if quota_config['default_soft_limit'] is not None \\\n and utils.is_size_negative(quota_config['default_soft_limit']):\n self.module.fail_json(msg=\"Invalid default_soft_limit provided. \"\n \"Must be greater than or equal to 0.\")\n\n if quota_config['default_hard_limit'] is not None \\\n and utils.is_size_negative(quota_config['default_hard_limit']):\n self.module.fail_json(msg=\"Invalid default_hard_limit provided. 
\"\n \"Must be greater than or equal to 0.\")\n\n if (cap_unit is not None) and not size:\n self.module.fail_json(msg=\"cap_unit can be specified along \"\n \"with size\")\n\n nas_server = None\n if nas_server_name or nas_server_id:\n nas_server = self.get_nas_server(\n name=nas_server_name, id=nas_server_id)\n\n obj_pool = None\n if pool_name or pool_id:\n obj_pool = self.get_pool(pool_name=pool_name, pool_id=pool_id)\n\n obj_fs = None\n obj_fs = self.get_filesystem(name=filesystem_name,\n id=filesystem_id,\n obj_nas_server=nas_server)\n\n self.snap_sch_id = None\n if snap_schedule_name or snap_schedule_id:\n snap_schedule_params = {\n \"name\": snap_schedule_name,\n \"id\": snap_schedule_id\n }\n self.snap_sch_id = self.resolve_to_snapschedule_id(snap_schedule_params)\n elif snap_schedule_name == \"\" or snap_schedule_id == \"\":\n self.snap_sch_id = \"\"\n\n if obj_fs:\n filesystem_details = obj_fs._get_properties()\n filesystem_id = obj_fs.get_id()\n to_modify_dict = self.is_modify_required(obj_fs, cap_unit)\n LOG.info(\"From Mod Op, to_modify_dict: %s\", to_modify_dict)\n\n if state == 'present' and not filesystem_details:\n if not filesystem_name:\n msg_noname = \"FileSystem with id {0} is not found, unable to \" \\\n \"create a FileSystem without a valid \" \\\n \"filesystem_name\".format(filesystem_id)\n self.module.fail_json(msg=msg_noname)\n\n if not pool_name and not pool_id:\n self.module.fail_json(msg=\"pool_id or pool_name is required \"\n \"to create new filesystem\")\n if not size:\n self.module.fail_json(msg=\"Size is required to create\"\n \" a filesystem\")\n size = utils.get_size_bytes(size, cap_unit)\n\n obj_fs = self.create_filesystem(name=filesystem_name,\n obj_pool=obj_pool,\n obj_nas_server=nas_server,\n size=size)\n\n LOG.debug(\"Successfully created filesystem , %s\", obj_fs)\n filesystem_id = obj_fs.id\n filesystem_details = obj_fs._get_properties()\n to_modify_dict = self.is_modify_required(obj_fs, cap_unit)\n LOG.debug(\"Got filesystem id , %s\", filesystem_id)\n changed = True\n\n if state == 'present' and filesystem_details and to_modify_dict:\n self.modify_filesystem(update_dict=to_modify_dict, obj_fs=obj_fs)\n changed = True\n\n \"\"\"\n Set quota configuration\n \"\"\"\n if state == \"present\" and filesystem_details and quota_config:\n quota_config_obj = self.get_quota_config_details(obj_fs)\n\n if quota_config_obj is not None:\n is_quota_config_modified = self.modify_quota_config(\n quota_config_obj=quota_config_obj,\n quota_config_params=quota_config)\n\n if is_quota_config_modified:\n changed = True\n else:\n self.module.fail_json(msg=\"One or more operations related\"\n \" to this task failed because the\"\n \" new object created could not be fetched.\"\n \" Please rerun the task for expected result.\")\n\n if state == 'absent' and filesystem_details:\n changed = self.delete_filesystem(filesystem_id)\n filesystem_details = None\n\n if state == 'present' and filesystem_details:\n filesystem_details = self.get_filesystem_display_attributes(\n obj_fs=obj_fs)\n\n result['changed'] = changed\n result['filesystem_details'] = filesystem_details\n self.module.exit_json(**result)",
"def __init__( self, conf ):\n \n self.conf = conf\n self.irodsu = IRODSUtils(self.conf.irods_home_dir, 'QuotaStats',\n self.conf.irods_debug)",
"def set_values(self):\n super(ResConfigInherit, self).set_values()\n self.env['ir.config_parameter'].sudo().set_param(\n 'sale_stock_restrict.product_restriction', self.product_restriction)\n self.env['ir.config_parameter'].sudo().set_param(\n 'sale_stock_restrict.check_stock', self.check_stock)",
"def handle(self, rsm_ctx):\n rsm_ctx.log(\n 'info',\n 'Starting executing for \"list\" operation for get usage ...'\n )\n\n execution_id = rsm_ctx.run_execution(wait=False)\n rsm_ctx.log(\n 'info',\n 'Execution started with ID: {} ...'.format(execution_id)\n )",
"def test_change_default_throttling_settings_http_with_overwrite_throttled_rate_above_account_quota():",
"def get_quota(self):\n raise NotImplementedError",
"def run_ten_years_quota(self, quota):\n #TODO: Experiment with changing to while(self.harvest_available != 0)\n # This would allow us to see how long we could sustain a population at a given quota level\n flag=True\n if flag==True:\n for x in range(10):\n self.harvest_available = self.run_step()\n self.run_year(quota)\n else:\n \n self.harvest_available = self.run_step()\n while self.harvest_available !=0:\n self.harvest_available = self.run_step()\n self.run_year(quota)",
"def api_quota_command():\n # 1. There is no parameter input required from Demisto\n # 2. Get the quota status info from SlashNext API\n response = api_quota()\n if response.get('errorNo') != 0:\n return\n # 3. Parse and format the response\n quota_data = response.get('quotaDetails')\n\n title = 'SlashNext Phishing Incident Response - API Quota\\n'\\\n '##### Note: {}'.format(quota_data.get('note'))\n\n snx_ioc_cont = {\n 'LicensedQuota': quota_data.get('licensedQuota'),\n 'RemainingQuota': quota_data.get('remainingQuota'),\n 'ExpirationDate': quota_data.get('expiryDate'),\n 'IsExpired': quota_data.get('isExpired')\n }\n\n ec = {\n 'SlashNext.Quota(val.Value === obj.Value)': snx_ioc_cont\n }\n\n md = tableToMarkdown(\n title,\n snx_ioc_cont,\n ['LicensedQuota',\n 'RemainingQuota',\n 'ExpirationDate']\n )\n\n return_outputs(md, ec, snx_ioc_cont)",
"def update(self, runningrates, rspec):\n # cache share for later comparison\n runningrates['share'] = self.Share\n\n # Query Node Manager for max rate overrides\n self.updateSliceTags(rspec)\n\n usedbytes = runningrates['usedbytes']\n usedi2bytes = runningrates['usedi2bytes']\n\n # Check limits.\n if usedbytes >= (self.bytes + (self.ThreshKByte * 1024)):\n sum = self.bytes + (self.ThreshKByte * 1024)\n maxbyte = self.MaxKByte * 1024\n bytesused = usedbytes - self.bytes\n timeused = int(time.time() - self.time)\n # Calcuate new rate. in bit/s\n new_maxrate = int(((maxbyte - bytesused) * 8)/(period - timeused))\n # Never go under MinRate\n if new_maxrate < (self.MinRate * 1000):\n new_maxrate = self.MinRate * 1000\n # State information. I'm capped.\n self.capped += True\n else:\n # Sanity Check\n new_maxrate = self.MaxRate * 1000\n self.capped += False\n\n if usedi2bytes >= (self.i2bytes + (self.Threshi2KByte * 1024)):\n maxi2byte = self.Maxi2KByte * 1024\n i2bytesused = usedi2bytes - self.i2bytes\n timeused = int(time.time() - self.time)\n # Calcuate New Rate.\n new_maxi2rate = int(((maxi2byte - i2bytesused) * 8)/(period - timeused))\n # Never go under MinRate\n if new_maxi2rate < (self.Mini2Rate * 1000):\n new_maxi2rate = self.Mini2Rate * 1000\n # State information. I'm capped.\n self.capped += True\n else:\n # Sanity\n new_maxi2rate = self.Maxi2Rate * 1000\n self.capped += False\n\n # Check running values against newly calculated values so as not to run tc\n # unnecessarily\n if (runningrates['maxrate'] != new_maxrate) or \\\n (runningrates['minrate'] != self.MinRate * 1000) or \\\n (runningrates['maxexemptrate'] != new_maxi2rate) or \\\n ('minexemptrate' in runningrates and runningrates['minexemptrate'] != self.Mini2Rate * 1000) or \\\n (runningrates['share'] != self.Share):\n # Apply parameters\n bwlimit.set(xid = self.xid, dev = dev_default,\n minrate = self.MinRate * 1000,\n maxrate = new_maxrate,\n minexemptrate = self.Mini2Rate * 1000,\n maxexemptrate = new_maxi2rate,\n share = self.Share)\n\n # Notify slice\n if self.capped == True:\n self.notify(new_maxrate, new_maxi2rate, usedbytes, usedi2bytes)",
"def test_patch_cluster_resource_quota(self):\n pass",
"def test_change_default_throttling_settings_http_with_overwrite_throttled_burst_above_account_quota():",
"def quota_update(self, tenant_id, fields):\n _url = \"http://\" + self.host_ip + \":9696/v2.0/quotas/\" + \\\n tenant_id + \".json\"\n _headers = {'Content-type': 'application/json',\n 'x-auth-token': self.cloud_admin_info['token_project']}\n _body = {\"quota\": fields}\n\n response = self.request(\"PUT\", _url, _headers, json.dumps(_body))\n if response is None:\n LOG_OBJ.error(\"No response from server while updating the quota\"\n \" for tenant: %s\" % tenant_id)\n return\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Updating quota Failed with status %s \"\n % response.status)\n return response.status\n output = json.loads(response.data)\n\n LOG_OBJ.info(\"Tenant Quota Details : %s \" % output)\n return output",
"def handle_rate_limit(rate_limit):\n remaining = rate_limit['remaining']\n limit = rate_limit['limit']\n percent_remaining = remaining / limit\n reset_at = rate_limit['resetAt']\n if percent_remaining < 0.15:\n reset_at = datetime.strptime(reset_at, '%Y-%m-%dT%H:%M:%SZ')\n current_time = datetime.now()\n time_diff = reset_at - current_time\n seconds = time_diff.total_seconds()\n\n print(f'Rate Limit hit. Waiting for reset.\\nProcess will continue at: {reset_at}')\n\n time.sleep(seconds)"
] | [
"0.5784707",
"0.57681483",
"0.5694401",
"0.565285",
"0.554098",
"0.5526146",
"0.55199933",
"0.5502226",
"0.5368012",
"0.52405834",
"0.52204037",
"0.52040344",
"0.5194511",
"0.51885706",
"0.5176698",
"0.5157406",
"0.51350015",
"0.50702596",
"0.5042467",
"0.50392616",
"0.5035219",
"0.5004069",
"0.49945295",
"0.4993087",
"0.4953089",
"0.48610008",
"0.48212817",
"0.48138043",
"0.47713476",
"0.47713113"
] | 0.7451228 | 0 |
Check support 'rsm_ctx' type by handler. Instance should be NODE_TYPE_USAGE. | def can_handle(self, rsm_ctx):
return rsm_ctx.instance.type == NODE_TYPE_USAGE | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def can_handle(self, rsm_ctx):\n return rsm_ctx.instance.type == NODE_TYPE_RESULT",
"def can_handle(self, rsm_ctx):\n return not rsm_ctx.instance.type",
"def handle(self, rsm_ctx):\n rsm_ctx.log(\n 'info',\n 'Node instance has type with is not supported by '\n 'Resource Management Plugin. Skipping'\n )",
"def can_handle(self, rsm_ctx):\n return rsm_ctx.instance.type == NODE_TYPE_QUOTA",
"def can_handle(self, rsm_ctx):\n return rsm_ctx.instance.type == NODE_TYPE_PROJECT",
"def can_handle(self, rsm_ctx):\n return False",
"def can_handle(self, rsm_ctx):\n return super(OpenstackQuotaHandler, self).can_handle(rsm_ctx) and \\\n SYSTEM_NAME_OPENSTACK in rsm_ctx.instance.system_name",
"def is_node_support(self, node):\n return self.node_attribute(key=node, name=\"type\") == \"support\"",
"def check_tree_type(tree):\n return tree.type in ref",
"def check_kernel(cls):\n pass",
"def test_type(self):\n assert is_consistent_type(Context, \"Context\", TLSv1_METHOD)",
"def handle(self, rsm_ctx):\n pass",
"def check_selinux_status(self):\n\n raise NotImplementedError()",
"def check(self):\n self.isNodes = False\n self.isFixable = False\n nodeType = self.get_parser.get('SETTINGS', 'settingsinfonode')\n self.setStatus('OK')\n if not len(pm.ls(type=nodeType)):\n self.setStatus('WARNING')\n self.setErrorMessage('No %s node found in the scene.' % nodeType)\n return False, ''\n elif len(pm.ls(type=nodeType)) > 1:\n self.setStatus('ERROR')\n self.setErrorMessage('More than 1 %s node found in the scene.' % nodeType)\n return False, ''\n return True, pm.ls(type=nodeType)[0]",
"def can_reevaluate(self, node):\n return isinstance(node, (ast.Name, ast.Num, ast.Str)) or \\\n (six.PY3 and isinstance(node, ast.Bytes)) or \\\n (ast_has_name_constant and isinstance(node, ast.NameConstant))",
"def CheckType(self, *args, **kwargs):\n pass",
"def get_node_type(self, node):\n raise NotImplementedError()",
"def do_check(self):\n res = self.entity.do_check(self.context)\n if res:\n return self.RES_OK, 'Node check succeeded.'\n else:\n return self.RES_ERROR, 'Node check failed.'",
"def check_global_request(self, kind, msg):\n return False",
"def check_trace_mode(device_type, trace_mode):\n if trace_mode == tensor_tracer_flags.TRACE_MODE_FULL_TENSOR_SUMMARY:\n if device_type != _DEVICE_TYPE_TPU:\n raise ValueError('Device_type \"%s\" is not yet supported for '\n 'trace mode \"%s\"' % (device_type, trace_mode))",
"async def checktype(self, ctx:commands.Context):\r\n\r\n t = await self.GetChannelType(ctx.guild, ctx.channel.id)\r\n if t == 'none':\r\n await ctx.send(\r\n f'<#{ctx.channel.id}> is a normal channel (use `register <channel type>` to make this a specialized channel)')\r\n else:\r\n await ctx.send(f'<#{ctx.channel.id}> is a {t}')",
"def test_get_node_status(self):\n pass",
"def node_is_pytest_context_manager(node: ast.AST) -> bool:\n return isinstance(node, ast.With) and bool(cm_exp.match(get_first_token(node).line))",
"def check_supported_features(self):",
"def check_type(self):\n return True",
"def check_r_type(r):\n if type(r) is str:\n raise TypeError('Get Error message.')",
"def dictGetType_granted_directly(self, type, node=None):\n\n user_name = f\"user_{getuid()}\"\n\n if node is None:\n node = self.context.node\n\n with user(node, f\"{user_name}\"):\n Suite(run=dictGetType_check,\n examples=Examples(\"privilege on grant_target_name user_name type\", [\n tuple(list(row)+[user_name,user_name,type]) for row in dictGetType_check.examples\n ], args=Args(name=\"check privilege={privilege}\", format_name=True)))",
"def check_global_attr_type(ds, attr, attr_type):\n if attr not in ds.ncattrs():\n return 0\n\n global_attr = getattr(ds, attr)\n\n if attr_type == 'int':\n attr_type_class = int\n elif attr_type == 'float':\n attr_type_class = float\n elif attr_type == 'str':\n attr_type_class = str\n else:\n return 1\n\n if len(str(global_attr)) == 0:\n return 2\n\n if np.dtype(type(global_attr)) != np.dtype(attr_type_class):\n return 3\n\n return 4",
"def has_request_context():\n from .application import Nereid\n\n return base_has_request_context() and \\\n isinstance(current_app._get_current_object(), Nereid)",
"def test_get_node_type_name(self):\n pass"
] | [
"0.68845487",
"0.6870086",
"0.6720605",
"0.65307677",
"0.6116541",
"0.5959433",
"0.5403402",
"0.5394847",
"0.52130985",
"0.51359606",
"0.50499636",
"0.5031325",
"0.50037795",
"0.4988742",
"0.4931527",
"0.48985812",
"0.48725662",
"0.48347872",
"0.47168615",
"0.471334",
"0.47120604",
"0.4702749",
"0.46957695",
"0.4629049",
"0.46137434",
"0.46033758",
"0.46032003",
"0.45947665",
"0.4584924",
"0.4575744"
] | 0.74504733 | 1 |
Logic which should be executed for given 'rsm_ctx'. Run execution on 'rsm_ctx'. | def handle(self, rsm_ctx):
rsm_ctx.log(
'info',
'Starting executing for "list" operation for get usage ...'
)
execution_id = rsm_ctx.run_execution(wait=False)
rsm_ctx.log(
'info',
'Execution started with ID: {} ...'.format(execution_id)
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def handle(self, rsm_ctx):\n pass",
"def can_handle(self, rsm_ctx):\n return rsm_ctx.instance.type == NODE_TYPE_RESULT",
"def handle(self, rsm_ctx):\n runtime_properties = rsm_ctx.get_execution_result()\n\n rsm_ctx.log(\n 'info',\n 'Got {} runtime_properties after execution',\n runtime_properties.keys()\n )\n\n self._process_runtime_properties(\n rsm_ctx,\n runtime_properties,\n self.VALUE_TYPE_USAGE\n )",
"def handle(self, rsm_ctx):\n rsm_ctx.log('info', 'Executing \"list\" operation for get usage ...')\n\n runtime_properties = rsm_ctx.run_execution()\n rsm_ctx.log(\n 'info',\n 'Got {} runtime_properties after execution',\n runtime_properties.keys()\n )\n\n self._process_runtime_properties(\n rsm_ctx,\n runtime_properties,\n self.VALUE_TYPE_USAGE\n )",
"def run(ctx):\n pass",
"def run_game_logic(self):\n pass",
"def _doRun(self, model: Model):\n raise Exception(\"Not implemented\")",
"def _run_scenario(self, cls, method_name, context, args, config):",
"def can_handle(self, rsm_ctx):\n return False",
"def run(self):\n if self.next_state == \"initialize_rexarm\":\n self.initialize_rexarm()\n\n if self.next_state == \"idle\":\n self.idle()\n\n if self.next_state == \"estop\":\n self.estop()\n\n if self.next_state == \"execute_tp\":\n self.execute_tp()\n\n if self.next_state == \"execute\":\n self.execute()\n\n if self.next_state == \"calibrate\":\n self.calibrate()\n\n if self.next_state == \"manual\":\n self.manual()\n\n if self.next_state == \"learn\":\n self.learn()\n\n if self.next_state == \"remember\":\n self.remember()\n\n if self.next_state == \"write\":\n self.write()\n\n if self.next_state == \"get_color\":\n self.get_color()\n\n if self.next_state == \"find_blocks\":\n self.find_blocks()\n\n # if self.next_state == \"dance\":\n # self.execute_dance()",
"def runctx(self, cmd, globals, locals):\n # B/W compatibility\n self.run(cmd, globals, locals)",
"def handle(self, rsm_ctx):\n rsm_ctx.log(\n 'info',\n 'Dumping gathered data to runtime_properties of {} node instance',\n rsm_ctx.instance.id\n )\n\n rsm_ctx.add_result_instance_id()\n rsm_ctx.set_runtime_properties({\n 'data': rsm_ctx.dump()\n })",
"def run(self):\r\n self.env.process(self.rw_pifo_sm())",
"def _run(self, *args, **kwargs):\n raise NotImplementedError",
"def can_handle(self, rsm_ctx):\n return not rsm_ctx.instance.type",
"def _run(self):\n raise NotImplementedError",
"def _run(self):\n raise NotImplementedError",
"def handle(self, rsm_ctx):\n rsm_ctx.log('info', 'Processing of project started')\n rsm_ctx.resolve_project()",
"def __call__(self, context):\n msg = context.latest_msg()\n # deal with some common cases\n\n # 调侃\n if msg.intent == 'tune':\n return self.utter_default, self\n\n self.on_process_message(msg)\n\n self.on_enter_state(context)\n\n ac, st = self.run(context)\n\n if st == self:\n self.repeat_times += 1\n else:\n self.on_finish_state(context)\n\n if self.repeat_times > 2:\n ac, st = self.turn_to_manual_custom_service(context), StateFinish()\n\n return ac, st",
"def execute(self, context: Any) -> Any:\n pass",
"def _run(self):\n result = self._consensus()\n if self._decision.split_group and result:\n self._set_decision(result)",
"def execute(self, driver, context):\n raise NotImplementedError",
"def _execute(self, model: ExecutableModelSpace) -> Any:\n\n from .space import BenchmarkModelSpace\n if not isinstance(model, BenchmarkModelSpace):\n warnings.warn('It would be better to use BenchmarkModelSpace for benchmarking to avoid '\n 'unnecessary overhead and silent mistakes.')\n if model.sample is None:\n raise ValueError('Model can not be evaluted because it has not been sampled yet.')\n\n return self.evaluate(model.sample)",
"def run(self, in_op):\n raise NotImplementedError",
"def _run_computation(self):\n with self.swap(stats_jobs_continuous.StatisticsAggregator,\n 'get_statistics', self._mock_get_statistics):\n ModifiedUserImpactAggregator.start_computation()\n self.process_and_flush_pending_tasks()",
"def handle(self, rsm_ctx):\n rsm_ctx.log(\n 'info',\n 'Node instance has type with is not supported by '\n 'Resource Management Plugin. Skipping'\n )",
"def run ( self ) :\n exec self._cmd in self._myglobals,self._mylocals",
"async def evaluate(self, ctx: Context, *, evalThis):\n\t\townercheck = self.owner_check(ctx.author.id)\n\t\tif ownercheck == False:\n\t\t\treturn await self.send('Owner-Only Command', whisper=[ctx.author.id])\n\t\tres = eval(evalThis)\n\t\tif inspect.isawaitable(res):\n\t\t\tawait self.send(await res, whisper=[ctx.author.id])\n\t\telse:\n\t\t\tawait self.send(res, whisper=[ctx.author.id])",
"def apply(ctx):\n pass",
"def _execute(self, _):\r\n pass"
] | [
"0.7543212",
"0.58339036",
"0.5705841",
"0.56092376",
"0.5581437",
"0.5514848",
"0.54928815",
"0.5442906",
"0.54387003",
"0.54118896",
"0.54112417",
"0.54038066",
"0.53368837",
"0.53190327",
"0.5260661",
"0.5252265",
"0.5252265",
"0.52007675",
"0.5192384",
"0.51621497",
"0.51299393",
"0.512294",
"0.51145166",
"0.5103023",
"0.5074761",
"0.5071312",
"0.5037123",
"0.5030227",
"0.5000912",
"0.49902862"
] | 0.6470867 | 1 |
Check whether this handler supports the given 'rsm_ctx'; the instance type should be NODE_TYPE_USAGE. | def can_handle(self, rsm_ctx):
return rsm_ctx.instance.type == NODE_TYPE_USAGE | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def can_handle(self, rsm_ctx):\n return rsm_ctx.instance.type == NODE_TYPE_RESULT",
"def can_handle(self, rsm_ctx):\n return not rsm_ctx.instance.type",
"def handle(self, rsm_ctx):\n rsm_ctx.log(\n 'info',\n 'Node instance has type with is not supported by '\n 'Resource Management Plugin. Skipping'\n )",
"def can_handle(self, rsm_ctx):\n return rsm_ctx.instance.type == NODE_TYPE_QUOTA",
"def can_handle(self, rsm_ctx):\n return rsm_ctx.instance.type == NODE_TYPE_PROJECT",
"def can_handle(self, rsm_ctx):\n return False",
"def can_handle(self, rsm_ctx):\n return super(OpenstackQuotaHandler, self).can_handle(rsm_ctx) and \\\n SYSTEM_NAME_OPENSTACK in rsm_ctx.instance.system_name",
"def is_node_support(self, node):\n return self.node_attribute(key=node, name=\"type\") == \"support\"",
"def check_tree_type(tree):\n return tree.type in ref",
"def check_kernel(cls):\n pass",
"def test_type(self):\n assert is_consistent_type(Context, \"Context\", TLSv1_METHOD)",
"def handle(self, rsm_ctx):\n pass",
"def check_selinux_status(self):\n\n raise NotImplementedError()",
"def check(self):\n self.isNodes = False\n self.isFixable = False\n nodeType = self.get_parser.get('SETTINGS', 'settingsinfonode')\n self.setStatus('OK')\n if not len(pm.ls(type=nodeType)):\n self.setStatus('WARNING')\n self.setErrorMessage('No %s node found in the scene.' % nodeType)\n return False, ''\n elif len(pm.ls(type=nodeType)) > 1:\n self.setStatus('ERROR')\n self.setErrorMessage('More than 1 %s node found in the scene.' % nodeType)\n return False, ''\n return True, pm.ls(type=nodeType)[0]",
"def can_reevaluate(self, node):\n return isinstance(node, (ast.Name, ast.Num, ast.Str)) or \\\n (six.PY3 and isinstance(node, ast.Bytes)) or \\\n (ast_has_name_constant and isinstance(node, ast.NameConstant))",
"def CheckType(self, *args, **kwargs):\n pass",
"def get_node_type(self, node):\n raise NotImplementedError()",
"def do_check(self):\n res = self.entity.do_check(self.context)\n if res:\n return self.RES_OK, 'Node check succeeded.'\n else:\n return self.RES_ERROR, 'Node check failed.'",
"def check_global_request(self, kind, msg):\n return False",
"def check_trace_mode(device_type, trace_mode):\n if trace_mode == tensor_tracer_flags.TRACE_MODE_FULL_TENSOR_SUMMARY:\n if device_type != _DEVICE_TYPE_TPU:\n raise ValueError('Device_type \"%s\" is not yet supported for '\n 'trace mode \"%s\"' % (device_type, trace_mode))",
"async def checktype(self, ctx:commands.Context):\r\n\r\n t = await self.GetChannelType(ctx.guild, ctx.channel.id)\r\n if t == 'none':\r\n await ctx.send(\r\n f'<#{ctx.channel.id}> is a normal channel (use `register <channel type>` to make this a specialized channel)')\r\n else:\r\n await ctx.send(f'<#{ctx.channel.id}> is a {t}')",
"def test_get_node_status(self):\n pass",
"def node_is_pytest_context_manager(node: ast.AST) -> bool:\n return isinstance(node, ast.With) and bool(cm_exp.match(get_first_token(node).line))",
"def check_supported_features(self):",
"def check_type(self):\n return True",
"def check_r_type(r):\n if type(r) is str:\n raise TypeError('Get Error message.')",
"def dictGetType_granted_directly(self, type, node=None):\n\n user_name = f\"user_{getuid()}\"\n\n if node is None:\n node = self.context.node\n\n with user(node, f\"{user_name}\"):\n Suite(run=dictGetType_check,\n examples=Examples(\"privilege on grant_target_name user_name type\", [\n tuple(list(row)+[user_name,user_name,type]) for row in dictGetType_check.examples\n ], args=Args(name=\"check privilege={privilege}\", format_name=True)))",
"def check_global_attr_type(ds, attr, attr_type):\n if attr not in ds.ncattrs():\n return 0\n\n global_attr = getattr(ds, attr)\n\n if attr_type == 'int':\n attr_type_class = int\n elif attr_type == 'float':\n attr_type_class = float\n elif attr_type == 'str':\n attr_type_class = str\n else:\n return 1\n\n if len(str(global_attr)) == 0:\n return 2\n\n if np.dtype(type(global_attr)) != np.dtype(attr_type_class):\n return 3\n\n return 4",
"def has_request_context():\n from .application import Nereid\n\n return base_has_request_context() and \\\n isinstance(current_app._get_current_object(), Nereid)",
"def test_get_node_type_name(self):\n pass"
] | [
"0.68845487",
"0.6870086",
"0.6720605",
"0.65307677",
"0.6116541",
"0.5959433",
"0.5403402",
"0.5394847",
"0.52130985",
"0.51359606",
"0.50499636",
"0.5031325",
"0.50037795",
"0.4988742",
"0.4931527",
"0.48985812",
"0.48725662",
"0.48347872",
"0.47168615",
"0.471334",
"0.47120604",
"0.4702749",
"0.46957695",
"0.4629049",
"0.46137434",
"0.46033758",
"0.46032003",
"0.45947665",
"0.4584924",
"0.4575744"
] | 0.74504733 | 0 |
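The `can_handle`/`handle` pair in this row follows a chain-of-responsibility pattern: a dispatcher walks a list of handlers and runs the first one whose `can_handle` accepts the node-instance context. The sketch below is a minimal, hypothetical illustration of that dispatch loop; the `Handler` base class, the `NODE_TYPE_USAGE` value and the stub context are assumptions made for the example and are not taken from the dataset rows.

```python
# Minimal sketch of the handler-dispatch pattern implied by can_handle()/handle().
# All names here are illustrative assumptions, not the plugin's real API.
NODE_TYPE_USAGE = 'usage'


class Handler:
    def can_handle(self, rsm_ctx):
        raise NotImplementedError

    def handle(self, rsm_ctx):
        raise NotImplementedError


class UsageHandler(Handler):
    def can_handle(self, rsm_ctx):
        # Accept only node instances marked as "usage" nodes.
        return rsm_ctx.instance.type == NODE_TYPE_USAGE

    def handle(self, rsm_ctx):
        rsm_ctx.log('info', 'Handling usage node instance')


def dispatch(handlers, rsm_ctx):
    # Run the first handler that declares support for this context.
    for handler in handlers:
        if handler.can_handle(rsm_ctx):
            handler.handle(rsm_ctx)
            return handler
    return None


if __name__ == '__main__':
    from types import SimpleNamespace
    ctx = SimpleNamespace(
        instance=SimpleNamespace(type=NODE_TYPE_USAGE),
        log=lambda level, msg, *args: print(level, msg.format(*args)),
    )
    dispatch([UsageHandler()], ctx)
```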
Logic to execute for the given 'rsm_ctx': process state from properties and run set_value on 'rsm_ctx'. | def handle(self, rsm_ctx):
runtime_properties = rsm_ctx.get_execution_result()
rsm_ctx.log(
'info',
'Got {} runtime_properties after execution',
runtime_properties.keys()
)
self._process_runtime_properties(
rsm_ctx,
runtime_properties,
self.VALUE_TYPE_USAGE
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def handle(self, rsm_ctx):\n pass",
"def handle(self, rsm_ctx):\n self._process_runtime_properties(\n rsm_ctx,\n rsm_ctx.instance.runtime_properties,\n self.VALUE_TYPE_QUOTA\n )",
"def handle(self, rsm_ctx):\n rsm_ctx.log(\n 'info',\n 'Dumping gathered data to runtime_properties of {} node instance',\n rsm_ctx.instance.id\n )\n\n rsm_ctx.add_result_instance_id()\n rsm_ctx.set_runtime_properties({\n 'data': rsm_ctx.dump()\n })",
"def handle(self, rsm_ctx):\n rsm_ctx.log('info', 'Executing \"list\" operation for get usage ...')\n\n runtime_properties = rsm_ctx.run_execution()\n rsm_ctx.log(\n 'info',\n 'Got {} runtime_properties after execution',\n runtime_properties.keys()\n )\n\n self._process_runtime_properties(\n rsm_ctx,\n runtime_properties,\n self.VALUE_TYPE_USAGE\n )",
"def state_processing_do(cfg, app, win, events):",
"def process_command(self, msg):\n if msg[\"content\"][\"command\"] == \"update config\":\n if msg[\"content\"][\"target\"] == self.type + \".state variable\":\n if \"state variable\" in msg[\"content\"][\"value\"]:\n curnames = list(self.stateVar.keys())\n needupdate = False\n for vardef in msg[\"content\"][\"value\"][\"state variable\"]:\n vname = self.type + \".state variable.\" + vardef[\"name\"]\n if vname in curnames:\n curnames.remove(vname)\n try:\n needupdate = needupdate or self.stateVar[\n vname].update(vardef)\n except:\n self.stateVar[vname] = sv_factory(vardef)\n needupdate = True\n else:\n self.stateVar[vname] = sv_factory(vardef)\n needupdate = True\n for name in curnames:\n del(self.stateVar[name])\n needupdate = True\n\n if needupdate:\n self.sending({\"subject\": \"control.\" + self.type,\n \"content_type\": \"request\",\n \"content\": {\"request\": \"save property\",\n \"target\": self.type,\n \"name\": \"state variable\",\n #\"token\": self.target,\n \"value\": bl.encrypt([x.vardef for x in self.stateVar.values()], self.config[\"buddykey\"])}})\n if self.log:\n self.log.debug(\"The new property stored is {}\".format(\n [x.vardef for x in self.stateVar.values()]))\n\n if msg[\"content\"][\"target\"] == self.type + \".rules\":\n if \"rules\" in msg[\"content\"][\"value\"]:\n curnames = list(self.rules.keys())\n needupdate = False\n for vardef in msg[\"content\"][\"value\"][\"rules\"]:\n vname = self.type + \".rules.\" + vardef[\"name\"]\n if vname in curnames:\n curnames.remove(vname)\n try:\n needupdate = needupdate or self.rules[\n vname].update(vardef)\n except:\n self.rules[vname] = Rule(vardef)\n needupdate = True\n else:\n self.rules[vname] = Rule(vardef)\n needupdate = True\n for name in curnames:\n del(self.rules[name])\n needupdate = True\n\n if needupdate:\n self.sending({\"subject\": \"control.\" + self.type,\n \"content_type\": \"request\",\n \"content\": {\"request\": \"save property\",\n \"target\": self.type,\n \"name\": \"rules\",\n #\"token\": self.target,\n \"value\": bl.encrypt([x.vardef for x in self.rules.values()], self.config[\"buddykey\"])}})\n if self.log:\n self.log.debug(\"The new property stored is {}\".format(\n [x.vardef for x in self.rules.values()]))\n\n if msg[\"content\"][\"target\"] == self.type + \".\"+self.subtype:\n\n for k, v in msg[\"content\"][\"value\"].items():\n action_config_default[k] = v\n self.sending({\"subject\": \"control\" + \".\" + self.subtype,\n \"content_type\": \"request\",\n \"content\": {\"request\": \"save configuration\",\n \"target\": self.type,\n #\"token\": self.target,\n \"value\": bl.encrypt(action_config_default, self.config[\"buddykey\"])}})\n\n # Now value contains the definition of that state variable\n # type is the type one of \"state\" or \"time\" because device state variable are\n # created automatically when devices are referenced in a rule\n # name is the name of the variable it must be unique\n # mickname an easier name",
"def __call__(self, context):\n msg = context.latest_msg()\n # deal with some common cases\n\n # 调侃\n if msg.intent == 'tune':\n return self.utter_default, self\n\n self.on_process_message(msg)\n\n self.on_enter_state(context)\n\n ac, st = self.run(context)\n\n if st == self:\n self.repeat_times += 1\n else:\n self.on_finish_state(context)\n\n if self.repeat_times > 2:\n ac, st = self.turn_to_manual_custom_service(context), StateFinish()\n\n return ac, st",
"def processSetConfig(self, msg):\r\n try:\r\n #---------------------------------------------------------------\r\n # RunInto config\r\n #---------------------------------------------------------------\r\n value = eval(msg[RunInto])\r\n if value is not None and type(value)==bool:\r\n self.runInto = value\r\n if value == True:\r\n LOG(\"Enabled RunInto\")\r\n self.controller.enableRunInto()\r\n elif value == False:\r\n LOG(\"Disabled RunInto\")\r\n self.controller.disableRunInto()\r\n #---------------------------------------------------------------\r\n # ExecDelay config\r\n #---------------------------------------------------------------\r\n value = eval(msg[ExecDelay])\r\n if value is not None and type(value) in [int,float]:\r\n LOG(\"Set execution delay: \" + repr(value))\r\n self.execDelay = value\r\n self.controller.setExecutionDelay(value)\r\n #---------------------------------------------------------------\r\n # ByStep config\r\n #---------------------------------------------------------------\r\n value = eval(msg[ByStep])\r\n if value is not None and type(value)==bool:\r\n LOG(\"Set step-by-step: \" + repr(value))\r\n self.stepByStep = value\r\n except BaseException,ex:\r\n LOG(\"Could not parse configuration: \" + repr(cfg), LOG_ERROR)\r\n resp = MsgHelper.createResponse(Messages.RSP_SET_CONFIG, msg)\r\n #TODO: send notification EXECUTOR CONFIGURED\r\n return resp",
"def _localSetState(self,pdict):\n self.mu = pdict.pop('mu')",
"def evaluate(self, state):\n abstract",
"def _set_value(rsm_ctx, value, value_type, resource_name=None):\n value_dict = {value_type: value}\n\n if resource_name:\n value_dict['resource_name'] = resource_name\n\n rsm_ctx.log('debug', 'Setting {}', value_dict)\n rsm_ctx.set_value(**value_dict)",
"def _eval_state(hass):\n state_str = ''.join(['1' if val else '0' for val in PERSIST['states']])\n state = int(state_str, 2)\n mode = PERSIST['mode']\n output = state in SCHEDULES[mode][0]\n _LOGGER.debug('Eval: %s %s = %s',\n PERSIST['mode'], str(PERSIST['states']), repr(output))\n\n if output != PERSIST['last_cmd']:\n PERSIST['last_cmd'] = output\n if output:\n _call_service(hass, SCHEDULES[mode][1], 'turn_on')\n else:\n _call_service(hass, SCHEDULES[mode][1], 'turn_off')",
"def process_event(self, event):\n if not self.frozen:\n if event[\"event\"] == self.event:\n if self.what is None or event[\"target\"].startswith(self.what):\n self._varstate = event\n try:\n for key in self.subval:\n self._varstate = self._varstate[key]\n\n if bridgectl.log:\n bridgectl.log.debug(\n \"New value for {} is {}\".format(\n self.name,\n self._varstate))\n except Exception as e:\n if bridgectl.log:\n bridgectl.log.critical(\n \"Failed to process event for rule {}\".format(\n self.name),\n exc_info=(type(e),\n e,\n e.__traceback__))\n pass\n if event['event'] == 'time tick':\n if self.period in event[\"starts\"]:\n self._varstate = self.reset()",
"def run(self):\n if self.next_state == \"initialize_rexarm\":\n self.initialize_rexarm()\n\n if self.next_state == \"idle\":\n self.idle()\n\n if self.next_state == \"estop\":\n self.estop()\n\n if self.next_state == \"execute_tp\":\n self.execute_tp()\n\n if self.next_state == \"execute\":\n self.execute()\n\n if self.next_state == \"calibrate\":\n self.calibrate()\n\n if self.next_state == \"manual\":\n self.manual()\n\n if self.next_state == \"learn\":\n self.learn()\n\n if self.next_state == \"remember\":\n self.remember()\n\n if self.next_state == \"write\":\n self.write()\n\n if self.next_state == \"get_color\":\n self.get_color()\n\n if self.next_state == \"find_blocks\":\n self.find_blocks()\n\n # if self.next_state == \"dance\":\n # self.execute_dance()",
"def run(self):\n\n for key, value in self.source.iteritems():\n if key in self._handler:\n # call the corresponding handler\n method = getattr(self, self._handler[key])\n method(value)\n elif key in self._attr:\n self._assign(self._attr[key], value)\n elif key in self._ignore:\n continue\n else:\n raise regrws.restful.RegRwsError('%s has no attribute corresponding to key %s' % (self.payload.__class__, key))\n return self.payload",
"def process_event(self, event):\n if not self.frozen:\n if event[\"event\"] == self.event:\n if self.what is None or event[\"target\"].startswith(self.what):\n self._varstate = event\n try:\n for key in self.subval:\n self._varstate = self._varstate[key]\n\n if bridgectl.log:\n bridgectl.log.debug(\n \"New value for {} is {}\".format(\n self.name,\n self._varstate))\n except Exception as e:\n if bridgectl.log:\n bridgectl.log.critical(\n \"Failed to process event for {}\".format(\n self.name),\n exc_info=(type(e),\n e,\n e.__traceback__))\n pass\n if event['event'] == 'time tick':\n if self.period in event[\"starts\"]:\n self._varstate = self.reset()",
"def handle(self, rsm_ctx):\n rsm_ctx.log(\n 'info',\n 'Starting executing for \"list\" operation for get usage ...'\n )\n\n execution_id = rsm_ctx.run_execution(wait=False)\n rsm_ctx.log(\n 'info',\n 'Execution started with ID: {} ...'.format(execution_id)\n )",
"def run_state_machine(self):\n\n # Initial check for startup state\n if self.status == 'STARTUP':\n self.update_action(self.status, None, None, None)\n # Get apriltag data from the detector\n\n return_tag_data = self.apriltag_detector.get_apriltag_data()\n\n # Check if it saw an apriltag\n if len(return_tag_data) != 0:\n\n # Iterate over the data from each of the apriltags seen\n for i in range(len(return_tag_data)):\n\n # Temp apriltag data\n temp_tag = return_tag_data[i]\n\n print(\"current state: \", self.status)\n\n print(\"Statemachine x VAL: \", temp_tag[1])\n\n print(\"Statemachine y VAL: \", temp_tag[2])\n\n # If the smallbot is currently in the CREEP_FORWARD state\n # handle cases for when the camera sees the side tags while it is driving forward\n if self.status == 'CREEP_FORWARD' and (temp_tag[0] == self.back_tag):\n self.current_tag = temp_tag[0]\n print(\"ATTEMPTING TO UPDATE STATUS 1\")\n self.update_action(self.status, temp_tag[1], temp_tag[2], temp_tag[3])\n # Ignore cases when the camera sees the side apriltags\n elif self.status == 'CREEP_FORWARD' and (temp_tag[0] == self.right_tag):\n print(\"ATTEMPTING TO UPDATE STATUS 2\")\n pass\n elif self.status == 'CREEP_FORWARD' and (temp_tag[0] == self.left_tag):\n print(\"ATTEMPTING TO UPDATE STATUS 2\")\n pass\n else:\n self.current_tag = temp_tag[0]\n print(\"ATTEMPTING TO UPDATE STATUS 3\")\n self.update_action(self.status, temp_tag[1], temp_tag[2], temp_tag[3])\n\n # If the camera did not see any apriltags\n else:\n self.current_tag = None\n self.update_action(None, None, None, None)",
"def _localSetState(self,pdict):\n self.mean = pdict.pop('mean' )\n self.sigma = pdict.pop('sigma')",
"def _localSetState(self,pdict):\n self.mean = pdict.pop('mean' )\n self.sigma = pdict.pop('sigma')",
"def proc_status_effect(\n self,\n status_att=None,\n status_val=False,\n resist=None\n ):\n\n # If a resist attribute is passed, the player\n # will attempt to resist the status change\n\n if resist is not None:\n succ,bonus = RandomRoll(\n self,\n getattr(self,resist),\n 75\n )\n else:\n succ = False\n\n if succ:\n pass\n else:\n setattr(self,status_att,status_val)",
"def _localSetState(self,pdict):\n self.lambdaVar = pdict.pop('lambda')\n self.low = pdict.pop('low' )",
"def state_processing_enter(cfg, app, win):",
"def execute(self):\n self.driver.run(ffd_order=self.ffd_order, case_id=self._case_id)\n\n valids = self._valid_dict\n\n # now update boundary outputs\n for expr in self._exprmapper.get_output_exprs():\n if valids[expr.text] is False:\n srctxt = self._exprmapper.get_source(expr.text)\n srcexpr = self._exprmapper.get_expr(srctxt)\n expr.set(srcexpr.evaluate(), src=srctxt)\n # setattr(self, dest, srccomp.get_wrapped_attr(src))\n else:\n # PassthroughProperty always valid for some reason.\n try:\n dst_type = self.get_trait(expr.text).trait_type\n except AttributeError:\n pass\n else:\n if isinstance(dst_type, PassthroughProperty):\n srctxt = self._exprmapper.get_source(expr.text)\n srcexpr = self._exprmapper.get_expr(srctxt)\n expr.set(srcexpr.evaluate(), src=srctxt)",
"def execute(self) -> None:\n self.state()",
"def psychometrics_data_update_handler(state):\r\n try:\r\n state = json.loads(sm.state)\r\n done = state['done']\r\n except:\r\n log.exception(\"Oops, failed to eval state for %s (state=%s)\" % (sm, sm.state))\r\n return\r\n\r\n pmd.done = done\r\n try:\r\n pmd.attempts = state.get('attempts', 0)\r\n except:\r\n log.exception(\"no attempts for %s (state=%s)\" % (sm, sm.state))\r\n\r\n try:\r\n checktimes = eval(pmd.checktimes) # update log of attempt timestamps\r\n except:\r\n checktimes = []\r\n checktimes.append(datetime.datetime.now(UTC))\r\n pmd.checktimes = checktimes\r\n try:\r\n pmd.save()\r\n except:\r\n log.exception(\"Error in updating psychometrics data for %s\" % sm)",
"def run_states(self):\n if (self.state == \"off\"):\n if (self.in_power.value == 1):\n self.off_to_on()\n \n elif self.state == \"on\":\n if (self.in_power.value == 0):\n self.any_to_off()\n elif (self.in_alert.value == 1):\n self.on_to_alert()\n \n elif self.state == \"alert\":\n if (self.in_power.value == 0):\n self.any_to_off()\n elif (self.in_alert.value == 0):\n self.alert_to_was_alert()\n\n elif self.state == \"was_alert\":\n if (self.in_power.value == 0):\n self.any_to_off()",
"def process_property(self, resources, resource, model, prop, context):\n pass",
"def execute(self, cmd, state):\n state[:] = interface.execute_arm_command(cmd, 0)\n self.env.update() # note that the sim update is called twice, once here and once by the hand's sim_connection",
"def _do_compute(self, var_map):\n raise Exception(\"Not implemented. Subclass responsibility\")"
] | [
"0.6495517",
"0.6103459",
"0.58350176",
"0.56913376",
"0.5490284",
"0.5385307",
"0.53602254",
"0.5344496",
"0.5338183",
"0.5319866",
"0.5295569",
"0.5289274",
"0.5206606",
"0.5166721",
"0.51118433",
"0.5051935",
"0.5042953",
"0.5034518",
"0.5019281",
"0.5019281",
"0.4992732",
"0.49645045",
"0.49579546",
"0.495281",
"0.49518782",
"0.4946695",
"0.49088487",
"0.48932514",
"0.48912778",
"0.4889538"
] | 0.70196915 | 0 |
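The `handle` method in this row only retrieves the runtime properties of a finished execution and delegates to `_process_runtime_properties`, which is not shown here. The sketch below guesses at what such a helper could look like, modelled on the `_set_value` snippet that appears among this record's negatives; every other name and the demo values are assumptions for illustration only.

```python
# Hypothetical sketch of _process_runtime_properties(), based on the
# _set_value() helper visible elsewhere in this dataset. Not the real plugin code.
VALUE_TYPE_USAGE = 'usage'


def _set_value(rsm_ctx, value, value_type, resource_name=None):
    value_dict = {value_type: value}
    if resource_name:
        value_dict['resource_name'] = resource_name
    rsm_ctx.log('debug', 'Setting {}'.format(value_dict))
    rsm_ctx.set_value(**value_dict)


def _process_runtime_properties(rsm_ctx, runtime_properties, value_type):
    # Assume each runtime property maps a resource name to its reported value.
    for resource_name, value in runtime_properties.items():
        _set_value(rsm_ctx, value, value_type, resource_name=resource_name)


if __name__ == '__main__':
    from types import SimpleNamespace
    ctx = SimpleNamespace(
        log=lambda level, msg: print(level, msg),
        set_value=lambda **kwargs: print('set_value', kwargs),
    )
    _process_runtime_properties(ctx, {'cpu': 4, 'memory': 2048}, VALUE_TYPE_USAGE)
```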
Key in SUPPRESS list | def _suppress(self, key):
return key in self.SUPPRESS | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def keys(self, data, installer_context):",
"def key(key):\n return key",
"def secondary_keys(self):",
"def _GetKeyString(self):",
"def _GetKeyString(self):",
"def extract_key_usage(self, ext):\n res = []\n fields = KU_FIELDS[:]\n\n # \"error-on-access\", real funny\n if not ext.key_agreement:\n fields.remove('encipher_only')\n fields.remove('decipher_only')\n\n for k in fields:\n val = getattr(ext, k, False)\n if val:\n res.append(k)\n return res",
"def press(self, key):\n self.view.filter_short_keys([key], [])\n return self",
"def handle_key(self, key):\n pass",
"def __getitem__(self, key):\n\n return self.additional[key]",
"def __getitem__(self, key):\n\n return self.additional[key]",
"def key():",
"def _extra_keys(self):\r\n return []",
"def keys():",
"def cli(ctx):\n return ctx.gi.cannedkeys.get_keys()",
"def keys(self):\n return ['title', 'keywords', 'description', 'url', 'content_file',\n 'language', 'phone', 'email']",
"def __strengthen_key(self, key):\n if not self.contains_lowercase(key):\n index = random.randint(0, len(key))\n key = key[:index] + random.choice(self.LOWERCASE_LETTERS) + key[index:]\n if not self.contains_uppercase(key):\n index = random.randint(0, len(key))\n key = key[:index] + random.choice(self.UPPERCASE_LETTERS) + key[index:]\n if not self.contains_numbers(key):\n index = random.randint(0, len(key))\n key = key[:index] + random.choice(self.NUMBERS) + key[index:]\n if not self.contains_symbols(key):\n index = random.randint(0, len(key))\n key = key[:index] + random.choice(self.SYMBOLS) + key[index:]\n return key",
"def key(self):\n\n return self.qualifiers.get(\"key\", False)",
"def is_perCapita(key):",
"def ask_keys(self, update, context):\r\n update.message.reply_text('Введите новый ключ')\r\n return self.LISTEN",
"def keysAll():",
"def __getitem__(self, key):\n # TODO - iteratable and other dict-like functions\n if key not in self.__products__:\n raise SatProcessError(\"%s product not available in %s\" % (key, self.classname()))\n return self.__products__[key]",
"def membership(self, key):\n pass",
"def key_usages(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"key_usages\")",
"def key_usages(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"key_usages\")",
"def lookup(self, key):",
"def handle_key(self, k):\n\t\treturn False",
"def check_keys(self):",
"def accesskey(request):\n return request.config.getoption(\"--accesskey\")",
"def recipient_public_key(self):",
"def Keys(self) -> _n_1_t_4:"
] | [
"0.59111524",
"0.56453145",
"0.55836684",
"0.547909",
"0.547909",
"0.544734",
"0.5446958",
"0.542185",
"0.5421237",
"0.5421237",
"0.5408113",
"0.54074985",
"0.5386925",
"0.5340487",
"0.5334346",
"0.5295114",
"0.52809834",
"0.52733964",
"0.52320004",
"0.5206396",
"0.5179361",
"0.5170929",
"0.51697755",
"0.51697755",
"0.5168828",
"0.51669866",
"0.51593703",
"0.5157918",
"0.5152766",
"0.5136833"
] | 0.57660186 | 1 |
Translate key by translate dict | def _translate(self, key):
return self.TRANSLATE.get(key, key) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def translate(key, dictionary):\n if key in dictionary:\n return dictionary[key]\n if key in dictionary.values():\n return key\n raise Exception(\"no entry {} in dictionary {}\".format(key, dictionary))",
"def _make_trans_from_dict(translations):\n\n from_str = ''\n to_str = ''\n for key in translations:\n from_str += key\n to_str += translations[key]\n return str.maketrans(from_str, to_str)",
"def translator(dict):\n f = lambda match: dict.get(match.group(), match.group())\n return lambda expression: _word_pattern.sub(f,expression)",
"def translate(word1, key, word2):\n key = dict(zip(word1, key))\n return ''.join(key[sym] for sym in word2)",
"def translate(word, translateDict):\n\n translation = \"\"\n for char in word:\n translation += translateDict.get(char,\" \")\n\n return translation",
"def translate():\n pass",
"def f_translate_key(self, key):\n if isinstance(key, int):\n if key == 0:\n key = self.v_name\n else:\n key = self.v_name + \"_%d\" % key\n return key",
"def translate(phrase_dict: dict, file_text: list, dest=language):\n translator = Translator()\n translated_phrase_list = translator.translate(list(phrase_dict.values()), dest=dest) \n print(Fore.GREEN + \"Translated successfully!!\" + Fore.RESET)\n #phrase_dict = {k: phrase.text for phrase in translated_phrase_list for k in phrase_dict}\n key_list = list(phrase_dict.keys())\n for i in range(len(key_list)): \n phrase_dict[key_list[i]] = translated_phrase_list[i].text\n for index_line, translated_phrase in phrase_dict.items():\n print(Fore.CYAN + f'Writen as: {translated_phrase}')\n file_text[index_line+1] = f'msgstr \"{translated_phrase}\"'\n return file_text",
"def translate(self):\n pass",
"def get_translation(self):\n trans_keys = ''.join(self._trans_dict.keys())\n trans_values = ''.join(self._trans_dict.values())\n\n trans_table = string.maketrans(trans_keys, trans_values)\n translation = self._puzzle.translate(trans_table)\n return translation",
"def translate(self, phrase):\n\n if phrase not in TRANSLATIONS or self.ui_lang_code not in TRANSLATIONS[phrase]:\n return phrase\n return TRANSLATIONS[phrase][self.ui_lang_code]",
"def translate(translate_from, translate_to, string_to_translate=\"\"):\n dictionary = DICTIONARIES.get(\"%s_%s\" % (translate_from, translate_to))\n if not dictionary:\n print(\"Offline: No such translation direction in dictionary: %s-%s\" % (translate_from, translate_to))\n else:\n words = [dictionary.get(w, w) for w in string_to_translate.split(' ')]\n print(\"Offline: %s\" % (' '.join(words)))",
"def translate_to(common_form, target):\r\n # retrieve the correct translation dictionary\r\n target_dict = get_dict(target)\r\n # recreate the form with the translated keys\r\n target_form = {target_dict[key]: common_form[key]\r\n for key in target_dict.keys()}\r\n return target_form",
"def test_translate_unique_langs(self):\n\n trans_msgs_dict = MessageController.translate_unique_langs({'2': 'es', '4': 'fr'}, \n 'hi', 'en', False, False)\n\n self.assertEqual(trans_msgs_dict, {'es': u'{hola}', 'fr': u'salut'})",
"def __translate(obj, names):\n\t\t\tif isinstance(obj, int):\n\t\t\t\treturn names[obj]\n\t\t\telif isinstance(obj, list):\n\t\t\t\treturn [__translate(x, names) for x in obj]\n\t\t\telif isinstance(obj, dict):\n\t\t\t\tnew_obj = {}\n\t\t\t\tfor k,v in obj.items():\n\t\t\t\t\tnew_obj[__translate(k, names)] = __translate(v, names)\n\t\t\t\treturn new_obj\n\t\t\telse:\n\t\t\t\treturn obj",
"def TranslateKeyValue(key, value):\n key = SETTINGS.get(key, key)\n if key not in SETTINGS_INVERSE:\n raise Exception(\"Didn't understand key %s\" % key)\n\n value = str(value)\n valueMap = VALUES.get(key, {})\n if valueMap:\n value = valueMap.get(value, value)\n if not value.isdigit() or int(value) < 0 or int(value) >= len(valueMap):\n raise Exception(\"Didn't understand value %s for key %s\" % (value, key))\n\n else:\n parts = (value[1:] if value.startswith('-') else value).split('.')\n error = None\n if len(parts) is 0:\n error = 'Empty'\n elif len(parts) > 2:\n error = 'Too many . in'\n elif not parts[0].isdigit():\n error = 'Non-digit in'\n elif len(parts) is 2 and not parts[1].isdigit():\n error = 'Non-digit in'\n\n if error:\n raise 'Exception: %s number %s for key %s' % (value, key)\n\n return key, value",
"def TranslateDict(d):\n\n return dict(TranslateKeyValue(k, v) for k, v in d.iteritems())",
"def trans_format(trans_key, **kwargs):\n translated: str = _(trans_key)\n return translated.format(**kwargs)",
"def interpolate_insted_of_translate(\n self, msgid, mapping=None, *args, **kw): # pragma: no cover webdriver\n return zope.i18n.interpolate(msgid, mapping)",
"def transkey(self, keycode, keyname):\n self.logger.debug(\"keycode=%d keyname='%s'\" % (\n keycode, keyname))\n\n try:\n return self._keytbl[keyname.lower()]\n\n except KeyError:\n return keyname",
"def transform_key(self, key):\n return key.lower()",
"def question_new_translate():",
"def map_caesar(key, plaintext):\n letters = string.ascii_lowercase\n mask = letters[key:] + letters[:key]\n transtab = str.maketrans(letters, mask)\n return plaintext.translate(transtab)",
"def transbrl (arg):\r\n return n.translate(p.translate(arg))",
"def translation(self, d):\n newreg = self.copy()\n _translate(newreg, d)\n return newreg",
"def translate(text, translation):\n new = [] \n for i in xrange(0, len(text)):\n char = text[i]\n try:\n new.append(translation[char])\n except KeyError:\n new.append('_')\n return ''.join(new)",
"def retranslate(self):\r\n pass",
"def retranslate(self):\r\n pass",
"def substitution(plainText, key):\n return plainText",
"def _remap_key(key):\n if key in KNOWN_PARAMS:\n return key\n if key.lower() in known_params:\n return KNOWN_PARAMS[known_params.index(key.lower())]\n return key"
] | [
"0.7452892",
"0.7021639",
"0.6875848",
"0.6788803",
"0.6653619",
"0.65519345",
"0.6360589",
"0.6328695",
"0.62527615",
"0.6251204",
"0.6133195",
"0.6096026",
"0.6090223",
"0.60032105",
"0.5933524",
"0.5926303",
"0.59189796",
"0.59064764",
"0.59049064",
"0.5901391",
"0.5849293",
"0.58423674",
"0.5841128",
"0.58349276",
"0.5831565",
"0.5803098",
"0.57700914",
"0.57700914",
"0.5768681",
"0.57587385"
] | 0.74695766 | 0 |
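The two one-line helpers in this record and the previous one (`_translate` and `_suppress`) suggest a simple key-normalization step driven by class-level `TRANSLATE` and `SUPPRESS` tables. The sketch below shows how such helpers might be combined; the class name, the table contents and the `normalize` method are assumptions for illustration.

```python
# Illustrative key normalization using SUPPRESS/TRANSLATE tables.
# The concrete table contents are made up for the example.
class KeyNormalizer:
    SUPPRESS = ['internal_id', 'debug_info']
    TRANSLATE = {'cores': 'cpu', 'ram': 'memory'}

    def _suppress(self, key):
        return key in self.SUPPRESS

    def _translate(self, key):
        return self.TRANSLATE.get(key, key)

    def normalize(self, data):
        # Drop suppressed keys and rename translated ones.
        return {
            self._translate(key): value
            for key, value in data.items()
            if not self._suppress(key)
        }


print(KeyNormalizer().normalize({'cores': 8, 'ram': 16, 'debug_info': '...'}))
# -> {'cpu': 8, 'memory': 16}
```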
Check whether this handler supports the given 'rsm_ctx'; the instance type should be NODE_TYPE_QUOTA and SYSTEM_NAME_OPENSTACK should appear in 'system_name'. | def can_handle(self, rsm_ctx):
return super(OpenstackQuotaHandler, self).can_handle(rsm_ctx) and \
SYSTEM_NAME_OPENSTACK in rsm_ctx.instance.system_name | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def can_handle(self, rsm_ctx):\n return rsm_ctx.instance.type == NODE_TYPE_QUOTA",
"def can_handle(self, rsm_ctx):\n return not rsm_ctx.instance.type",
"def handle(self, rsm_ctx):\n rsm_ctx.log(\n 'info',\n 'Node instance has type with is not supported by '\n 'Resource Management Plugin. Skipping'\n )",
"def can_handle(self, rsm_ctx):\n return rsm_ctx.instance.type == NODE_TYPE_USAGE",
"def can_handle(self, rsm_ctx):\n return rsm_ctx.instance.type == NODE_TYPE_USAGE",
"def can_handle(self, rsm_ctx):\n return rsm_ctx.instance.type == NODE_TYPE_USAGE",
"def can_handle(self, rsm_ctx):\n return rsm_ctx.instance.type == NODE_TYPE_RESULT",
"def can_handle(self, rsm_ctx):\n return rsm_ctx.instance.type == NODE_TYPE_PROJECT",
"def can_handle(self, rsm_ctx):\n return False",
"def check_selinux_status(self):\n\n raise NotImplementedError()",
"def check_kernel(cls):\n pass",
"def context(self) -> ContextType:\n return ContextType.SYSTEM",
"def is_system(self) -> bool:",
"def isSystem(self):\n return _libsbml.XMLError_isSystem(self)",
"def system_pmu_type(pmu_name):\n if pmu_exists(pmu_name):\n return SysPMU(pmu_name).type\n else:\n return None",
"def handle(self, rsm_ctx):\n self._process_runtime_properties(\n rsm_ctx,\n rsm_ctx.instance.runtime_properties,\n self.VALUE_TYPE_QUOTA\n )",
"def min_system_resources(node):\n\n min_sys_res = True\n\n # CPUs\n if \"layout\" in node[\"cpu\"]:\n total_cpus = len(node[\"cpu\"][\"layout\"])\n if total_cpus < 2:\n print(\n \"\\nThere is only {} CPU(s) available on this system. \"\n \"This is not enough to run VPP.\".format(total_cpus)\n )\n min_sys_res = False\n\n # System Memory\n if (\n \"free\" in node[\"hugepages\"]\n and \"memfree\" in node[\"hugepages\"]\n and \"size\" in node[\"hugepages\"]\n ):\n free = node[\"hugepages\"][\"free\"]\n memfree = float(node[\"hugepages\"][\"memfree\"].split(\" \")[0])\n hugesize = float(node[\"hugepages\"][\"size\"].split(\" \")[0])\n\n memhugepages = MIN_TOTAL_HUGE_PAGES * hugesize\n percentmemhugepages = (memhugepages / memfree) * 100\n if free is \"0\" and percentmemhugepages > MAX_PERCENT_FOR_HUGE_PAGES:\n print(\n \"\\nThe System has only {} of free memory. You will not \"\n \"be able to allocate enough Huge Pages for VPP.\".format(\n int(memfree)\n )\n )\n min_sys_res = False\n\n return min_sys_res",
"def handle(self, rsm_ctx):\n pass",
"def _check_family(self):\n return",
"def is_system(self) -> undefined.UndefinedOr[bool]:",
"def get_os_type(self):\n\t\treturn call_sdk_function('PrlVmCfg_GetOsType', self.handle)",
"def is_node_support(self, node):\n return self.node_attribute(key=node, name=\"type\") == \"support\"",
"def test_type(self):\n assert is_consistent_type(Context, \"Context\", TLSv1_METHOD)",
"def check_available():\n\n rm = current_app.config['rm_object']\n\n return rm.check_availability()",
"def check_tree_type(tree):\n return tree.type in ref",
"def check_global_request(self, kind, msg):\n return False",
"def check_supported_features(self):",
"def _check_groups_kvm():\n if not _user_belongs_to('libvirtd') and not _user_belongs_to('kvm'):\n _raise_group_error('kvm')",
"def test_os_node(self):\n self.assertEqual(self.settings.OS_NODE, platform.node())",
"def is_system(self):\n\t\treturn self.__is_system"
] | [
"0.6766187",
"0.6549585",
"0.6380219",
"0.62915754",
"0.62915754",
"0.62915754",
"0.6178638",
"0.58965355",
"0.5851137",
"0.5643693",
"0.51984125",
"0.51205176",
"0.5115497",
"0.50924265",
"0.50415015",
"0.49992657",
"0.4892618",
"0.4861141",
"0.4819281",
"0.47552007",
"0.47480687",
"0.47225004",
"0.47159004",
"0.46508598",
"0.4646227",
"0.4639464",
"0.4619887",
"0.4586204",
"0.45511043",
"0.45461357"
] | 0.70649564 | 0 |
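This row narrows an inherited check: the subclass first defers to the base class via `super()` and then additionally requires an OpenStack system name. A minimal sketch of that layering is below; the base class, constants and context shape are assumptions for illustration.

```python
# Sketch of specialising a generic quota handler for one system.
# Base class, constants and the rsm_ctx attributes are assumed.
NODE_TYPE_QUOTA = 'quota'
SYSTEM_NAME_OPENSTACK = 'openstack'


class QuotaHandler:
    def can_handle(self, rsm_ctx):
        return rsm_ctx.instance.type == NODE_TYPE_QUOTA


class OpenstackQuotaHandler(QuotaHandler):
    def can_handle(self, rsm_ctx):
        # Accept only quota nodes whose system name mentions OpenStack.
        return (super(OpenstackQuotaHandler, self).can_handle(rsm_ctx)
                and SYSTEM_NAME_OPENSTACK in rsm_ctx.instance.system_name)
```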
Check whether this handler supports the given 'rsm_ctx'; the instance type should be NODE_TYPE_RESULT. | def can_handle(self, rsm_ctx):
return rsm_ctx.instance.type == NODE_TYPE_RESULT | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def can_handle(self, rsm_ctx):\n return not rsm_ctx.instance.type",
"def handle(self, rsm_ctx):\n rsm_ctx.log(\n 'info',\n 'Node instance has type with is not supported by '\n 'Resource Management Plugin. Skipping'\n )",
"def can_handle(self, rsm_ctx):\n return rsm_ctx.instance.type == NODE_TYPE_USAGE",
"def can_handle(self, rsm_ctx):\n return rsm_ctx.instance.type == NODE_TYPE_USAGE",
"def can_handle(self, rsm_ctx):\n return rsm_ctx.instance.type == NODE_TYPE_USAGE",
"def can_handle(self, rsm_ctx):\n return rsm_ctx.instance.type == NODE_TYPE_PROJECT",
"def can_handle(self, rsm_ctx):\n return False",
"def can_handle(self, rsm_ctx):\n return rsm_ctx.instance.type == NODE_TYPE_QUOTA",
"def handle(self, rsm_ctx):\n pass",
"def do_check(self):\n res = self.entity.do_check(self.context)\n if res:\n return self.RES_OK, 'Node check succeeded.'\n else:\n return self.RES_ERROR, 'Node check failed.'",
"def check_r_type(r):\n if type(r) is str:\n raise TypeError('Get Error message.')",
"def check(self):\n self.isNodes = False\n self.isFixable = False\n nodeType = self.get_parser.get('SETTINGS', 'settingsinfonode')\n self.setStatus('OK')\n if not len(pm.ls(type=nodeType)):\n self.setStatus('WARNING')\n self.setErrorMessage('No %s node found in the scene.' % nodeType)\n return False, ''\n elif len(pm.ls(type=nodeType)) > 1:\n self.setStatus('ERROR')\n self.setErrorMessage('More than 1 %s node found in the scene.' % nodeType)\n return False, ''\n return True, pm.ls(type=nodeType)[0]",
"def expects_result(self, command):\n return isinstance(command, (self.package(\"Syntax\").Operator,\n self.package(\"Syntax\").Formule))",
"def test_type(self):\n assert is_consistent_type(Context, \"Context\", TLSv1_METHOD)",
"def test_get_node_status(self):\n pass",
"def check_selinux_status(self):\n\n raise NotImplementedError()",
"def can_handle(self, rsm_ctx):\n return super(OpenstackQuotaHandler, self).can_handle(rsm_ctx) and \\\n SYSTEM_NAME_OPENSTACK in rsm_ctx.instance.system_name",
"def check_tree_type(tree):\n return tree.type in ref",
"def is_node_support(self, node):\n return self.node_attribute(key=node, name=\"type\") == \"support\"",
"def get_node_type(self, node):\n raise NotImplementedError()",
"def getType_(self, ctx):\n # type: (Optional[RelayParser.Type_Context]) -> Optional[ty.Type]\n\n if ctx is None:\n return None\n\n return self.visit(ctx)",
"def CheckType(self, *args, **kwargs):\n pass",
"async def checktype(self, ctx:commands.Context):\r\n\r\n t = await self.GetChannelType(ctx.guild, ctx.channel.id)\r\n if t == 'none':\r\n await ctx.send(\r\n f'<#{ctx.channel.id}> is a normal channel (use `register <channel type>` to make this a specialized channel)')\r\n else:\r\n await ctx.send(f'<#{ctx.channel.id}> is a {t}')",
"def _cim_result_type():\n return {\n 'name' : 'cim_result_type',\n 'is_open' : False,\n 'doc' : None,\n 'members' : [\n ('plot', None),\n ('document', None),\n ('logfile', None),\n ],\n }",
"def check(self):\n self.isNodes = True\n self.isFixable = True\n errorNodes = list()\n for each in pm.ls(type='unknown'):\n errorNodes.append(each)\n self.status = 'OK'\n if len(errorNodes):\n self.setErrorNodes(errorNodes)\n self.setStatus('ERROR')",
"def handle(self, rsm_ctx):\n runtime_properties = rsm_ctx.get_execution_result()\n\n rsm_ctx.log(\n 'info',\n 'Got {} runtime_properties after execution',\n runtime_properties.keys()\n )\n\n self._process_runtime_properties(\n rsm_ctx,\n runtime_properties,\n self.VALUE_TYPE_USAGE\n )",
"def check_status_and_state(self, results, operation=''):\n\n omci_msg = results.fields['omci_message'].fields\n status = omci_msg['success_code']\n error_mask = omci_msg.get('parameter_error_attributes_mask', 'n/a')\n failed_mask = omci_msg.get('failed_attributes_mask', 'n/a')\n unsupported_mask = omci_msg.get('unsupported_attributes_mask', 'n/a')\n\n self.log.debug(\"OMCI Result: %s\", operation, omci_msg=omci_msg,\n status=status, error_mask=error_mask,\n failed_mask=failed_mask, unsupported_mask=unsupported_mask)\n\n if status == RC.Success:\n self.strobe_watchdog()\n return True\n\n elif status == RC.InstanceExists:\n return False",
"def checkStructure(self, result, resultType):\n res = True\n if resultType:\n try:\n structure = json.loads(resultType)\n result_structure = self.getStructure(result)\n res = structure[\"type\"] == result_structure[\"type\"] and all(elem in list(result_structure[\"columns\"])\n for elem in list(structure[\"columns\"])) and all(elem in list(result_structure[\"indexes\"])\n for elem in list(structure[\"indexes\"]))\n except Exception as ex:\n print(f\"Error checking structure: {ex}\")\n\n return res",
"def test_type_result(self):\n result = self.parser.msg_analysis(MSG_TEST_NO_RESULT[0])\n assert isinstance(result, list)",
"def get_type_check(self, arg, option):\n pass"
] | [
"0.6539492",
"0.6527251",
"0.6194596",
"0.6194596",
"0.6194596",
"0.58348286",
"0.57641536",
"0.57499075",
"0.52276134",
"0.5095498",
"0.5037327",
"0.4948946",
"0.49151015",
"0.48483157",
"0.4833058",
"0.47804075",
"0.47727802",
"0.47572267",
"0.47519362",
"0.46803394",
"0.4646102",
"0.46276158",
"0.45647562",
"0.45590168",
"0.455689",
"0.45486692",
"0.45476967",
"0.45420635",
"0.45132682",
"0.450023"
] | 0.77807736 | 0 |
Logic to execute for the given 'rsm_ctx': dump the gathered state to runtime properties. | def handle(self, rsm_ctx):
rsm_ctx.log(
'info',
'Dumping gathered data to runtime_properties of {} node instance',
rsm_ctx.instance.id
)
rsm_ctx.add_result_instance_id()
rsm_ctx.set_runtime_properties({
'data': rsm_ctx.dump()
}) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def handle(self, rsm_ctx):\n pass",
"def handle(self, rsm_ctx):\n runtime_properties = rsm_ctx.get_execution_result()\n\n rsm_ctx.log(\n 'info',\n 'Got {} runtime_properties after execution',\n runtime_properties.keys()\n )\n\n self._process_runtime_properties(\n rsm_ctx,\n runtime_properties,\n self.VALUE_TYPE_USAGE\n )",
"def handle(self, rsm_ctx):\n rsm_ctx.log(\n 'info',\n 'Starting executing for \"list\" operation for get usage ...'\n )\n\n execution_id = rsm_ctx.run_execution(wait=False)\n rsm_ctx.log(\n 'info',\n 'Execution started with ID: {} ...'.format(execution_id)\n )",
"def handle(self, rsm_ctx):\n rsm_ctx.log('info', 'Executing \"list\" operation for get usage ...')\n\n runtime_properties = rsm_ctx.run_execution()\n rsm_ctx.log(\n 'info',\n 'Got {} runtime_properties after execution',\n runtime_properties.keys()\n )\n\n self._process_runtime_properties(\n rsm_ctx,\n runtime_properties,\n self.VALUE_TYPE_USAGE\n )",
"def run(self):\n if self.next_state == \"initialize_rexarm\":\n self.initialize_rexarm()\n\n if self.next_state == \"idle\":\n self.idle()\n\n if self.next_state == \"estop\":\n self.estop()\n\n if self.next_state == \"execute_tp\":\n self.execute_tp()\n\n if self.next_state == \"execute\":\n self.execute()\n\n if self.next_state == \"calibrate\":\n self.calibrate()\n\n if self.next_state == \"manual\":\n self.manual()\n\n if self.next_state == \"learn\":\n self.learn()\n\n if self.next_state == \"remember\":\n self.remember()\n\n if self.next_state == \"write\":\n self.write()\n\n if self.next_state == \"get_color\":\n self.get_color()\n\n if self.next_state == \"find_blocks\":\n self.find_blocks()\n\n # if self.next_state == \"dance\":\n # self.execute_dance()",
"def state_processing_do(cfg, app, win, events):",
"def run(self):\r\n self.env.process(self.rw_pifo_sm())",
"def state_processing_enter(cfg, app, win):",
"def run_state_machine(self):\n\n # Initial check for startup state\n if self.status == 'STARTUP':\n self.update_action(self.status, None, None, None)\n # Get apriltag data from the detector\n\n return_tag_data = self.apriltag_detector.get_apriltag_data()\n\n # Check if it saw an apriltag\n if len(return_tag_data) != 0:\n\n # Iterate over the data from each of the apriltags seen\n for i in range(len(return_tag_data)):\n\n # Temp apriltag data\n temp_tag = return_tag_data[i]\n\n print(\"current state: \", self.status)\n\n print(\"Statemachine x VAL: \", temp_tag[1])\n\n print(\"Statemachine y VAL: \", temp_tag[2])\n\n # If the smallbot is currently in the CREEP_FORWARD state\n # handle cases for when the camera sees the side tags while it is driving forward\n if self.status == 'CREEP_FORWARD' and (temp_tag[0] == self.back_tag):\n self.current_tag = temp_tag[0]\n print(\"ATTEMPTING TO UPDATE STATUS 1\")\n self.update_action(self.status, temp_tag[1], temp_tag[2], temp_tag[3])\n # Ignore cases when the camera sees the side apriltags\n elif self.status == 'CREEP_FORWARD' and (temp_tag[0] == self.right_tag):\n print(\"ATTEMPTING TO UPDATE STATUS 2\")\n pass\n elif self.status == 'CREEP_FORWARD' and (temp_tag[0] == self.left_tag):\n print(\"ATTEMPTING TO UPDATE STATUS 2\")\n pass\n else:\n self.current_tag = temp_tag[0]\n print(\"ATTEMPTING TO UPDATE STATUS 3\")\n self.update_action(self.status, temp_tag[1], temp_tag[2], temp_tag[3])\n\n # If the camera did not see any apriltags\n else:\n self.current_tag = None\n self.update_action(None, None, None, None)",
"def can_handle(self, rsm_ctx):\n return rsm_ctx.instance.type == NODE_TYPE_RESULT",
"def execute(self) -> None:\n self.state()",
"def run_game_logic(self):\n pass",
"def handleCondorStatusService(self):\n procScript = \"cmssw_handle_condor_status_service.py\"\n cmd = \"%s --input_pkl %s --output_pkl %s --name %s\" % (\n procScript,\n os.path.join(self.stepSpace.location, self.configPickle),\n os.path.join(self.stepSpace.location, self.configPickle),\n self.step.data._internal_name)\n self.scramRun(cmd)\n\n return",
"def _eval_state(hass):\n state_str = ''.join(['1' if val else '0' for val in PERSIST['states']])\n state = int(state_str, 2)\n mode = PERSIST['mode']\n output = state in SCHEDULES[mode][0]\n _LOGGER.debug('Eval: %s %s = %s',\n PERSIST['mode'], str(PERSIST['states']), repr(output))\n\n if output != PERSIST['last_cmd']:\n PERSIST['last_cmd'] = output\n if output:\n _call_service(hass, SCHEDULES[mode][1], 'turn_on')\n else:\n _call_service(hass, SCHEDULES[mode][1], 'turn_off')",
"def __call__(self, context):\n msg = context.latest_msg()\n # deal with some common cases\n\n # 调侃\n if msg.intent == 'tune':\n return self.utter_default, self\n\n self.on_process_message(msg)\n\n self.on_enter_state(context)\n\n ac, st = self.run(context)\n\n if st == self:\n self.repeat_times += 1\n else:\n self.on_finish_state(context)\n\n if self.repeat_times > 2:\n ac, st = self.turn_to_manual_custom_service(context), StateFinish()\n\n return ac, st",
"def execute(self, context):\n logger.info(\"///////////////////////////////////////////////////////////////////////\")\n logger.info(\"[ChainedTransfig] Collectd-Jmx w/ Attribute-wise [%s] ...\", self._attribute)\n logger.info(\"///////////////////////////////////////////////////////////////////////\")\n\n super().execute(context)\n\n logger.info(\"///////////////////////////////////////////////////////////////////////\")\n logger.info(\"[ChainedTransfig] Collectd-Jmx w/ Attribute-wise [%s] ... COMPLETES\",\n self._attribute)\n logger.info(\"///////////////////////////////////////////////////////////////////////\")",
"def dispatch(self, vm):\n return_value = vm.frame[self.B]\n vm.restore_dump(return_value)",
"def handle(self, rsm_ctx):\n rsm_ctx.log(\n 'info',\n 'Node instance has type with is not supported by '\n 'Resource Management Plugin. Skipping'\n )",
"def _run_scenario(self, cls, method_name, context, args, config):",
"def state_print_do(cfg, app, win, events):",
"def execute(self, context):\n app = context[CTX_KEY_COMMON_COLLECTD_JMX_APP_PREFIX]\n logger.info(\"///////////////////////////////////////////////////////////////////////\")\n logger.info(\"[ChainedTransfig] Collectd-Jmx w/ Application-wise [%s] ...\", app)\n logger.info(\"///////////////////////////////////////////////////////////////////////\")\n\n super().execute(context)\n\n logger.info(\"///////////////////////////////////////////////////////////////////////\")\n logger.info(\"[ChainedTransfig] Collectd-Jmx w/ Application-wise [%s] ... \"\n \"COMPLETES\", app)\n logger.info(\"///////////////////////////////////////////////////////////////////////\")",
"def evaluate(self, state):\n abstract",
"def state_capture_do(cfg, app, win, events):",
"def _get_state(self):",
"def run(self, state: State) -> State:",
"def get_config(ctx):\n global HISTORY_LOGS, EXPERIMENT_ID #Ugly hack, make it better at some point, may be ;)\n id = ctx.job.id\n EXPERIMENT_ID = hash(id)\n\n import montezuma_env\n\n ctx.job.register_action(\"Set starting point procssor:\",\n lambda str: set_motezuma_env_options(str, montezuma_env.STARTING_POINT_SELECTOR))\n ctx.job.register_action(\"Set rewards:\",\n lambda str: set_motezuma_env_options(str, montezuma_env.REWARDS_FILE))\n\n logger.auto_set_dir(suffix=id)\n\n # (self, parameters, number_of_actions, input_shape)\n\n M = EXPERIMENT_MODEL\n\n name_base = str(uuid.uuid1())[:6]\n PIPE_DIR = os.environ.get('TENSORPACK_PIPEDIR_{}'.format(id), '.').rstrip('/')\n namec2s = 'ipc://{}/sim-c2s-{}-{}'.format(PIPE_DIR, name_base, id)\n names2c = 'ipc://{}/sim-s2c-{}-{}'.format(PIPE_DIR, name_base, id)\n procs = [MySimulatorWorker(k, namec2s, names2c) for k in range(SIMULATOR_PROC)]\n ensure_proc_terminate(procs)\n start_proc_mask_signal(procs)\n\n master = MySimulatorMaster(namec2s, names2c, M)\n dataflow = BatchData(DataFromQueue(master.queue), BATCH_SIZE)\n\n # My stuff - PM\n neptuneLogger = NeptuneLogger.get_instance()\n lr = tf.Variable(0.001, trainable=False, name='learning_rate')\n tf.scalar_summary('learning_rate', lr)\n num_epochs = get_atribute(ctx, \"num_epochs\", 100)\n\n rewards_str = get_atribute(ctx, \"rewards\", \"5 1 -200\")\n with open(montezuma_env.REWARDS_FILE, \"w\") as file:\n file.write(rewards_str)\n\n\n if hasattr(ctx.params, \"learning_rate_schedule\"):\n schedule_str = str(ctx.params.learning_rate_schedule)\n else: #Default value inhereted from tensorpack\n schedule_str = \"[[80, 0.0003], [120, 0.0001]]\"\n logger.info(\"Setting learing rate schedule:{}\".format(schedule_str))\n learning_rate_scheduler = ScheduledHyperParamSetter('learning_rate', json.loads(schedule_str))\n\n if hasattr(ctx.params, \"entropy_beta_schedule\"):\n schedule_str = str(ctx.params.entropy_beta_schedule)\n else: #Default value inhereted from tensorpack\n schedule_str = \"[[80, 0.0003], [120, 0.0001]]\"\n logger.info(\"Setting entropy beta schedule:{}\".format(schedule_str))\n entropy_beta_scheduler = ScheduledHyperParamSetter('entropy_beta', json.loads(schedule_str))\n\n if hasattr(ctx.params, \"explore_factor_schedule\"):\n schedule_str = str(ctx.params.explore_factor_schedule)\n else: #Default value inhereted from tensorpack\n schedule_str = \"[[80, 2], [100, 3], [120, 4], [140, 5]]\"\n logger.info(\"Setting explore factor schedule:{}\".format(schedule_str))\n explore_factor_scheduler = ScheduledHyperParamSetter('explore_factor', json.loads(schedule_str))\n\n\n\n return TrainConfig(\n dataset=dataflow,\n optimizer=tf.train.AdamOptimizer(lr, epsilon=1e-3),\n callbacks=Callbacks([\n StatPrinter(), ModelSaver(),\n learning_rate_scheduler, entropy_beta_scheduler, explore_factor_scheduler,\n HumanHyperParamSetter('learning_rate'),\n HumanHyperParamSetter('entropy_beta'),\n HumanHyperParamSetter('explore_factor'),\n NeputneHyperParamSetter('learning_rate', ctx),\n NeputneHyperParamSetter('entropy_beta', ctx),\n NeputneHyperParamSetter('explore_factor', ctx),\n master,\n StartProcOrThread(master),\n PeriodicCallback(Evaluator(EVAL_EPISODE, ['state'], ['logits'], neptuneLogger, HISTORY_LOGS), 1),\n neptuneLogger,\n ]),\n session_config=get_default_sess_config(0.5),\n model=M,\n step_per_epoch=STEP_PER_EPOCH,\n max_epoch=num_epochs,\n )",
"def execute(self, context):\n logger.info(\"///////////////////////////////////////////////////////////////////////\")\n logger.info(\"[ChainedTransfig] FULL Collectd-Jmx Transfiguration ...\")\n logger.info(\"///////////////////////////////////////////////////////////////////////\")\n\n super().execute(context)\n\n logger.info(\"///////////////////////////////////////////////////////////////////////\")\n logger.info(\"[ChainedTransfig] FULL Collectd-Jmx Transfiguration ... COMPLETE\")\n logger.info(\"///////////////////////////////////////////////////////////////////////\")",
"def run(self, in_op):\n self.move_inner_state(in_op)\n if isinstance(in_op, memops.ReorderBase):\n self.substitute_reorder(in_op)\n elif isinstance(in_op, memops.FlushBase):\n self.flush_stores(in_op)\n elif isinstance(in_op, memops.Store):\n self._ops_list.append(in_op)\n elif isinstance(in_op, memops.Register_file):\n self.reg_file(in_op)\n\n return True",
"def _evaluate_workflow_final_context(self, cause_task_ex):\n raise NotImplementedError",
"def run(ctx):\n pass"
] | [
"0.6944394",
"0.60357314",
"0.55758333",
"0.54697585",
"0.52686495",
"0.52058613",
"0.5138877",
"0.5134144",
"0.5079232",
"0.505829",
"0.50382674",
"0.49983215",
"0.49911332",
"0.49770048",
"0.49382457",
"0.49272338",
"0.49066004",
"0.49020687",
"0.4894868",
"0.48867136",
"0.4877949",
"0.48751384",
"0.4823082",
"0.4810161",
"0.4810099",
"0.4805761",
"0.47999546",
"0.4782957",
"0.4781363",
"0.47804186"
] | 0.68619585 | 1 |
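The `handle` method in this row only calls into the context object (`log`, `add_result_instance_id`, `set_runtime_properties`, `dump`), so it can be exercised with a small stand-in context. The stub below is purely illustrative; the real `rsm_ctx` API and the `ResultHandler` class name are only inferred from the calls visible in the snippet.

```python
# Minimal stand-in context to exercise a dump-style handle() method.
# Only the methods actually called by the snippet are stubbed.
class StubRsmCtx:
    def __init__(self, instance_id, gathered):
        self.instance = type('Instance', (), {'id': instance_id})()
        self._gathered = gathered
        self.runtime_properties = {}

    def log(self, level, message, *args):
        print('[{}]'.format(level), message.format(*args))

    def add_result_instance_id(self):
        print('registering result instance', self.instance.id)

    def dump(self):
        return dict(self._gathered)

    def set_runtime_properties(self, properties):
        self.runtime_properties.update(properties)


class ResultHandler:
    def handle(self, rsm_ctx):
        rsm_ctx.log('info',
                    'Dumping gathered data to runtime_properties of {} node instance',
                    rsm_ctx.instance.id)
        rsm_ctx.add_result_instance_id()
        rsm_ctx.set_runtime_properties({'data': rsm_ctx.dump()})


ctx = StubRsmCtx('node_1', {'cpu': {'usage': 2, 'quota': 8}})
ResultHandler().handle(ctx)
print(ctx.runtime_properties)
```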
Determine ticket id either from existing subject line or from uid. If the Subject line contains an ID, it is taken. If it doesn't, a new one is generated. | def determine_ticket_ID(self):
hashid = hashids.Hashids(salt=self.config.idSalt, alphabet=self.config.idAlphabet, min_length=self.config.idMinLength)
# See if hashid is set in headers
if self.parsed["X-Jicket-HashID"] is not None:
self.tickethash = self.parsed["X-Jicket-HashID"]
self.ticketid = hashid.decode(self.parsed["X-Jicket-HashID"])
else:
idregex = "\\[#%s([%s]{%i,}?)\\]" % (re.escape(self.config.idPrefix), re.escape(self.config.idAlphabet), self.config.idMinLength)
match = re.search(idregex, self.subject)
if match:
self.tickethash = match.group(1)
self.ticketid = hashid.decode(self.tickethash)
else:
self.tickethash = hashid.encode(self.uid)
self.ticketid = self.uid
self.prefixedhash = self.config.idPrefix + self.tickethash | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_subject_id(self, rec:str) -> int:\n raise NotImplementedError",
"def get_ticket_id():\n return str(time.time()) + str(uuid.uuid4())",
"def _get_ticket_id(self, str):\n pat = r'^\\s*#(\\d+)'\n try:\n return int(re.search(pat, str).group(1))\n except:\n return 0",
"def ticket_id(self):\n return self._ticket_id",
"def create_ticket(self, ticket):\r\n ticket_url = self._zendesk_instance.create_ticket(data=ticket)\r\n return zendesk.get_id_from_url(ticket_url)",
"def check_for_duplicate_subject_identifier(self):\n pass",
"def _get_user_provided_subject_identifier(self):\n if self.get_user_provided_subject_identifier_attrname() in dir(self):\n return getattr(self, self.get_user_provided_subject_identifier_attrname())\n else:\n return None",
"def get_thread_id(self):\n\t\tl = re.findall('(?<=\\[)[\\w/-]+', self.subject)\n\t\treturn l and l[0] or None",
"def getID():",
"def get_ref_id_or_record(self, uid):\n if uid not in self._cache:\n find_data = self.find_one({ user_id_ref_data.UID: uid })\n if find_data is None:\n new_data = user_id_ref_data.init_by_field(uid)\n self._cache[uid] = self.insert_one(new_data).inserted_seq_id\n else:\n find_data = user_id_ref_data(find_data)\n self._cache[uid] = find_data.seq_id\n \n return self._cache[uid]",
"def make_uid(self) -> str:\n while True:\n uid = ''.join(secrets.choice(self.CHARS) for i in range(self.uid_length))\n\n if self.user_repo.get_by_uid(uid) is None:\n return uid",
"def create_id(uid, begintime, endtime):\n allowed_chars = string.ascii_lowercase[:22] + string.digits\n temp = re.sub('[^{}]'.format(allowed_chars), '', uid.lower())\n return re.sub('[^{}]'.format(allowed_chars), '', uid.lower()) + str(arrow.get(begintime).timestamp) + str(arrow.get(endtime).timestamp)",
"def _check_if_duplicate_subject_identifier(self, using):\n if not self.pk and self.subject_identifier:\n if self.__class__.objects.using(using).filter(subject_identifier=self.subject_identifier):\n raise IdentifierError('Attempt to insert duplicate value for '\n 'subject_identifier {0} when saving {1} '\n 'on add.'.format(self.subject_identifier, self))\n else:\n if self.__class__.objects.using(using).filter(\n subject_identifier=self.subject_identifier).exclude(pk=self.pk):\n raise IdentifierError('Attempt to insert duplicate value for '\n 'subject_identifier {0} when saving {1} '\n 'on change.'.format(self.subject_identifier, self))\n self.check_for_duplicate_subject_identifier()",
"def uid(self):\n return self._serial_number",
"def find_issue_id(self):",
"def get_user_provided_subject_identifier_attrname(self):\n return None",
"def get_id(self, term):\n term = term.lower() if self.lower else term\n try:\n return self.term2id[term]\n except KeyError:\n return self.term2id[self.unk_term]",
"def get_id(self):\n return self.uid",
"def __create_ticket(user, subject, description, topic):\n\n target = settings.SLACK_TARGET_TFED\n if topic == 'Database':\n target = settings.SLACK_TARGET_TFED_DB\n user_email = user['user']['profile'].get('email', '[email protected]')\n display_name = user['user']['profile']['real_name']\n resp = rt_api.create_ticket(topic, user_email, subject, description + \"\\n\\n- \" + display_name)\n ticket_id = resp.get('id', None)\n if ticket_id:\n ticket_info = {\n \"url\": 'https://lnl-rt.wpi.edu/rt/Ticket/Display.html?id=' + ticket_id,\n \"id\": ticket_id,\n \"subject\": subject,\n \"description\": description,\n \"status\": \"New\",\n \"assignee\": None,\n \"reporter\": user['user']['name']\n }\n ticket = views.tfed_ticket(ticket_info)\n slack_post(target, text=description, content=ticket, username='Request Tracker')\n return\n error_message = \"Whoops! It appears something went wrong while attempting to submit your request. \" \\\n \"Please wait a few minutes then try again. If the problem persists, please email \" \\\n \"us directly at [email protected].\"\n post_ephemeral(target, error_message, user['user']['id'], username=\"Request Tracker\")",
"def uid(self):\n if self.part:\n return \"%s_%s\" % (self.c_id, self.part)\n else:\n return self.c_id",
"def subject_uuid(self, subject_uuid):\r\n\r\n self._subject_uuid = subject_uuid",
"def get_userid(self, claims_set):\n userid_claim = self.userid_claim\n if userid_claim in claims_set:\n userid = claims_set[userid_claim]\n else:\n return None\n return userid",
"def _evaluate_user_id(self, dispatcher, tracker):\n person = dispatcher.output_channel.get_person_by_id(dispatcher.sender_id)\n user = tracker.get_slot('user')\n if user is None:\n # Todo Replace self assignment\n user = person.aclattr\n\n return user",
"def ticket_id(self, ticket_id):\n self._ticket_id = ticket_id",
"def find_by_id(self, subject_id: str) -> any:\n pass",
"def __genNewAttachId(self): \n while 1:\n tmp = ''.join(random.choice(ID_LETTERS) for _ in range(AID_SIZE))\n if tmp in self._attachments: continue\n else: return tmp",
"def set_id(self, uid):\n self.nccl_id = uid\n return self.nccl_id",
"def __getIDFromCID(self, cid):\n if cid == \"daemon\": return self._did\n \n if cid in self._attachments or cid == self._did:\n return cid\n \n for k,v in self._attachments.items():\n if cid == v.cmd: return k\n \n return None",
"def insert(self, when=0, db=None):\r\n assert not self.exists, 'Cannot insert an existing ticket'\r\n if not db:\r\n db = self.env.get_db_cnx()\r\n handle_ta = True\r\n else:\r\n handle_ta = False\r\n\r\n # Add a timestamp\r\n if not when:\r\n when = int(time.time())\r\n self.time_created = self.time_changed = when\r\n\r\n cursor = db.cursor()\r\n\r\n # The owner field defaults to the component owner\r\n if self.values.get('component') and not self.values.get('owner'):\r\n try:\r\n component = Component(self.env, self['component'], db=db)\r\n if component.owner:\r\n self['owner'] = component.owner\r\n except TracError, e:\r\n # Assume that no such component exists\r\n pass\r\n\r\n # Insert ticket record\r\n std_fields = [f['name'] for f in self.fields if not f.get('custom')\r\n and self.values.has_key(f['name'])]\r\n cursor.execute(\"INSERT INTO ticket (%s,time,changetime) VALUES (%s)\"\r\n % (','.join(std_fields),\r\n ','.join(['%s'] * (len(std_fields) + 2))),\r\n [self[name] for name in std_fields] +\r\n [self.time_created, self.time_changed])\r\n tkt_id = db.get_last_id(cursor, 'ticket')\r\n\r\n # Insert custom fields\r\n custom_fields = [f['name'] for f in self.fields if f.get('custom')\r\n and self.values.has_key(f['name'])]\r\n if custom_fields:\r\n cursor.executemany(\"INSERT INTO ticket_custom (ticket,name,value) \"\r\n \"VALUES (%s,%s,%s)\", [(tkt_id, name, self[name])\r\n for name in custom_fields])\r\n\r\n if handle_ta:\r\n db.commit()\r\n self.id = tkt_id\r\n self._old = {}\r\n return self.id",
"def get_ticket(self, ticket_id):\r\n mask = ('mask[id, title, assignedUser[firstName, lastName],'\r\n 'createDate,lastEditDate,updates[entry],updateCount]')\r\n return self.ticket.getObject(id=ticket_id, mask=mask)"
] | [
"0.6021244",
"0.5966582",
"0.5956366",
"0.5919215",
"0.58400536",
"0.5819156",
"0.5633356",
"0.55684143",
"0.5405211",
"0.5354974",
"0.5307806",
"0.5300704",
"0.5289483",
"0.52597475",
"0.5240629",
"0.5164551",
"0.51528645",
"0.5145069",
"0.51301694",
"0.5126908",
"0.51150215",
"0.5098609",
"0.50922894",
"0.50777787",
"0.507603",
"0.50429994",
"0.5039286",
"0.503159",
"0.49981114",
"0.49962652"
] | 0.6965168 | 0 |
Convert text bodies to text that can be attached to an issue | def textfrombodies(self) -> str:
type_priority = ["plain", "html", "other"] # TODO: Make configurable
for texttype in type_priority:
if texttype == "plain" and texttype in self.textbodies:
"""Text is plain, so it can be used verbatim"""
return self.textbodies[texttype]
if texttype == "html" and texttype in self.textbodies:
"""HTML text. Convert to markup with html2text and remove extra spaces"""
text = html2text.html2text(self.textbodies[texttype])
# Remove every second newline which is added to distinguish between paragraphs in Markdown, but makes
# the jira ticket hard to read.
return re.sub("(\n.*?)\n", "\g<1>", text)
if texttype == "other" and len(self.textbodies):
# If no other text is found, return the first available body if any.
return self.textbodies[list(self.textbodies.keys())[0]]
return "The email contained no text bodies." | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def preprocess(self, text):\r\n return text",
"def generate_body(issue):\n markdown = \"### {}\\n\".format(issue.pop('title'))\n for k, v in issue.iteritems():\n markdown += \"- {}: {}\\n\".format(k, v)\n return markdown",
"def convert_to_markdown(self, text: str) -> str:",
"def get_text(downgrade_titles=False):",
"def __mantis_issue_to_strings(cls, issue, foreign_project=None):\n\n title_prefix = None\n title = None\n message = None\n\n for text in (issue.summary, issue.description):\n if text is not None and isinstance(text, bytes):\n text = text.decode(\"utf-8\", \"ignore\")\n if text is not None and text != \"\":\n if title is None:\n title_prefix = \"[%s on %s] \" % \\\n (issue.reporter, issue.date_submitted)\n title = text\n else:\n message = \"\\n\" + text\n\n if title is None:\n print(\"WARNING: No summary/description for issue #%d\" %\n (issue.id, ), file=sys.stderr)\n title = \"Mantis issue %d\" % issue.id\n\n if foreign_project is not None:\n title = \"%s: %s\" % (foreign_project, title)\n\n if title_prefix is not None:\n title = \"%s: %s\" % (title_prefix, title)\n\n for fld, text in ((\"Steps to Reproduce\", issue.steps_to_reproduce),\n (\"Additional Information\",\n issue.additional_information)):\n if text is not None and text != \"\":\n if message is None:\n message = \"%s: %s\" % (fld, text)\n else:\n message += \"\\n\\n%s: %s\" % (fld, text)\n\n return title, message",
"def render_text(self):\n if self.text_type == 1:\n return markdown.markdown(self.text)\n else:\n return self.text",
"def _convert_to_fancypants(self, markdown_text: str) -> dict: # noqa: ANN001\n text_data = {\"output_mode\": \"rtjson\", \"markdown_text\": markdown_text}\n return self._reddit.post(API_PATH[\"convert_rte_body\"], data=text_data)[\"output\"]",
"def render(txt):\n\n # Removing links to other channels\n txt = re.sub(r'<#[^\\|]*\\|(.*)>', r'#\\g<1>', txt)\n\n # Removing links to other users\n txt = re.sub(r'<(@.*)>', r'\\g<1>', txt)\n\n # handle named hyperlinks\n txt = re.sub(r'<([^\\|]*)\\|([^\\|]*)>', r'<a href=\"\\g<1>\" target=\"blank\">\\g<2></a>', txt)\n\n # handle unnamed hyperlinks\n txt = re.sub(r'<([^a|/a].*)>', r'<a href=\"\\g<1>\" target=\"blank\">\\g<1></a>', txt)\n\n # handle ordered and unordered lists\n for delimeter in LIST_DELIMITERS:\n slack_tag = delimeter\n class_name = LIST_DELIMITERS[delimeter]\n\n # Wrap any lines that start with the slack_tag in <li></li>\n list_regex = u'(?:^|\\n){}\\s?(.*)'.format(slack_tag)\n list_repl = r'<li class=\"list-item-{}\">\\g<1></li>'.format(class_name)\n txt = re.sub(list_regex, list_repl, txt)\n\n # hanlde blockquotes\n txt = re.sub(u'(^|\\n)(?:>){3}\\s?(.*)$', r'\\g<1><blockquote>\\g<2></blockquote>', txt, flags=re.DOTALL)\n txt = re.sub(u'(?:^|\\n)>\\s?(.*)\\n?', r'<blockquote>\\g<1></blockquote>', txt)\n\n # handle code blocks\n txt = re.sub(r'```\\n?(.*)```', r'<pre>\\g<1></pre>', txt, flags=re.DOTALL)\n txt = re.sub(r'\\n(</pre>)', r'\\g<1>', txt)\n\n # handle bolding, italics, and strikethrough\n for wrapper in FORMATTERS:\n slack_tag = wrapper\n html_tag = FORMATTERS[wrapper]\n\n # Grab all text in formatted characters on the same line unless escaped\n regex = r'(?<!\\\\)\\{t}([^\\{t}|\\n]*)\\{t}'.format(t=slack_tag)\n repl = r'<{t}>\\g<1></{t}>'.format(t=html_tag)\n txt = re.sub(regex, repl, txt)\n\n # convert line breaks\n txt = txt.replace('\\n', '<br />')\n\n # clean up bad HTML\n parser = CustomSlackdownHTMLParser(txt)\n txt = parser.clean()\n\n # convert multiple spaces\n txt = txt.replace(r' ', '  ')\n\n return txt",
"def preprocess_note(text):\n # replace redacted info with tokens\n text = replace_redacted(text)\n \n # misc scrubbing\n text = replace_misc(text) \n return text",
"def normalize(self, text: str) -> str:",
"def preprocess(self, text):\n if self.model_name == \"bert-base-arabert\":\n return self._old_preprocess(\n text,\n do_farasa_tokenization=True,\n )\n\n if self.model_name == \"bert-base-arabertv01\":\n return self._old_preprocess(text, do_farasa_tokenization=False)\n\n text = str(text)\n text = html.unescape(text)\n if self.strip_tashkeel:\n text = araby.strip_tashkeel(text)\n if self.strip_tatweel:\n text = araby.strip_tatweel(text)\n\n if self.replace_urls_emails_mentions:\n # replace all possible URLs\n for reg in url_regexes:\n text = re.sub(reg, \" [رابط] \", text)\n # REplace Emails with [بريد]\n for reg in email_regexes:\n text = re.sub(reg, \" [بريد] \", text)\n # replace mentions with [مستخدم]\n text = re.sub(user_mention_regex, \" [مستخدم] \", text)\n\n if self.remove_html_markup:\n # remove html line breaks\n text = re.sub(\"<br />\", \" \", text)\n # remove html markup\n text = re.sub(\"</?[^>]+>\", \" \", text)\n\n # remove repeated characters >2\n if self.remove_elongation:\n text = self._remove_elongation(text)\n\n # insert whitespace before and after all non Arabic digits or English Digits and Alphabet and the 2 brackets\n if self.insert_white_spaces:\n text = re.sub(\n \"([^0-9\\u0621-\\u063A\\u0641-\\u064A\\u0660-\\u0669a-zA-Z\\[\\]])\",\n r\" \\1 \",\n text,\n )\n\n # insert whitespace between words and numbers or numbers and words\n text = re.sub(\n \"(\\d+)([\\u0621-\\u063A\\u0641-\\u064A\\u0660-\\u066C]+)\", r\" \\1 \\2 \", text\n )\n text = re.sub(\n \"([\\u0621-\\u063A\\u0641-\\u064A\\u0660-\\u066C]+)(\\d+)\", r\" \\1 \\2 \", text\n )\n\n # remove unwanted characters\n if self.keep_emojis:\n emoji_regex = \"\".join(list(self.emoji.UNICODE_EMOJI[\"en\"].keys()))\n rejected_chars_regex2 = \"[^%s%s]\" % (chars_regex, emoji_regex)\n text = re.sub(rejected_chars_regex2, \" \", text)\n else:\n text = re.sub(rejected_chars_regex, \" \", text)\n\n # remove extra spaces\n text = \" \".join(text.replace(\"\\uFE0F\", \"\").split())\n\n if (\n self.model_name == \"bert-base-arabertv2\"\n or self.model_name == \"bert-large-arabertv2\"\n ):\n if self.keep_emojis:\n new_text = []\n for word in text.split():\n if word in list(self.emoji.UNICODE_EMOJI[\"en\"].keys()):\n new_text.append(word)\n else:\n new_text.append(self.farasa_segmenter.segment(word))\n text = \" \".join(new_text)\n else:\n text = self.farasa_segmenter.segment(text)\n return self._farasa_segment(text)\n\n # ALl the other models dont require Farasa Segmentation\n return text",
"def _formatBody(self, body_contents):\n body_contents = str(body_contents)\n\n # Replace github image tag  with plain url\n p = re.compile('!\\[.*?\\]\\((.*?)\\)')\n body_contents = p.sub('\\g<1>', body_contents)\n\n # Replace github image tag <img> with plain url\n p = re.compile('<img.*src=\"(.*?)\".*>')\n body_contents = p.sub('\\g<1>', body_contents)\n\n # Replace ``` with [code] tag\n p = re.compile('```(.*?)(```|$)', re.DOTALL)\n body_contents = p.sub('[code]\\g<1>[/code]', body_contents)\n\n return self._cutBody(body_contents)",
"def postprocess(self, text):\r\n return text",
"def parse(text):\n md = markdown.Markdown(['codehilite', 'tables', ])\n\n for iref in re.findall(img_ref_re, text):\n img_id = iref[7]\n try:\n image = FlatPageImage.objects.get(pk=int(img_id))\n md.references[img_id] = (image.image_path.url, '')\n except ObjectDoesNotExist:\n pass\n\n for lref in re.findall(reference_re, text):\n doc_name = lref[7]\n try:\n doc = File.objects.get(name=doc_name)\n md.references[doc_name]= (doc.url, doc.name)\n except ObjectDoesNotExist:\n pass\n\n return md.convert(text)",
"def post_process_text(self, text):\n\t\treturn text",
"def astext(self):\n self.elements.update({\n 'body': u''.join(self.body),\n 'indices': self.generate_indices()\n })\n return self.render('beamer.tex_t', self.elements)",
"def process_text(self, text, language):",
"def text():\n return {\n \"@context\": \"http://www.w3.org/ns/anno.jsonld\",\n \"type\": \"Annotation\",\n \"body\": {\n \"creator\": \"user\",\n \"type\": \"TextualBody\",\n \"value\": \"string\"\n },\n \"generator\": {\n \"homepage\": \"http://mnemosyne.ml\",\n \"id\": \"string\",\n \"name\": \"Mnemosyne\",\n \"type\": \"Mnemosyne\"\n },\n \"target\": {\n \"id\": \"string\",\n \"type\": \"TextQuoteSelector\",\n \"exact\": \"string\",\n \"format\": \"string\",\n \"source\": \"string\",\n \"prefix\": 0,\n \"suffix\": 0,\n \"refinedBy\": {\n \"type\": \"TextPositionSelector\",\n \"start\": \"/div[2]\",\n \"end\": \"/div[2]\"\n },\n },\n }",
"def process_md(text_md):\n\tprocessed_text_md = ( pre_proc.replace_br(text_md)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.remove_false_titles)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.remove_blank_lines)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.replace_cid)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.replace_with_dash)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.join_by_hyphen)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.join_lines)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.join_lines)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.join_et_al)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.join_beta)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.join_vs)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.fix_enye)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.join_ellipsis)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.join_subtraction)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.join_by_colon)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.remove_duplicated_dashes)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.fix_marks)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.join_title_questions)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.remove_useless_lines)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.remove_duplicated_whitespaces)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t| p(pre_proc.remove_repeated_strings)\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t)\n\treturn processed_text_md",
"def _create_text(self):\n assert len(self.state) > 0\n tmp = \"\"\n for tag in self.state:\n if \"<span\" in tag or \"<div\" in tag:\n continue\n if len(tag) > self._max_len:\n tmp += self.__split_seq(tag) + \"\\n\" + \"\\n\"\n else:\n tmp += tag + \"\\n\" + \"\\n\"\n\n self.text = copy.copy(tmp)",
"def trans(monitext):\n result = ''\n last_line = 'empty'\n\n while monitext:\n # newline character or empty line(s)\n matched = re.match(r'\\n+', monitext, re.M)\n\n if matched:\n result += matched.group()\n if len(matched.group()) > 1:\n last_line = 'empty'\n elif last_line == 'title':\n result += '\\n'\n last_line = 'empty'\n monitext = monitext[matched.end():]\n continue\n\n # code block\n matched = re.match(r'{{{.*?\\n((\\n|.)*?)\\n}}}', monitext, re.M)\n\n if matched:\n body = matched.groups()[0]\n result += '\\n\\t' + '\\n\\t'.join(body.split('\\n'))\n monitext = monitext[matched.end():]\n last_line = 'code'\n continue\n\n # header\n matched = re.match(r'^(=+) (.+) (=+)', monitext)\n\n if matched:\n title = matched.groups()[1]\n level = len(matched.groups()[0])\n\n if last_line != 'empty':\n result += '\\n'\n\n if level < 4:\n underscore = {2 : '=', 3 : '-'}[level] * mbstrlen(title)\n result += title + os.linesep + underscore\n else:\n result += ('#' * level) + \" \" + title\n monitext = monitext[matched.end():]\n\n last_line = 'title'\n\n continue\n\n # link\n matched = re.match(r'(.*)\\[([^\\s]+[ \\t]+)?(.+)\\]', monitext)\n\n if matched:\n pre = matched.groups()[0]\n url = matched.groups()[1]\n if url:\n url = url.strip()\n name = matched.groups()[2]\n\n if url:\n replaced = \"%s[%s](%s)\" % (pre, name, url)\n else:\n replaced = \"%s[%s](%s)\" % (pre, name, name)\n\n monitext = monitext[:matched.start()] + replaced\\\n + monitext[matched.end():]\n\n # important\n monitext = re.sub(r'\\'\\'\\'(.*?)\\'\\'\\'', r'**\\1**', monitext)\n\n # italic\n monitext = re.sub(r'\\'\\'(.*?)\\'\\'', r'_\\1_', monitext)\n\n # list\n matched = re.match(r'^(\\s*)\\* (.*)', monitext)\n\n if matched:\n depth = len(matched.groups()[0])\n body = matched.groups()[1]\n result += (depth - 1) * '\\t' + '* ' + body\n monitext = monitext[matched.end():]\n\n last_line = 'others'\n\n try:\n # Go to the next line\n index = monitext.index('\\n')\n result += monitext[:index]\n monitext = monitext[index:]\n except ValueError:\n result += monitext\n break\n\n return result",
"def _prepare_text(body):\n text = body.lower()\n text = text.replace('\\n', ' ')\n regex = re.compile('[^a-z ]')\n return regex.sub('', text)",
"def convert(md_text):\n # separate by line\n md_text = md_text.split('\\n')\n\n # save the html content for return\n html_text = ''\n\n # begin looping from the first line\n index = -1\n while index < len(md_text) - 1:\n index += 1\n line = md_text[index]\n\n # code segment\n if len(line) >= 3 and line[:3] == '```':\n html_line = \"\"\n language = line[3:].replace(' ', '')\n if len(language) == 0:\n language = False\n order_index = index + 1\n find_end = False\n while order_index < len(md_text):\n if md_text[order_index][:3] == '```':\n find_end = True\n break\n else:\n temp_line = md_text[order_index]\n temp_line = temp_line.replace('<', '<')\n temp_line = temp_line.replace('>', '>')\n temp_line = temp_line.replace(' ', ' ')\n html_line += temp_line + '<br />'\n order_index += 1\n\n if find_end:\n # if language is not False:\n # html_text += ('<pre><code class=\"' + language + '\">' + html_line + '</code></pre>')\n # else:\n html_text += ('<code>' + html_line + '</code>')\n # print(language)\n index = order_index\n continue\n\n # inline code\n\n\n # header\n is_header, html_line = check_header(line)\n if is_header:\n html_text = html_text + html_line\n continue\n\n # horizontal rule\n is_horizontal_rule, html_line = check_horizontal_rule(line)\n if is_horizontal_rule:\n html_text = html_text + html_line\n continue\n\n # paragraph\n line = check_paragraph(line)\n\n # deal with ordered list\n if len(line.split('.')) != 0 and '1.' == line[:2]:\n html_line = '<ol>'\n order_index = index\n while order_index < len(md_text)\\\n and len(md_text[order_index].split('.')) != 0\\\n and (str(order_index - index + 1) == md_text[order_index].split('.')[0]\n or '1' == md_text[order_index].split('.')[0]):\n to_replace = [str(order_index - index + 1) + '.', '1.']\n for replace_content in to_replace:\n md_text[order_index] = md_text[order_index].replace(replace_content, '')\n html_line = html_line + '<li>' + md_text[order_index] + '</li>'\n\n order_index += 1\n index = order_index - 1\n html_line = html_line + '</ol>'\n line = html_line\n\n # deal with unordered list\n is_unordered_list, html_line = check_unordered_list(line)\n if is_unordered_list:\n line = html_line\n\n # deal with strong\n line = strong(line)\n\n # Scratch\n line = scratch(line)\n\n # italics\n line = italics(line)\n\n # image\n while len(re.match(r'((?P<pre_text>.*)!\\[(?P<alt_text>.*)\\]\\((?P<link>.*)\\)(?P<after_text>.*))*', line).group())\\\n != 0:\n match = re.match(r'((?P<pre_text>.*)!\\[(?P<alt_text>.*)\\]\\((?P<link>.*)\\)(?P<after_text>.*))*', line)\n pre_text = match.group('pre_text')\n alt_text = match.group('alt_text')\n link = match.group('link')\n after_text = match.group('after_text')\n img_html = '<img src=\"' + link + '\" alt=\"' + alt_text + '\">'\n line = pre_text + img_html + after_text\n\n # link\n while len(re.match(r'((?P<pre_text>.*)\\[(?P<alt_text>.*)\\]\\((?P<link>.*)\\)(?P<after_text>.*))*', line).group())\\\n != 0:\n match = re.match(r'((?P<pre_text>.*)\\[(?P<alt_text>.*)\\]\\((?P<link>.*)\\)(?P<after_text>.*))*', line)\n pre_text = match.group('pre_text')\n alt_text = match.group('alt_text')\n link = match.group('link')\n after_text = match.group('after_text')\n img_html = '<a href=\"' + link + '\">' + alt_text + '</a>'\n line = pre_text + img_html + after_text\n\n html_text = html_text + line\n if not is_unordered_list:\n html_text = html_text + '<br>'\n\n return html_text",
"def processText(text):\n print(type(text))\n for line in text:\n print(line)\n return text",
"async def ascii(self, ctx, *, text):\n text = text.replace(' ', '\\n')\n \n if not text:\n await ctx.send(f\"{ctx.tick(False)} You need to specify the text you want to convert!\")\n \n _fig = figlet_format(text.replace(' ', '\\n'))\n \n if len(_fig) > 1300:\n await ctx.send(f\"{ctx.tick(False)} That message is too long!\")\n await ctx.send(f\"{ctx.tick(True)} Done!\")\n await ctx.send(f\"```{_fig}```\")",
"def clean_text(text2, project_key):\n\n text = text2\n text = return_text_without_headlines(text)\n # remove text written between double curly braces\n text = re.sub(r\"{{code}}.*{{code}}\", \"code.\", text)\n text = re.sub(r\"{code.*{code}\", \"code.\", text)\n text = re.sub(r\"{code:java}.*{code:java}\", \"code.\", text)\n text = re.sub(r\"{noformat}.*{noformat}\", \"code.\", text)\n text = re.sub(r\"{{monospaced}}.*{{monospaced}}\", \"code.\", text)\n text = re.sub(r'<script type=\"text/javascript\">.*</noscript>', 'code.', text)\n text = re.sub(r\"'''.*'''\", \"code\", text)\n text = text.replace('<p> </p>', \"\")\n text = text.replace('<div> </div>', \"\")\n text = text.replace(' ', \" \")\n # remove URLs link\n text = re.sub(r\"<a href=.*</a>\", \"url. \", text)\n text = re.sub(r\"http\\S+\", \"url. \", text)\n text = re.sub(r\"hdfs://\\S+\", \"url. \", text)\n text = re.sub(r\"tcp://\\S+\", \"url. \", text)\n text = re.sub(r\"webhdfs://\\S+\", \"url. \", text)\n text = re.sub(r\":/\\S+\", \"url. \", text)\n text = re.sub(r\"\\S+.com \", \"url. \", text)\n text = re.sub(r\"N/A]\", \" \", text)\n text = \" \".join(x for x in text.split() if not x.endswith('.com'))\n text = \" \".join(x for x in text.split() if not x.endswith('.com*'))\n text = \" \".join(x for x in text.split() if not x.endswith('.org'))\n text = \" \".join(x for x in text.split() if not x.endswith('.xml'))\n text = \" \".join(x for x in text.split() if not x.startswith('*javax.xml.'))\n text = \" \".join(x for x in text.split() if not x.startswith('javax.xml.'))\n # remove Image attachments\n text = re.sub(r\"<p><img alt=.></p>\", \"image.\", text)\n text = re.sub(r\"{}-\\d+\".format(project_key), \"issue\", text)\n # remove date\n text = re.sub(r'(\\w{4})-(\\d{1,2})-(\\d{1,2}) ', 'date.', text)\n text = re.sub(r'(\\w{3,4,5})-(\\d{1,2})-(\\d{4})', 'date.', text)\n text = re.sub(r'(\\d{1,2})/(\\d{1,2})/(\\d{4})', 'date.', text)\n text = re.sub(r'(\\w{3}). (\\d{1,2}), (\\d{4})', 'date.', text)\n text = re.sub(r'(\\w{3}). (\\d{1,2}) (\\d{4})', 'date.', text)\n text = re.sub(r'<= Today’s Date AND', 'date.', text)\n text = re.sub(r'yyyy-mm-dd', 'date', text)\n # remove text written between small braces\n text = re.sub(r'<.+?>', \"\", text)\n text = text.replace(\"e.g.,\", \" \")\n text = text.replace(\"e.g.\", \" \")\n text = text.replace(\"i.e.,\", \" \")\n text = text.replace(\"i.e.\", \" \")\n # replace non-breaking space with regular space\n text = text.replace(u'\\xa0', u' ')\n # replace all punctuations with space\n text = text.replace('-->', \" \")\n text = text.replace('--', \" \")\n text = text.replace('-', \" \")\n text = text.replace('/', \" \")\n text = text.replace('&', \" \")\n text = text.replace(' * ', \". 
\")\n text = re.sub(r\"\\\"|\\#|\\“|\\*|\\'|\\]|\\^|\\`|\\(|\\)|\\~\", \"\", text)\n text = re.sub(r\"\\\"|\\$|\\%|\\&|\\/|\\|\\=|\\>|\\<|\\@|\\[|\\\\|\\]|\\{|\\||\\}\", \" \", text)\n text = text.replace('$', \"\")\n text = text.replace('?', \".\")\n text = text.replace('+', \" \")\n text = re.sub(r\" \\d\\.\\d\\.N \", \" \", text)\n text = re.sub(r\" \\d\\.\\d\\.b.\", \" \", text)\n text = re.sub(r\" \\d\\.\\d\\.b \", \" \", text)\n text = re.sub(r\"\\d\\.\\d\\.N\", \" \", text)\n text = re.sub(r\"\\d\\.\\d\\.X\", \" \", text)\n text = re.sub(r\"v\\d\\.\\d\\.\\d+\", \" \", text)\n text = re.sub(r\"V\\d\\.\\d\\.\\d+\", \" \", text)\n text = re.sub(r\"v\\d\\.\\d+\", \" \", text)\n text = re.sub(r\"V\\d\\.\\d+\", \" \", text)\n text = re.sub(r\"\\d\\.\\d+\", \" \", text)\n text = re.sub(r\"\\d\\.\\d\\.\\d+\", \" \", text)\n text = text.replace(\"V1\", \" \")\n text = text.replace(\"v1\", \" \")\n # remove digits from text\n text = re.sub(r\"\\d+\", \"\", text)\n text = text.replace('lt;=', \" \")\n text = text.replace('.!', \".\")\n text = text.replace('!.', \".\")\n text = text.replace('!', \".\")\n text = text.replace('... ', \". \")\n text = text.replace('.. ', \". \")\n text = text.replace('..', \".\")\n text = text.replace('. . . ', \". \")\n text = text.replace('. . ', \". \")\n text = text.replace('. . ', \". \")\n text = text.replace(' .', \".\")\n text = text.replace('. . ', \". \")\n text = text.replace('. . ', \". \")\n text = text.replace(':.', \".\")\n text = text.replace(' :', \" \")\n text = text.lower()\n text = text.replace('..', \".\")\n text = ' '.join(text.split())\n\n return text",
"def format_body(self):\n mt = deque(str(self.movetext).split(' ') + [])\n out = mt.popleft()\n ll = len(out)\n while True:\n if len(mt) is 0:\n break\n\n n = mt.popleft()\n # If the current line length + space + character is less than\n # 80 chars long\n if ll + len(n) + 1 < 80:\n to_add = \" \" + n\n out += \" \" + n\n ll += len(to_add)\n else:\n out += \"\\n\" + n\n ll = len(n)\n return out + str(self.score)",
"def process_text(text):\n text = re.sub(r'<@>\\s+|<s>\\s+|</s>\\s+|<p>\\s+|</p>\\s+|\\s+\\,|\\'s|\\'|\\;|\\(|\\)|\\-\\-\\s+|\\s+\\.', '', text)\n text = re.sub(r'\\.\\,', '. ,', text)\n text = re.sub(r'\\,', '', text)\n text = re.sub(r'\\$', '$ ', text)\n text = re.sub(r'\\%', ' %', text)\n text = re.sub(r'\\s\\\"\\s', ' ', text)\n text = re.sub(r'\\.\\s+', '. ', text)\n text = text.lower()\n return text",
"def process_text(text):\n fix_dict = {'fig.': 'fig', 'fig .': 'fig ', 'Fig.': 'Fig', 'Fig .': 'Fig ',\n 'figure.': 'figure', 'figure .': 'figure ', 'Figure.': 'Fig', 'Figure .': 'Fig ',\n 'et al.': 'et al', 'III': '3', 'II': '2', 'I': '1'}\n\n for old_pattern in fix_dict.keys():\n text = text.replace(old_pattern, fix_dict[old_pattern])\n return text",
"def text(self) -> str:"
] | [
"0.61873615",
"0.6027262",
"0.58918947",
"0.5888332",
"0.5878906",
"0.5850792",
"0.58205616",
"0.5773772",
"0.5754406",
"0.5704299",
"0.56952536",
"0.56866527",
"0.5672063",
"0.56352043",
"0.562836",
"0.56220996",
"0.56156236",
"0.5604763",
"0.56015706",
"0.55987185",
"0.55968964",
"0.5582225",
"0.55773586",
"0.554427",
"0.5542895",
"0.5535587",
"0.55342734",
"0.5528587",
"0.55184215",
"0.5517222"
] | 0.71296585 | 0 |
Save the given TF session at PATH = "./model/tmpmodel" | def _save_model(graph_or_sess):
if isinstance(graph_or_sess, tf.Graph):
ops = graph_or_sess.get_operations()
for op in ops:
if 'variable' in op.type.lower():
raise ValueError('Please input a frozen graph (no variables). Or pass in the session object.')
with graph_or_sess.as_default():
sess = tf.Session(config=configProto)
fake_var = tf.Variable([0.0], name="fake_var")
sess.run(tf.global_variables_initializer())
else:
sess=graph_or_sess
PATH = os.path.join("model", "tmp-model")
make_dir(path = os.path.dirname(PATH))
saver = tf.train.Saver()
#i should deal with the case in which sess is closed.
saver.save(sess, PATH)
if isinstance(graph_or_sess, tf.Graph):
sess.close()
return PATH + ".meta" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def save(self):\n\n self.saver.save(self.sess, self.path + '/tensorflow-model', global_step=self.counter.count)",
"def save_session(self):\n\n # Checkpoint directory. Tensorflow assumes this directory already exists so we need to create it\n checkpoint_dir = os.path.abspath(os.path.join(self.FLAGS.model_dir, \"checkpoints\"))\n checkpoint_prefix = os.path.join(checkpoint_dir, \"model\")\n if not os.path.exists(checkpoint_dir):\n os.makedirs(checkpoint_dir)\n current_step = tf.train.global_step(self.session, self.global_step)\n path = self.saver.save(self.session, checkpoint_prefix, global_step=current_step)\n print(\"Saved model checkpoint to {}\\n\".format(path))",
"def save(self, sess, model_path):\n\n saver = tf.train.Saver()\n save_path = saver.save(sess, model_path)\n return save_path",
"def save(self, sess, save_path=\"./log/model.ckpt\", verbose=True):\n if(verbose): print(\"Saving model in: \" + str(save_path))\n save_path = self.tf_saver.save(sess, save_path)\n if(verbose): print(\"Done!\")",
"def save_session(self):\r\n if not os.path.exists(self.config.dir_model):\r\n os.makedirs(self.config.dir_model)\r\n self.saver.save(self.sess, self.config.dir_model)\r\n print(\"Save session succeed\")",
"def save(self, sess):\n ckpt_path = os.path.join(self.model.ckpt_dir, 'model')\n if not os.path.exists(self.model.ckpt_dir):\n os.makedirs(self.model.ckpt_dir)\n self.saver.save(sess, ckpt_path, global_step=self.gstep)",
"def save(self, PATH):\n self._saver.save(self._sess, PATH)",
"def save_model(self, checkpoint_path, epoch):\n self.saver.save(self.sess, checkpoint_path, global_step = epoch)",
"def save_model(session: tf.Session, model_dir: Text, global_step: int = None,\n max_to_keep: int = 5) -> Text:\n saver = tf.train.Saver(max_to_keep=max_to_keep)\n return saver.save(session, model_dir, global_step=global_step)",
"def save(self,sess):\n self.saver.save(sess,\"./Models/\" + self.mod_name + \".ckpt\")",
"def save_model(model, output):\n\n # model.save(os.path.join(output))\n tf.saved_model.save(model, os.path.join(output, \"1\"))\n\n # tf.saved_model.save(model, os.path.join(output, \"1\"))\n print(\"Model successfully saved at: {}\".format(output))",
"def saveModel(self, fileName):\n\n if self.saver is None:\n self.saver = tf.train.Saver()\n self.saver.save(self.sess, fileName)\n else:\n self.saver.save(self.sess, fileName)",
"def save_model(self, path=\"/model\"):\n state = {\n 'epoch': self.epoch_counter,\n 'state_dict': self.net.state_dict(),\n 'optimizer': self.optimizer.state_dict()\n }\n torch.save(state, path)",
"def save_graph(self):\n with tf.Session(graph=self.graph) as sess:\n saver = tf.train.Saver()\n sess.run(tf.global_variables_initializer())\n\n save_path = saver.save(sess, os.path.join(MODELS_PATH, \"model\"))\n print(\"Model saved in path: %s\" % save_path)\n\n with open(os.path.join(MODELS_PATH, \".model.inputs\"), \"w\") as file:\n for v in self.inputs.values():\n file.write(v.name + \"\\n\")\n with open(os.path.join(MODELS_PATH, \".model.output\"), \"w\") as file:\n file.write(self.output.name)",
"def save_session():\n\n filename = request.json.get(\"path\")\n finished = request.json.get(\"finished\")\n config = request.json.get(\"config\")\n\n success = engine.io.save(filename, state.proc, state.corpus, state.test_corpus, state.classifier, state.last_result, finished, config)\n\n if success:\n return jsonify({\"saved\":True})\n else:\n return 'Could not save session file.', 428",
"def save_model(fn, model, ckpt=None):\n if fn[-3] != \".tf\":\n fn += \".tf\"\n if not hasattr(model,\"saver\") or model.saver is None:\n with model.graph.as_default():\n model.saver = tf.train.Saver()\n if ckpt is None:\n ckpt = fn.replace(\".tf\",\".ckpt\")\n ckpt = os.path.basename(ckpt)\n log(\"Saving model to {}\".format(fn))\n model.saver.save(model.session, fn, latest_filename=ckpt)",
"def save_tf_export(self, session):\n raise NotImplementedError(\"Implement save_tf_export() method\")",
"def save_session(self):\n if not os.path.exists(self.config.dir_model):\n os.makedirs(self.config.dir_model)\n self.saver.save(self.sess, self.config.dir_model)\n\n if not os.path.isfile(self.config.dir_model_root + 'modelResults.json') and not os.access(\n self.config.dir_model_root + 'modelsResults.json',\n os.R_OK):\n with open(self.config.dir_model_root + 'modelResults.json', 'w') as json_file:\n json.dump({\"finalResults\": [], \"allParams\": []}, json_file) # write model stats into file\n json_file.close()",
"def backup_session(saver, sess, model_dir, global_t, n_episode=0):\n if not os.path.exists(model_dir):\n os.makedirs(model_dir)\n filename = \"checkpoint-%d\" % (n_episode)\n saver.save(sess, model_dir + \"/\" + filename, global_step=global_t)\n return",
"def save_model(trainer, full_path):\n print(\"Writing model to disk...\")\n model = trainer.model.cpu()\n torch.save(model.state_dict(), full_path)\n if trainer.device is not None:\n trainer.model.cuda(trainer.device)",
"def save_model(path_name, model):\n\n # Specify a path\n PATH = path_name\n \n # Save\n torch.save(model.state_dict(), PATH)",
"def save_model(path, epoch, model, optimizer):\n assert os.path.exists(path)\n\n # cannot pickle model.device, set it to None before saving\n device = model.device\n model.device = None\n dict_model = dict(\n model=model,\n args=model.args,\n statistics=model.statistics,\n optimizer_state_dict=optimizer.state_dict(),\n epoch=epoch)\n\n model_name = 'model_{}.pth'.format(epoch)\n model_path = os.path.join(path, model_name)\n torch.save(dict_model, model_path)\n\n model.device = device\n\n # create symlink to last saved model\n model_symlink = os.path.join(path, 'model_current.pth')\n if os.path.islink(model_symlink):\n os.unlink(model_symlink)\n os.symlink(model_path, model_symlink)",
"def save_checkpoint(model, path):\n\n model_name = path.split('-')[0]\n assert (model_name in ['vgg16', 'resnet50'\n ]), \"Path must have the correct model name\"\n\n # Basic details\n checkpoint = {\n 'class_to_idx': model.class_to_idx,\n 'idx_to_class': model.idx_to_class,\n 'epochs': model.epochs,\n }\n\n # Extract the final classifier and the state dictionary\n if model_name == 'vgg16':\n # Check to see if model was parallelized\n if multi_gpu:\n checkpoint['classifier'] = model.module.classifier\n checkpoint['state_dict'] = model.module.state_dict()\n else:\n checkpoint['classifier'] = model.classifier\n checkpoint['state_dict'] = model.state_dict()\n\n elif model_name == 'resnet50':\n if multi_gpu:\n checkpoint['fc'] = model.module.fc\n checkpoint['state_dict'] = model.module.state_dict()\n else:\n checkpoint['fc'] = model.fc\n checkpoint['state_dict'] = model.state_dict()\n\n # Add the optimizer\n checkpoint['optimizer'] = model.optimizer\n checkpoint['optimizer_state_dict'] = model.optimizer.state_dict()\n\n # Save the data to the path\n torch.save(checkpoint, path)",
"def save_model(net, path):\n x_conv_weights = sess.run(net.parameters)\n x_bn_params = sess.run(get_batch_norm_vars(net))\n np.save(path, [x_conv_weights, x_bn_params])\n print(\"\\x1b[35mSaved model to:\\x1b[0m\", path)",
"def save_model(self):\n self.pred_net.save((self.save_path / \"iqn_pred_net\").absolute().as_posix())\n self.target_net.save((self.save_path / \"iqn_target_net\").absolute().as_posix())",
"def save(model: nn.Module, path):\n save_model(model, path)",
"def save_model(model):\n model.to_disk(\"../model/custom_ner_model\")",
"def save_checkpoint(model, save_path):\n torch.save(model.state_dict(), save_path)",
"def save_model(self, path):\n # Save server model\n self.server_model.set_params(self.model)\n model_sess = self.server_model.sess\n return self.server_model.saver.save(model_sess, path)",
"def save_checkpoint(self):\n \n if not os.path.isdir(self.path + '/checkpoint/'):\n os.makedirs(self.path + '/checkpoint/')\n\n if self.saver == None:\n with self.graph.as_default():\n self.saver = tf.train.Saver(tf.global_variables())\n\n self.saver.save(self.session, self.path + '/checkpoint/model.ckpt')"
] | [
"0.77613735",
"0.756965",
"0.75423574",
"0.73797125",
"0.7366821",
"0.7349892",
"0.71762145",
"0.7025819",
"0.6998633",
"0.6955972",
"0.69415617",
"0.6939384",
"0.6921636",
"0.6920539",
"0.6881578",
"0.6872373",
"0.6870211",
"0.68608505",
"0.6825167",
"0.6805065",
"0.6735286",
"0.67022973",
"0.66886765",
"0.6682691",
"0.66740537",
"0.66685903",
"0.6662173",
"0.66416806",
"0.6636464",
"0.66174185"
] | 0.79623353 | 0 |
Fix the markdown links based on the pages that we know. | def _fix_links(self, text, page_names):
for n in page_names:
text = text.replace(f"]({n})", f"]({n}.html)")
text = text.replace(f"]({n}.md)", f"]({n}.html)")
return text | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def on_page_markdown(self, markdown, **kwargs):\n for autolink in self.config[\"autolinks\"]:\n markdown = replace_autolink_references(markdown, autolink[\"reference_prefix\"], autolink[\"target_url\"])\n\n return markdown",
"def fix_links():\n pass",
"def on_page_markdown(self, markdown, page, config, files):\n listext = self.config['ext']\n src_file_path = page.file.abs_src_path\n prepath, ext = os.path.splitext(src_file_path)\n lang = ext.lstrip('.')\n filename = page.file.name\n if ext in listext:\n new_markdown = \"# {0}\\n\\n```{1}\\n\".format(filename, lang) + markdown + \"\\n```\"\n return new_markdown\n else:\n return markdown",
"def markdown_links(self):\n return self.findall_markdown_cells(MARKDOWN_LINK)",
"def extendMarkdown(self, md, md_globals):\r\n md.inlinePatterns['autolink'] = UrlizePattern(URLIZE_RE, md)",
"def markdown_converter(links_to_convert, root=\"./\"):\n \n def to_markdown(element):\n \"\"\"This is a version of `safe_html_to_markdown` with link conversion baked in.\n \n NB links will all start with \"\"\" + root + \"\"\".\n \"\"\"\n return safe_html_to_markdown(element, \n links_to_convert={k:root + v \n for k, v in links_to_convert.items()})\n return to_markdown",
"def update_links(self):\n for a in self.book.xpath(\"//a[@href]\"):\n href = a.xpath(\"@href\")[0]\n index_list = a.xpath(\"@data-index\")\n \n ### If there is no data-index it is assumed link comes from initial book landing page (the index page)\n if index_list == []:\n index = self.manager.get_page_index(\"index.html\")\n else:\n index = index_list[0]\n \n ### Fix people who are bad at links\n if href.startswith(\"www.\"):\n href = \"https://\" + href\n a.set(\"href\", href)\n \n ## Correct for ambiguity (Naive assumption that this error only occours on index page)\n if href == \"./\":\n href = \"index.html\"\n \n if not href:\n return None\n \n href = self.manager.convert_link(href, index)\n a.set(\"href\", href)",
"def filter_markdown(md, mode=\"html\", currentpage={}, logger=None, **kwargs):\n globals()[\"logger\"] = logger\n if mode != \"md\":\n return md\n\n if LINK_SUBS_FIELD in currentpage:\n link_subs = currentpage[LINK_SUBS_FIELD]\n md = substitute_md_links(md, link_subs)\n\n if IMAGE_SUBS_FIELD in currentpage:\n image_subs = currentpage[IMAGE_SUBS_FIELD]\n md = substitute_md_images(currentpage, image_subs)\n md = substitute_md_links(currentpage, image_subs)\n\n if LINK_RE_SUBS_FIELD in currentpage:\n link_re_subs = currentpage[LINK_RE_SUBS_FIELD]\n md = substitute_md_links(md, link_re_subs, regex_search=True)\n\n if IMAGE_RE_SUBS_FIELD in currentpage:\n image_re_subs = currentpage[IMAGE_RE_SUBS_FIELD]\n md = substitute_md_images(md, image_re_subs, regex_search=True)\n md = substitute_md_links(md, link_re_subs, regex_search=True)\n\n return md",
"def fixaSintaxiGitHub(md):\n md = fixaBlocs(md)\n md = fixaLiniesComencenPerCometes(md)\n return md",
"def _update_urls(self):\n\n to_fix = [\n # We fix the urls in the README file.\n PyFunceble.CONFIG_DIRECTORY + \"README.rst\",\n # We fix the urls in the configuration file.\n PyFunceble.CONFIG_DIRECTORY + \".PyFunceble_production.yaml\",\n # We fix the urls in the setup.py file.\n PyFunceble.CONFIG_DIRECTORY + \"setup.py\",\n # We fix the urls in the documentation index.\n PyFunceble.CONFIG_DIRECTORY\n + directory_separator\n + \"docs\"\n + directory_separator\n + \"index.rst\",\n # We fix the urls in the documentation logic representation.\n PyFunceble.CONFIG_DIRECTORY\n + directory_separator\n + \"docs\"\n + directory_separator\n + \"code\"\n + directory_separator\n + \"logic-representation.rst\",\n # We fix the urls in the usage documentation.\n PyFunceble.CONFIG_DIRECTORY\n + directory_separator\n + \"docs\"\n + directory_separator\n + \"usage\"\n + directory_separator\n + \"from-a-terminal.rst\",\n # We fix the urls in the links configuration documentation.\n PyFunceble.CONFIG_DIRECTORY\n + directory_separator\n + \"docs\"\n + directory_separator\n + \"configuration\"\n + directory_separator\n + \"links.rst\",\n ]\n\n for fix_it in to_fix:\n if PyFunceble.helpers.File(fix_it).exists():\n self._update_docs(fix_it)\n elif PyFunceble.helpers.Directory(fix_it).exists():\n for root, _, files in walk(fix_it):\n for file in files:\n self._update_docs(root + directory_separator + file)\n else:\n raise FileNotFoundError(fix_it)",
"def canonical_to_jekyll(local_path: str) -> str:\n match = re.match(r\"(?P<base>.+\\.md)#?(?P<anchor>.*)\", local_path)\n base = match.group(\"base\")\n anchor = match.group(\"anchor\")\n\n # Transform absolute path to Jekyll relatives path:\n base = base.replace(\"/docs/\", \"_docs/\")\n if anchor:\n return f\"{{% link {base} %}}#{anchor}\"\n else:\n return f\"{{% link {base} %}}\"",
"def _remove_invalid_links(text):\n\n for reply_number in re.finditer(REGEX_REPLY, text):\n post_id = reply_number.group(1)\n post = Post.objects.filter(id=post_id)\n if not post.exists():\n text = string.replace(text, REFLINK_PREFIX + post_id, post_id)\n\n return text",
"def _do_links(self, text):\r\n MAX_LINK_TEXT_SENTINEL = 3000 # markdown2 issue 24\r\n\r\n # `anchor_allowed_pos` is used to support img links inside\r\n # anchors, but not anchors inside anchors. An anchor's start\r\n # pos must be `>= anchor_allowed_pos`.\r\n anchor_allowed_pos = 0\r\n\r\n curr_pos = 0\r\n while True: # Handle the next link.\r\n # The next '[' is the start of:\r\n # - an inline anchor: [text](url \"title\")\r\n # - a reference anchor: [text][id]\r\n # - an inline img: \r\n # - a reference img: ![text][id]\r\n # - a footnote ref: [^id]\r\n # (Only if 'footnotes' extra enabled)\r\n # - a footnote defn: [^id]: ...\r\n # (Only if 'footnotes' extra enabled) These have already\r\n # been stripped in _strip_footnote_definitions() so no\r\n # need to watch for them.\r\n # - a link definition: [id]: url \"title\"\r\n # These have already been stripped in\r\n # _strip_link_definitions() so no need to watch for them.\r\n # - not markup: [...anything else...\r\n try:\r\n start_idx = text.index('[', curr_pos)\r\n except ValueError:\r\n break\r\n text_length = len(text)\r\n\r\n # Find the matching closing ']'.\r\n # Markdown.pl allows *matching* brackets in link text so we\r\n # will here too. Markdown.pl *doesn't* currently allow\r\n # matching brackets in img alt text -- we'll differ in that\r\n # regard.\r\n bracket_depth = 0\r\n for p in range(start_idx+1, min(start_idx+MAX_LINK_TEXT_SENTINEL,\r\n text_length)):\r\n ch = text[p]\r\n if ch == ']':\r\n bracket_depth -= 1\r\n if bracket_depth < 0:\r\n break\r\n elif ch == '[':\r\n bracket_depth += 1\r\n else:\r\n # Closing bracket not found within sentinel length.\r\n # This isn't markup.\r\n curr_pos = start_idx + 1\r\n continue\r\n link_text = text[start_idx+1:p]\r\n\r\n # Possibly a footnote ref?\r\n if \"footnotes\" in self.extras and link_text.startswith(\"^\"):\r\n normed_id = re.sub(r'\\W', '-', link_text[1:])\r\n if normed_id in self.footnotes:\r\n self.footnote_ids.append(normed_id)\r\n result = '<sup class=\"footnote-ref\" id=\"fnref-%s\">' \\\r\n '<a href=\"#fn-%s\">%s</a></sup>' \\\r\n % (normed_id, normed_id, len(self.footnote_ids))\r\n text = text[:start_idx] + result + text[p+1:]\r\n else:\r\n # This id isn't defined, leave the markup alone.\r\n curr_pos = p+1\r\n continue\r\n\r\n # Now determine what this is by the remainder.\r\n p += 1\r\n if p == text_length:\r\n return text\r\n\r\n # Inline anchor or img?\r\n if text[p] == '(': # attempt at perf improvement\r\n match = self._tail_of_inline_link_re.match(text, p)\r\n if match:\r\n # Handle an inline anchor or img.\r\n is_img = start_idx > 0 and text[start_idx-1] == \"!\"\r\n if is_img:\r\n start_idx -= 1\r\n\r\n url, title = match.group(\"url\"), match.group(\"title\")\r\n if url and url[0] == '<':\r\n url = url[1:-1] # '<url>' -> 'url'\r\n # We've got to encode these to avoid conflicting\r\n # with italics/bold.\r\n url = url.replace('*', self._escape_table['*']) \\\r\n .replace('_', self._escape_table['_'])\r\n if title:\r\n title_str = ' title=\"%s\"' % (\r\n _xml_escape_attr(title)\r\n .replace('*', self._escape_table['*'])\r\n .replace('_', self._escape_table['_']))\r\n else:\r\n title_str = ''\r\n if is_img:\r\n result = '<img src=\"%s\" alt=\"%s\"%s%s' \\\r\n % (url.replace('\"', '"'),\r\n _xml_escape_attr(link_text),\r\n title_str, self.empty_element_suffix)\r\n if \"smarty-pants\" in self.extras:\r\n result = result.replace('\"', self._escape_table['\"'])\r\n curr_pos = start_idx + len(result)\r\n text = text[:start_idx] + result + text[match.end():]\r\n 
elif start_idx >= anchor_allowed_pos:\r\n result_head = '<a href=\"%s\"%s>' % (url, title_str)\r\n result = '%s%s</a>' % (result_head, link_text)\r\n if \"smarty-pants\" in self.extras:\r\n result = result.replace('\"', self._escape_table['\"'])\r\n # <img> allowed from curr_pos on, <a> from\r\n # anchor_allowed_pos on.\r\n curr_pos = start_idx + len(result_head)\r\n anchor_allowed_pos = start_idx + len(result)\r\n text = text[:start_idx] + result + text[match.end():]\r\n else:\r\n # Anchor not allowed here.\r\n curr_pos = start_idx + 1\r\n continue\r\n\r\n # Reference anchor or img?\r\n else:\r\n match = self._tail_of_reference_link_re.match(text, p)\r\n if match:\r\n # Handle a reference-style anchor or img.\r\n is_img = start_idx > 0 and text[start_idx-1] == \"!\"\r\n if is_img:\r\n start_idx -= 1\r\n link_id = match.group(\"id\").lower()\r\n if not link_id:\r\n link_id = link_text.lower() # for links like [this][]\r\n if link_id in self.urls:\r\n url = self.urls[link_id]\r\n # We've got to encode these to avoid conflicting\r\n # with italics/bold.\r\n url = url.replace('*', self._escape_table['*']) \\\r\n .replace('_', self._escape_table['_'])\r\n title = self.titles.get(link_id)\r\n if title:\r\n before = title\r\n title = _xml_escape_attr(title) \\\r\n .replace('*', self._escape_table['*']) \\\r\n .replace('_', self._escape_table['_'])\r\n title_str = ' title=\"%s\"' % title\r\n else:\r\n title_str = ''\r\n if is_img:\r\n result = '<img src=\"%s\" alt=\"%s\"%s%s' \\\r\n % (url.replace('\"', '"'),\r\n link_text.replace('\"', '"'),\r\n title_str, self.empty_element_suffix)\r\n if \"smarty-pants\" in self.extras:\r\n result = result.replace('\"', self._escape_table['\"'])\r\n curr_pos = start_idx + len(result)\r\n text = text[:start_idx] + result + text[match.end():]\r\n elif start_idx >= anchor_allowed_pos:\r\n result = '<a href=\"%s\"%s>%s</a>' \\\r\n % (url, title_str, link_text)\r\n result_head = '<a href=\"%s\"%s>' % (url, title_str)\r\n result = '%s%s</a>' % (result_head, link_text)\r\n if \"smarty-pants\" in self.extras:\r\n result = result.replace('\"', self._escape_table['\"'])\r\n # <img> allowed from curr_pos on, <a> from\r\n # anchor_allowed_pos on.\r\n curr_pos = start_idx + len(result_head)\r\n anchor_allowed_pos = start_idx + len(result)\r\n text = text[:start_idx] + result + text[match.end():]\r\n else:\r\n # Anchor not allowed here.\r\n curr_pos = start_idx + 1\r\n else:\r\n # This id isn't defined, leave the markup alone.\r\n curr_pos = match.end()\r\n continue\r\n\r\n # Otherwise, it isn't markup.\r\n curr_pos = start_idx + 1\r\n\r\n return text",
"def convert_links(mkd):\n\t\n\tmd_link_codes = re.findall(r\"\\[.*?\\]\\(.*?\\)\", mkd, re.M)\n\tfor md_code in md_link_codes:\n\t\tlabel, link = re.findall(r\"\\[(.*?)\\]\\((.*?)\\)\", md_code, re.M)[0]\n\t\ttex_code = \"\\\\href{\" + link + \"}{\" + label + \"}\"\n\t\tmkd = mkd.replace(md_code, tex_code)\n\n\treturn mkd, bool(md_link_codes)",
"def process_page(html,dest):\n html0 = html[:]\n to_root = os.path.relpath(export_path,dest)\n to_root = to_root[1:]# Change '../' or '..' to '.' or './'\n \n # Fix links to directories first since that is easier to find\n html,N1 = re_dirlinks.subn(r'\\1=\"/\\2/index.html\"',html)\n \n # all pages links\n html,N2 = re_all.subn(r'\\1=\"/_all/\\2/index.html\"',html)\n \n # Add index.html for any other internal links. NOTE: by preprocessing\n # all internal links from the main content will already end in .html so this\n # is just special pages.\n for match in re_intlinks.finditer(html):\n dest = match.groups()[-1]\n ext = os.path.splitext(dest)[-1]\n if ext == '':\n old = r'{}=\"/{}\"'.format(*match.groups())\n new = r'{}=\"/{}\"'.format(match.groups()[0], os.path.join(match.groups()[1],'index.html') )\n html = html.replace(old,new)\n \n # Now make all links to the root\n html,N3 = re_intlinks.subn(r'\\1=\"{}/\\2\"'.format(to_root),html)\n \n # Remove the search stuff\n out = []\n ff = False\n for line in html.split('\\n'):\n if not ff and '<!-- search -->' not in line:\n out.append(line)\n continue\n \n if '<!-- search -->' in line:\n ff = True\n \n if ff and '<!-- /search -->' in line:\n ff = False\n\n html = '\\n'.join(out)\n return html",
"def cleanup_links(path, inspect_links=False):\n with open(path) as f:\n text = f.read()\n\n# if 'BokehJS does not appear to have successfully loaded' in text:\n# for k, v in BOKEH_REPLACEMENTS.items():\n# text = text.replace(k, v)\n\n text = component_links(text, path)\n soup = BeautifulSoup(text, features=\"html.parser\")\n for a in soup.findAll('a'):\n href = a.get('href', '')\n if '.ipynb' in href and 'http' not in href:\n # for k, v in LINK_REPLACEMENTS.items():\n # href = href.replace(k, v)\n a['href'] = href.replace('.ipynb', '.html')\n\n # check to make sure that path exists, if not, try un-numbered version\n try_path = os.path.join(os.path.dirname(path), a['href'])\n if not os.path.exists(try_path):\n num_name = os.path.basename(try_path)\n name = re.split(r\"^\\d+( |-|_)\", num_name)[-1]\n new_path = try_path.replace(num_name, name)\n if os.path.exists(new_path):\n a['href'] = os.path.relpath(new_path, os.path.dirname(path))\n else:\n also_tried = 'Also tried: {}'.format(name) if name != num_name else ''\n warnings.warn('Found missing link {} in: {}. {}'.format(a['href'], path, also_tried))\n\n if inspect_links and 'http' in a['href']:\n print(a['href'])\n for img in soup.findAll('img'):\n src = img.get('src', '')\n if 'http' not in src and 'assets' in src:\n try_path = os.path.join(os.path.dirname(path), src)\n if not os.path.exists(try_path):\n also_tried = os.path.join('..', src)\n if os.path.exists(os.path.join(os.path.dirname(path), also_tried)):\n img['src'] = also_tried\n else:\n warnings.warn('Found reference to missing image {} in: {}. Also tried: {}'.format(src, path, also_tried))\n with open(path, 'w') as f:\n f.write(str(soup))",
"def html_from_markdown(content): \n\n \"\"\"\n Bold \n \"\"\" \n # Convert to <strong></strong>\n regx = re.compile(r\"^\\*\\*(.*?)\\*\\*\", re.MULTILINE)\n content = regx.sub(r\"<strong>\\1</strong>\",content) \n\n \"\"\"\n Link \n \"\"\" \n # Convert to <a>\n regx = re.compile(r\"\\[(.*)\\]\\((.*)\\)\", re.MULTILINE)\n content = regx.sub(r\"<a href=\\2>\\1</a>\",content) \n\n \"\"\"\n Paragraph \n \"\"\" \n new_content = \"\"\n for line in content.splitlines():\n line = re.sub(r'^(?!#|\\*)(.+)', r'<p>\\1</p>', line)\n new_content = new_content + line + \"\\n\"\n content = new_content\n\n \"\"\"\n Unordered lists\n \"\"\" \n new_content = \"\" \n u_list = False\n for line in content.splitlines():\n\n if len(line) > 0: # Check the line is not empty\n\n l = line[:2]\n if u_list and l!=\"* \": # check if there and unordered list to be closed.\n new_content = new_content + \"</ul>\"\n u_list = False # Flag indicates the unordered list has finished\n\n #if line[0]!=\"#\" and line[0]!=\"*\": # Add the paragraph to the line\n # line = \"<p>\" + line + \"</p>\\n\"\n\n if line[:2]==\"* \": # Check if the lins is an unordered list\n if not u_list: # Check if it´s the first item of the list\n line = \"<ul><li>\" + line [2:] + \"</li>\"\n u_list = True # Flag indicates the unordered list has started.\n else:\n line = \"<li>\" + line [2:] + \"</li>\"\n\n new_content = new_content + line + \"\\n\"\n\n if u_list : # in case still have an unordered list to be closed.\n new_content = new_content + \"</ul>\"\n\n content = new_content\n\n \"\"\"\n Headers \n \"\"\" \n # Convert to h1\n regx = re.compile(r\"^#\\s(.*?)\\n\", re.MULTILINE)\n content = regx.sub(r\"<h1>\\1</h1>\\n\",content) \n\n # Convert to h2\n regx = re.compile(r\"^##\\s(.*?)\\n\", re.MULTILINE)\n content = regx.sub(r\"<h2>\\1</h2>\\n\",content) \n\n # Convert to h3\n regx = re.compile(r\"^###\\s(.*?)\\n\", re.MULTILINE)\n content = regx.sub(r\"<h3>\\1</h3>\\n\",content) \n\n # Convert to h4\n regx = re.compile(r\"^####\\s(.*?)\\n\", re.MULTILINE)\n content = regx.sub(r\"<h4>\\1</h4>\\n\",content) \n\n # Convert to h5\n regx = re.compile(r\"^#####\\s(.*?)\\n\", re.MULTILINE)\n content = regx.sub(r\"<h5>\\1</h5>\\n\",content) \n\n # Convert to h6\n regx = re.compile(r\"^######\\s(.*?)\\n\", re.MULTILINE) \n content = regx.sub(r\"<h6>\\1</h6>\\n\",content) \n\n\n return content",
"def fix_genindex(self, tree: list[tuple[str, list[tuple[str, Any]]]]) -> None:\n # XXX: modifies tree inline\n # Logic modeled from themes/basic/genindex.html\n for _key, columns in tree:\n for _entryname, (links, subitems, _key) in columns:\n for (i, (ismain, link)) in enumerate(links):\n m = self.refuri_re.match(link)\n if m:\n links[i] = (ismain,\n self.fix_fragment(m.group(1), m.group(2)))\n for _subentryname, subentrylinks in subitems:\n for (i, (ismain, link)) in enumerate(subentrylinks):\n m = self.refuri_re.match(link)\n if m:\n subentrylinks[i] = (ismain,\n self.fix_fragment(m.group(1), m.group(2)))",
"def build_pages(config, dirty=False):\n\n site_navigation = nav.SiteNavigation(config)\n\n # Run `nav` plugin events.\n site_navigation = config['plugins'].run_event('nav', site_navigation, config=config)\n\n env = config['theme'].get_env()\n\n # Run `env` plugin events.\n env = config['plugins'].run_event(\n 'env', env, config=config, site_navigation=site_navigation\n )\n\n for template in config['theme'].static_templates:\n if utils.is_error_template(template):\n build_error_template(template, env, config, site_navigation)\n else:\n build_template(template, env, config, site_navigation)\n\n build_extra_templates(config['extra_templates'], config, site_navigation)\n\n log.debug(\"Building markdown pages.\")\n for page in site_navigation.walk_pages():\n try:\n # When --dirty is used, only build the page if the markdown has been modified since the\n # previous build of the output.\n if dirty and (utils.modified_time(page.abs_input_path) < utils.modified_time(page.abs_output_path)):\n continue\n\n log.debug(\"Building page %s\", page.input_path)\n _build_page(page, config, site_navigation, env)\n except Exception:\n log.error(\"Error building page %s\", page.input_path)\n raise",
"def on_page_markdown(self, markdown, page, config, files):\n repo = Repo(page.file.abs_src_path, search_parent_directories=True)\n current_tag = next(\n (tag for tag in repo.tags if tag.commit == repo.head.commit), None\n )\n template = Template(markdown, undefined=DebugUndefined)\n return template.render({\"git_tag\": current_tag})",
"def fix_post(post_name):\n #find the image links\n with open(\"_posts\" + post_name) as fd:\n image_links, browse_links = post_to_list_of_image_and_browselinks(fd)\n gallery_name = make_gallery_name_from_post_name(post_name)\n gallery_path = os.path.join(\"../galleries\", gallery_name)\n try:\n os.makedirs(os.path.join(gallery_path, \"images\"))\n except OSError as err:\n if err.errno != 17:\n raise\n\n for image in image_links:\n #download image to it (both normal and thumb)\n with open(os.path.join(gallery_path, image), \"wb\") as output:\n with closing(urllib2.urlopen(\"http://orionrobots.co.uk/%s\" % image)) as original:\n output.write(original.read())\n with open(os.path.join(gallery_path, \"thm_\" + image), \"wb\") as output:\n with closing(urllib2.urlopen(\"http://orionrobots.co.uk/%s\" % image)) as original:\n output.write(original.read())\n\n #Log that the link to X in the post will now need to be a link to Y.\n #if there are browseimage links\n #make gallery thumb page.\n #For each browseimaqe link\n #Match with an image link\n #prepare list\n #log link change\n #For each in list\n #make gallery front end for it with\n #First/last/prev/next/thumbs/blog post",
"def test_link_without_no_follow(self):\n comment = \"[link](http://foo.com)\"\n comment_md = Markdown(no_follow=False).render(comment)\n self.assertEqual(comment_md, '<p><a href=\"http://foo.com\">link</a></p>')",
"def _build_links(links):\n for link in links:\n link['href'] = link['href'].replace('servers', 'instances')\n return links",
"def fix_links_to_other_chapters(chapter, chapters, all_headers):\n soup = BeautifulSoup(chapter['html'])\n for link in soup.find_all('a'):\n if 'href' in link.attrs:\n if link['href'].startswith('#'):\n header_id = link['href'][1:]\n assert header_id in all_headers, \\\n \"#{} does not exist, referred in {}\".format(\n header_id, chapter['file'])\n other_chapter = chapters[all_headers[header_id]]\n link['href'] = '{}#{}'.format(\n other_chapter['link'],\n header_id)\n chapter['html'] = unicode(soup)",
"def transform_github_links(app, doctree, fromdocname):\n\n try:\n target_format = app.builder.link_suffix\n except AttributeError:\n # if the builder has no link_suffix, then no need to modify\n # the current links.\n return\n\n source_suffix = app.config.source_suffix\n # Links are either absolute against the repository or relative to\n # the current document's directory. Note that this is not\n # necessarily app.srcdir, which is the documentation root\n # directory. Instead rely on 'source' attribute of doctree to\n # identify the path of the file providing the current doctree\n try:\n doc_path = doctree.attributes['source']\n doc_dir = os.path.dirname(doc_path)\n except KeyError:\n # some doctrees added by other libraries through dynamic\n # generation do not have a source file. Assume paths are\n # relative to the repo.\n doc_dir = \"\"\n\n for node in doctree.traverse(nodes.reference):\n if 'refuri' not in node:\n continue\n if node['refuri'].startswith('http'):\n continue\n\n try:\n link, anchor = node['refuri'].split('#', 1)\n anchor = '#' + anchor\n except ValueError:\n link = node['refuri']\n anchor = ''\n\n if link is None:\n continue\n\n # Replace the suffix with the correct target format file ending,\n # but only if the link ends with both the correct source suffix\n # and refers to a local file.\n for src_suffix in source_suffix:\n if link.endswith(src_suffix):\n # absolute paths are considered relative to repo\n if link.startswith(\"/\"):\n basepath = \"\"\n # relative paths are against the current doctree source path\n else:\n basepath = doc_dir\n if os.path.exists(os.path.join(basepath, link)):\n node['refuri'] = (link[:-len(source_suffix)] + target_format +\n anchor)",
"def pagelink(self, on, pagename='', page=None, **kw):\n FormatterBase.pagelink(self, on, pagename, page, **kw)\n if 'generated' in kw:\n del kw['generated']\n if page is None:\n page = Page(self.request, pagename, formatter=self)\n if self.request.user.show_nonexist_qm and on and not page.exists():\n self.pagelink_preclosed = True\n return (page.link_to(self.request, on=1, **kw) +\n self.text(\"?\") +\n page.link_to(self.request, on=0, **kw))\n elif not on and self.pagelink_preclosed:\n self.pagelink_preclosed = False\n return \"\"\n else:\n return page.link_to(self.request, on=on, **kw)",
"def test_url_link_multiple(self):\n content = ('[Link]([url(\\'/content/pages/test1.md\\')])'\n '[Link]([url(\\'/content/pages/test2.md\\')])')\n self.pod.write_file('/content/pages/test.md', content)\n content = '{{doc.html|safe}}'\n self.pod.write_file('/views/base.html', content)\n self.pod.router.add_all(use_cache=False)\n result = testing.render_path(self.pod, '/test/')\n self.assertIn('href=\"/test1/\"', result)\n self.assertIn('href=\"/test2/\"', result)",
"def append_links(self, lines, lang):\n lines.append(\"verbatim \")\n lines.append(\"section Links\")\n lines.append(\"external http://polcasaglia.blogspot.com Blog\")\n lines.append(\"external http://www.uisp-fe.it/calcio.php UISP\" )\n lines.append(\"verbatim \")\n return lines",
"def correct_links(html_file, schema_name):\n return html_file.replace(schema_name.replace(\".\", \"_\") + \"_xsd.html#\", \"#\").replace(\"target=\\\"mainFrame\\\"\", \"\")",
"def test_link(self):\n comment = \"[link](http://foo.com)\"\n comment_md = Markdown().render(comment)\n self.assertEqual(comment_md, '<p><a rel=\"nofollow\" href=\"http://foo.com\">link</a></p>')"
] | [
"0.7468987",
"0.70825726",
"0.6409713",
"0.6033815",
"0.5947633",
"0.5921285",
"0.5885464",
"0.5884731",
"0.5832796",
"0.58249164",
"0.57960886",
"0.5768971",
"0.57507855",
"0.5704053",
"0.55767506",
"0.5538461",
"0.5510497",
"0.5480508",
"0.54325324",
"0.54244053",
"0.54046035",
"0.5392383",
"0.5371425",
"0.5366745",
"0.5362361",
"0.53302443",
"0.53178316",
"0.53059715",
"0.52993745",
"0.5233606"
] | 0.7852134 | 0 |
Split the markdown into parts based on sections. Each part is either text or a tuple representing a section. | def _split(self):
text = self.md
self.parts = parts = []
self.headers = headers = []
lines = []
# Split in parts
for line in text.splitlines():
if line.startswith(("# ", "## ", "### ", "#### ", "##### ")):
# Finish pending lines
parts.append("\n".join(lines))
lines = []
# Process header
level = len(line.split(" ")[0])
title = line.split(" ", 1)[1]
title_short = title.split("(")[0].split("<")[0].strip().replace("`", "")
headers.append((level, title_short))
parts.append((level, title_short, title))
else:
lines.append(line)
parts.append("\n".join(lines))
# Now convert all text to html
for i in range(len(parts)):
if not isinstance(parts[i], tuple):
parts[i] = markdown.markdown(parts[i], extensions=[]) + "\n\n" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def split_into_sections(text):\n headings_regex = re.compile(\n r'^={1,6}.*?={1,6}(?: *<!--.*?-->)?\\s*$', flags=re.M\n )\n sections = list()\n last_match_start = 0\n for match in headings_regex.finditer(text):\n match_start = match.start()\n if match_start > 0:\n sections.append(text[last_match_start:match_start])\n last_match_start = match_start\n sections.append(text[last_match_start:])\n return sections",
"def split_markdown(source: str) -> List[Dict[str, str]]:\n cells: List[Dict] = []\n in_code = False\n in_tab = False\n cur_code_mark = None\n cur_tag = None\n cur_src = []\n\n def _add_cell(cur_src: List[str], cells: List[Dict]):\n if cur_src:\n src = '\\n'.join(cur_src).strip()\n if in_code:\n cells.append({\n 'type': 'code',\n 'fence': cur_code_mark,\n 'class': cur_tag,\n 'source': src})\n else:\n if not src and not cur_tag:\n return\n cells.append({'type': 'markdown', 'source': src})\n if cur_tag:\n cells[-1]['class'] = cur_tag\n\n for l in source.splitlines():\n code = common.md_code_fence.match(l)\n tab = common.md_mark_pattern.match(l)\n if code:\n # code can be nested\n if in_tab or (in_code and code.groups()[0] != cur_code_mark):\n cur_src.append(l)\n else:\n _add_cell(cur_src, cells)\n cur_src = []\n cur_code_mark, cur_tag = code.groups()\n in_code ^= True\n elif tab:\n begin = tab.groups()[0] == 'begin_tab'\n end = tab.groups()[0] == 'end_tab'\n if in_code or (not begin and not end):\n cur_src.append(l)\n else:\n _add_cell(cur_src, cells)\n cur_src = []\n if begin:\n cur_tag = tab.groups()[1]\n else:\n cur_tag = None\n in_tab = begin\n else:\n cur_src.append(l)\n _add_cell(cur_src, cells)\n return cells",
"def parse_sections(article, as_list: bool = False):\n article_text = article.find(\"text\")\n divs = article_text.find_all(\"div\", attrs={\"xmlns\": \"http://www.tei-c.org/ns/1.0\"})\n sections = []\n for div in divs:\n div_list = list(div.children)\n if len(div_list) == 0:\n heading = \"\"\n text = \"\"\n elif len(div_list) == 1:\n if isinstance(div_list[0], NavigableString):\n heading = str(div_list[0])\n text = \"\"\n else:\n heading = \"\"\n text = div_list[0].text\n else:\n text = []\n heading = div_list[0]\n if isinstance(heading, NavigableString):\n heading = str(heading)\n p_all = list(div.children)[1:]\n else:\n heading = \"\"\n p_all = list(div.children)\n for p in p_all:\n if p is not None:\n try:\n text.append(p.text)\n except:\n pass\n if not as_list:\n text = \"\\n\".join(text)\n\n if heading is not \"\" or text is not \"\":\n ref_dict = calculate_number_of_references(div)\n sections.append(\n {\n \"heading\": heading,\n \"text\": text,\n \"n_publication_ref\": ref_dict[\"n_publication_ref\"],\n \"n_figure_ref\": ref_dict[\"n_figure_ref\"],\n }\n )\n return sections",
"def chunk_generator(self, note_text):\n\n # section regular expression\n sec_re = r'\\[start section id=\\\"(.+)\"\\](.*?)\\[end section id=\\\"\\1\"\\]'\n\n # sentence regular expressions; use group 0 for entire match\n sent_re = r'(.+?\\.\\s\\s)|(.+?\\.\\n)|(.+?\\n)'\n\n # iterate over sections; using DOTALL to match newlines\n for sec_match in re.finditer(sec_re, note_text, re.DOTALL):\n\n section_id = sec_match.group(1)\n if section_id in sections_to_skip:\n continue\n\n section_text = sec_match.group(2)\n sec_start, sec_end = sec_match.start(2), sec_match.end(2)\n\n sent_offsets = []\n for sent_match in re.finditer(sent_re, section_text):\n sent_start, sent_end = sent_match.start(0), sent_match.end(0)\n sent_offsets.append((sent_start, sent_end))\n\n # form this many chunks (add an overflow chunk)\n section_length = len(self.tokenizer(section_text).input_ids)\n n_chunks = (section_length // self.chunk_size) + 1\n\n for sents in numpy.array_split(sent_offsets, n_chunks):\n\n # this happens if there are fewer paragraphs than chunks\n # e.g. 2 large paragraphs in section and n_chunks is 3\n if sents.size == 0:\n continue\n\n chunk_start, _ = sents[0].tolist()\n _, chunk_end = sents[-1].tolist()\n yield sec_start + chunk_start, sec_start + chunk_end",
"def split_to_sections(code: List[SourceLine]) -> Tuple[List[str], List[SourceLine]]:\n section: Optional[str] = None\n if code[0].line in [mipsRE.DATA_SEC, mipsRE.TEXT_SEC]:\n section = code[0].line\n\n if section is None:\n raise MipsException(\"first line must be .text/.data\")\n\n sections: Dict[str, Any] = {mipsRE.DATA_SEC: [], mipsRE.TEXT_SEC: []}\n for srcline in code:\n if srcline.line not in [mipsRE.DATA_SEC, mipsRE.TEXT_SEC]:\n if section == mipsRE.DATA_SEC:\n sections[section].append(srcline.line) # Discard line number\n continue\n if section == mipsRE.TEXT_SEC:\n sections[section].append(srcline) # Save og line number\n continue\n else:\n section = srcline.line\n\n return sections[mipsRE.DATA_SEC], sections[mipsRE.TEXT_SEC]",
"def split_text(text: str) -> List[Dict[str, str]]:\n # split into paragraphs\n lines = text.splitlines()\n groups = common.group_list(lines, lambda a, _: a.strip() == '')\n paras = ['\\n'.join(item) for empty_line, item in groups if not empty_line]\n\n def _fallback(p, type):\n logging.warn(f'Wrong {type} format:\\n' + p)\n cells.append({'type': 'text', 'source': p})\n\n cells = []\n for p in paras:\n lines = p.splitlines() + ['']\n p += '\\n'\n if p.startswith('#'):\n # parse title\n if not _is_mark(lines[1:]):\n _fallback(p, 'title')\n else:\n m = re.match(r'#+ *', lines[0])\n cells.append({\n 'type': 'title',\n 'prefix': m[0],\n 'source': lines[0][m.span()[1]:],\n 'mark': '\\n'.join(lines[1:])})\n elif p.startswith('$$'):\n # parse equations\n m = re.findall(r'\\$\\$', p)\n if len(m) != 2:\n _fallback(p, 'equation')\n else:\n cells.append({'type': 'equation', 'source': p})\n elif p.startswith('!['):\n # parse images\n if not lines[0].strip().endswith(')') or not _is_mark(lines[1:]):\n _fallback(p, 'image')\n else:\n cells.append({'type': 'image', 'source': p})\n elif p.startswith('|'):\n # parse table\n for i, l in enumerate(lines):\n if not l.startswith('|'):\n break\n if not _is_mark(lines[i:]):\n _fallback(p, 'equation')\n else:\n cells.append({'type': 'table', 'source': p})\n else:\n groups = common.group_list(lines, _list)\n for prefix, item in groups:\n if len(prefix.split('__')) == 2:\n prefix = prefix.split('__')[0]\n source = '\\n'.join(item)[len(prefix):]\n if prefix == '':\n cells.append({'type': 'text', 'source': source})\n else:\n cells.append({\n 'type': 'list',\n 'prefix': prefix,\n 'source': source})\n return cells",
"def parse_lit(self, lines):\n comment_char = \"#\" # TODO: move this into a directive option\n comment = re.compile(r\"^\\s*{}[ \\n]\".format(comment_char))\n section_test = lambda val: bool(comment.match(val))\n\n sections = []\n for is_doc, group in itertools.groupby(lines, section_test):\n if is_doc:\n text = [comment.sub(\"\", i).rstrip(\"\\r\\n\") for i in group]\n else:\n text = [i.rstrip(\"\\r\\n\") for i in group]\n\n sections.append((is_doc, text))\n\n return sections",
"def html_from_markdown(content): \n\n \"\"\"\n Bold \n \"\"\" \n # Convert to <strong></strong>\n regx = re.compile(r\"^\\*\\*(.*?)\\*\\*\", re.MULTILINE)\n content = regx.sub(r\"<strong>\\1</strong>\",content) \n\n \"\"\"\n Link \n \"\"\" \n # Convert to <a>\n regx = re.compile(r\"\\[(.*)\\]\\((.*)\\)\", re.MULTILINE)\n content = regx.sub(r\"<a href=\\2>\\1</a>\",content) \n\n \"\"\"\n Paragraph \n \"\"\" \n new_content = \"\"\n for line in content.splitlines():\n line = re.sub(r'^(?!#|\\*)(.+)', r'<p>\\1</p>', line)\n new_content = new_content + line + \"\\n\"\n content = new_content\n\n \"\"\"\n Unordered lists\n \"\"\" \n new_content = \"\" \n u_list = False\n for line in content.splitlines():\n\n if len(line) > 0: # Check the line is not empty\n\n l = line[:2]\n if u_list and l!=\"* \": # check if there and unordered list to be closed.\n new_content = new_content + \"</ul>\"\n u_list = False # Flag indicates the unordered list has finished\n\n #if line[0]!=\"#\" and line[0]!=\"*\": # Add the paragraph to the line\n # line = \"<p>\" + line + \"</p>\\n\"\n\n if line[:2]==\"* \": # Check if the lins is an unordered list\n if not u_list: # Check if it´s the first item of the list\n line = \"<ul><li>\" + line [2:] + \"</li>\"\n u_list = True # Flag indicates the unordered list has started.\n else:\n line = \"<li>\" + line [2:] + \"</li>\"\n\n new_content = new_content + line + \"\\n\"\n\n if u_list : # in case still have an unordered list to be closed.\n new_content = new_content + \"</ul>\"\n\n content = new_content\n\n \"\"\"\n Headers \n \"\"\" \n # Convert to h1\n regx = re.compile(r\"^#\\s(.*?)\\n\", re.MULTILINE)\n content = regx.sub(r\"<h1>\\1</h1>\\n\",content) \n\n # Convert to h2\n regx = re.compile(r\"^##\\s(.*?)\\n\", re.MULTILINE)\n content = regx.sub(r\"<h2>\\1</h2>\\n\",content) \n\n # Convert to h3\n regx = re.compile(r\"^###\\s(.*?)\\n\", re.MULTILINE)\n content = regx.sub(r\"<h3>\\1</h3>\\n\",content) \n\n # Convert to h4\n regx = re.compile(r\"^####\\s(.*?)\\n\", re.MULTILINE)\n content = regx.sub(r\"<h4>\\1</h4>\\n\",content) \n\n # Convert to h5\n regx = re.compile(r\"^#####\\s(.*?)\\n\", re.MULTILINE)\n content = regx.sub(r\"<h5>\\1</h5>\\n\",content) \n\n # Convert to h6\n regx = re.compile(r\"^######\\s(.*?)\\n\", re.MULTILINE) \n content = regx.sub(r\"<h6>\\1</h6>\\n\",content) \n\n\n return content",
"def convert_to_markdown(lines):\n # description = get_description(lines)\n blocks = get_blocks(lines)\n out = []\n for block in blocks:\n item = align_block(block)\n item = format_headings(item)\n item = format_lists(item)\n item = format_numb_list(item)\n out.append(item)\n return join_blocks(out)",
"def split_full_text(self, full_text, headers_list):\n\n sectioned_text = {}\n indices = {}\n no_abstr = False\n\n for i, hd in enumerate(headers_list):\n #need to replace special regex characters before matching substrings\n if '(' in hd:\n hd = hd.replace('(', '\\(')\n\n if ')' in hd:\n hd = hd.replace(')', '\\)')\n\n if '[' in hd:\n hd = hd.replace('[', '\\[')\n\n if ']' in hd:\n hd = hd.replace(']', '\\]')\n\n if '{' in hd:\n hd = hd.replace('{', '\\{')\n\n if '}' in hd:\n hd = hd.replace('}', '\\}')\n\n if '+' in hd:\n hd = hd.replace('+', '\\+')\n\n if '*' in hd:\n hd = hd.replace('*', '\\*')\n\n if ':' in hd:\n hd = hd.replace(':', '\\:')\n\n if i == 0: # meta-data has no substring-matching to do\n\n inds = [m.start() for m in re.finditer(hd, full_text)]\n #Abstract can appear in text, but isn't listed w/ headers\n #Only use first instance\n if len(inds) > 0:\n indices[hd] = inds[0]\n\n else: #if there is no abstract, use figures to remove meta-data\n fig_text = [m.start() for m in re.finditer('Figure', full_text)]\n indices[hd] = fig_text[0]\n no_abstr = True\n\n else:\n inds = [m.start() for m in re.finditer(hd, full_text)]\n #assume final instance of substring match corresponds\n #to the correct header text instance\n indices[hd] = inds[-1]\n\n\n for i, hd in enumerate(headers_list):\n\n if i == 0:\n if no_abstr == True:\n\n #get meta-data, which has no keyword matching\n sectioned_text['Section Headers'] = headers_list\n end_ind = indices[' Abstract ']\n sectioned_text['Meta-data'] = full_text[:end_ind]\n\n #indicate there is no abstract\n start_id = indices[' Abstract ']\n end_id = indices[list(indices.keys())[1]]\n sectioned_text[' Abstract '] = ''\n\n\n if no_abstr == False:\n #get meta-data, which has no keyword matching\n sectioned_text['Section Headers'] = headers_list\n end_ind = indices[' Abstract ']\n sectioned_text['Meta-data'] = full_text[:end_ind]\n\n #get abstract\n start_id = indices[' Abstract ']\n end_id = indices[list(indices.keys())[1]]\n sectioned_text[hd] = full_text[start_id : end_id]\n\n if i > 0 and i < len(headers_list)-1: #all setions but final section\n if i == 1:\n if no_abstr == True:\n start_id = indices[' Abstract ']\n end_id = indices[list(indices.keys())[i+1]]\n sectioned_text[hd] = full_text[start_id:end_id]\n\n else:\n start_id = indices[list(indices.keys())[i]]\n end_id = indices[list(indices.keys())[i+1]]\n sectioned_text[hd] = full_text[start_id:end_id]\n\n else:\n start_id = indices[list(indices.keys())[i]]\n end_id = indices[list(indices.keys())[i+1]]\n sectioned_text[hd] = full_text[start_id:end_id]\n\n if i == len(headers_list) - 1: #final header\n start_id = indices[list(indices.keys())[i]]\n sectioned_text[hd] = full_text[start_id:]\n\n return sectioned_text",
"def collate_sections(self,paper_text,section_list:List[Section],split_upto=0.2,split_bins=10):\n current_text_split = []\n prev_section = None\n curr_text = str(paper_text)\n unfound_sections = []\n some_section_not_found = False\n for index,s in enumerate(section_list):\n curr_text,section_status = self.split_and_find_section(curr_text,s.name,prev_section,split_upto=split_upto,split_bins=split_bins)\n if not section_status: # If couldn't match section add it here. \n some_section_not_found = True\n # print('\\n\\t'+s.name) \n prev_section = s \n for ss in s.subsections:\n curr_text,section_status = self.split_and_find_section(curr_text,ss.name,prev_section,split_upto=split_upto,split_bins=split_bins)\n if not section_status:\n some_section_not_found = True\n # print(\"Cannot Match For :\",ss.name)\n prev_section = ss\n # print('\\n\\t\\t'+ss.name)\n if index == len(section_list)-1:\n s.text = curr_text\n return section_list,some_section_not_found",
"def multiple_sections(): # noqa: D416",
"def get_section_choices(sections):\n ret = []\n if sections == None:\n return ret\n sections = string.splitfields(decode_html(sections), '\\n')\n for s in sections :\n s = string.strip(s)\n ret.append((s, s))\n return ret\n # if s != '':\n # yield(encode_html(s), s)",
"def _parse_markdown(self):\n renderer = MyRenderer()\n md = mistune.Markdown(renderer=renderer)\n md.render(self._markdown_text)\n self._bash_commands = renderer._bash_commands",
"def extract_features_from_args(markdown, args):\n if args.notebooks:\n markdown_l = []\n for notebook in args.notebooks:\n markdown_l.extend(generate_markdown_cells(\n load(notebook), args.pattern\n ))\n markdown += ''.join(markdown_l)\n\n if args.markdowns:\n for mark in args.markdowns:\n with open(mark, 'r') as fil:\n markdown += (\n args.pattern.format(mark)\n + fil.read()\n )\n\n blocks = split_markdown(markdown, args.pattern)\n for block in blocks:\n block['features'] = extract_features(block['code'])\n return blocks",
"def split_and_find_section(curr_text,curr_sec_name,prev_section,split_upto=0.2,split_bins=10):\n current_text_split = split_match(curr_sec_name,curr_text,split_upto=split_upto,split_bins=split_bins)\n # print(\"Found Splits,\",curr_sec_name,len(current_text_split))\n if len(current_text_split) == 0: \n # This means no splits were found \n return curr_text,False\n\n portion_before_section = current_text_split[0] \n\n if prev_section is not None:\n prev_section.text = portion_before_section\n # print(ss.name,\"added To Section \",prev_section.name,len(prev_section.text))\n portion_after_section = current_text_split[1:]\n curr_text = ''.join(portion_after_section)\n return curr_text,True",
"def _split_into_body_and_options(\n section_content: str,\n) -> Tuple[str, Optional[str], Dict[int, bool]]:\n lines = section_content.strip().splitlines()\n\n skipif_expr = None\n flag_settings = {}\n i = 0\n for line in lines:\n stripped = line.strip()\n if _OPTION_SKIPIF_RE.match(stripped):\n skipif_match = _OPTION_SKIPIF_RE.match(stripped)\n assert skipif_match is not None\n skipif_expr = skipif_match.group(1)\n i += 1\n elif _OPTION_DIRECTIVE_RE.match(stripped):\n directive_match = _OPTION_DIRECTIVE_RE.match(stripped)\n assert directive_match is not None\n option_strings = directive_match.group(1).replace(\",\", \" \").split()\n for option in option_strings:\n if (\n option[0] not in \"+-\"\n or option[1:] not in doctest.OPTIONFLAGS_BY_NAME\n ):\n raise ValueError(f\"doctest has an invalid option {option}\")\n flag = doctest.OPTIONFLAGS_BY_NAME[option[1:]]\n flag_settings[flag] = option[0] == \"+\"\n i += 1\n elif stripped == \":hide:\":\n i += 1\n else:\n break\n\n if i == len(lines):\n raise ValueError(\"no code/output\")\n\n body = \"\\n\".join(lines[i:]).lstrip()\n if not body:\n raise ValueError(\"no code/output\")\n\n if i and lines[i].strip():\n # no newline between option block and body\n raise ValueError(f\"invalid option block: {section_content!r}\")\n\n return body, skipif_expr, flag_settings",
"def text_to_parts(text: str) -> list:\n parts = []\n first_block_start, first_block_end, typee = find_first_block(text)\n parts.append(text[first_block_start : first_block_end + 1])\n if len(text) == first_block_end + 1:\n return [text]\n parts.append(text[first_block_end + 1])\n parts += text_to_parts(text[first_block_end + 2 : ])\n return parts",
"def get_partitioned_full_text(self, full_text):\n error1 = 0 #empty article\n error2 = 0 #fails length_check. problem with header extraction or full-text splitting\n error3 = 0 #no section headers, full-text remains unpartitioned\n error4 = 0 #non-numbered section headers. Text may not be fully partitioned\n error5 = 0 #error getting header text. Substrings in self.get_header_text don't match\n\n if full_text != '': #ensure that text string contains article\n\n try:\n #narrows string down to meta-info segment containing primarily section headers\n narrowed_string = self.get_header_text(full_text)\n\n if len(narrowed_string) > 2500:\n #no section headers. narrowed string gets full article\n nums = [-2]\n error3 = 1\n\n else:\n #check for header numbers\n number_pattern = '\\s\\d{1,2}\\s' #No nesting\n nums = re.findall(number_pattern, narrowed_string)\n\n if len(nums) > 1: #if there are numbered section headers\n headers_list = self.get_numbered_section_headers(full_text)\n sectioned_text = self.split_full_text(full_text, headers_list)\n\n elif nums == [-2]:\n headers_list = ['no section headers']\n sectioned_text = {'Section Headers': headers_list, 'full text': full_text}\n\n else:\n header_list = self.get_nonnumbered_section_headers(full_text)\n sectioned_text = self.split_full_text(full_text, header_list)\n error4 = 1\n\n if self.check_partition(sectioned_text, full_text) == False:\n error2 = 1\n\n except:\n sectioned_text = {'Section Headers':['error locating headers'], 'full text':full_text}\n error5 = 1\n\n else:\n error1 = 1\n sectioned_text = {'full text' : 'there is no text for this article'}\n \n keywords = self.get_keywords(sectioned_text)\n sectioned_text['keywords'] = keywords\n\n error_codes = [error1, error2, error3, error4, error5]\n sectioned_text['errors'] = error_codes\n\n return sectioned_text",
"def split(text):\n articles = re.split(\"<doc>\", text)\n del articles[0]\n return articles",
"def split_paragraphs(block):\n # Break block contents into paragraphs by blank lines.\n def gen(block):\n par = []\n for obj in block:\n if isinstance(obj, Text) and obj.empty:\n # New paragraph.\n yield par\n par = []\n else:\n par.append(obj)\n yield par\n\n # Combine paragraphs. \n def finish(pars):\n for par in pars:\n if len(par) == 0:\n continue\n elif any( isinstance(o, Text) for o in par ):\n # Paragraph contains text. Use a P element.\n yield Block(par, tag='P')\n else:\n # Doesn't contain text; don't wrap it.\n yield from par\n\n block[:] = finish(gen(block))",
"def parse(text):\n ret = Docstring()\n if not text:\n return ret\n\n # Clean according to PEP-0257\n text = inspect.cleandoc(text)\n\n # Find first title and split on its position\n match = _titles_re.search(text)\n if match:\n desc_chunk = text[: match.start()]\n meta_chunk = text[match.start():]\n else:\n desc_chunk = text\n meta_chunk = \"\"\n\n # Break description into short and long parts\n parts = desc_chunk.split(\"\\n\", 1)\n ret.short_description = parts[0] or None\n if len(parts) > 1:\n long_desc_chunk = parts[1] or \"\"\n ret.blank_after_short_description = long_desc_chunk.startswith(\"\\n\")\n ret.blank_after_long_description = long_desc_chunk.endswith(\"\\n\\n\")\n ret.long_description = long_desc_chunk.strip() or None\n\n # Split by sections determined by titles\n matches = list(_titles_re.finditer(meta_chunk))\n if not matches:\n return ret\n splits = []\n for j in range(len(matches) - 1):\n splits.append((matches[j].end(), matches[j + 1].start()))\n splits.append((matches[-1].end(), len(meta_chunk)))\n\n chunks = {}\n for j, (start, end) in enumerate(splits):\n title = matches[j].group(1)\n if title not in _valid:\n continue\n chunks[title] = meta_chunk[start:end].strip(\"\\n\")\n if not chunks:\n return ret\n\n # Add elements from each chunk\n for title, chunk in chunks.items():\n # Determine indent\n indent_match = re.search(r\"^\\s+\", chunk)\n if not indent_match:\n raise ParseError('Can\\'t infer indent from \"{}\"'.format(chunk))\n indent = indent_match.group()\n\n # Check for returns/yeilds (only one element)\n if _sections[title] in (\"returns\", \"yields\"):\n part = inspect.cleandoc(chunk)\n ret.meta.append(_build_meta(part, title))\n continue\n\n # Split based on lines which have exactly that indent\n _re = \"^\" + indent + r\"(?=\\S)\"\n c_matches = list(re.finditer(_re, chunk, flags=re.M))\n if not c_matches:\n raise ParseError('No specification for \"{}\": \"{}\"'.format(title, chunk))\n c_splits = []\n for j in range(len(c_matches) - 1):\n c_splits.append((c_matches[j].end(), c_matches[j + 1].start()))\n c_splits.append((c_matches[-1].end(), len(chunk)))\n for j, (start, end) in enumerate(c_splits):\n part = chunk[start:end].strip(\"\\n\")\n ret.meta.append(_build_meta(part, title))\n\n return ret",
"def parse_sections(soup, report, baseUrl):\n parse_sections = False # To parse section wise set it to True else full content is parsed\n overview = False\n config = False\n usecase = False\n overview_content = \"\"\n config_content = \"\"\n usecases_content = \"\"\n isFullContent = False\n full_content = \"\"\n updateImgUrl(baseUrl, soup)\n for e in soup.contents:\n if not parse_sections:\n if 'h1' == str(e.name).lower():\n isFullContent = True\n if isFullContent:\n full_content += \"\\n\" + str(e)\n else:\n content_value = e.next\n if content_value == 'Overview':\n overview = True\n if content_value == 'Configuration':\n config = True\n overview = False\n if content_value == 'Use Cases':\n usecase = True\n config = False\n if overview == True and config == False and usecase == False:\n overview_content += \"\\n\" + str(e)\n if overview == False and config == True and usecase == False:\n config_content += \"\\n\" + str(e)\n if overview == False and config == False and usecase == True:\n usecases_content += \"\\n\" + str(e)\n\n if not parse_sections:\n report[\"content\"] = convert_to_base64(full_content)\n else:\n if overview_content:\n report[\"overview\"] = convert_to_base64(overview_content)\n if config_content:\n report[\"configuration\"] = convert_to_base64(config_content)\n if usecases_content:\n report[\"use_cases\"] = convert_to_base64(usecases_content)",
"def split_tagged_text_into_chunks(text, *a, **kw):\n return split_tagged_text_into_chunks(text, *a, **kw)",
"def parse_part(self):\n parts = []\n for part in re.split(r'\\*\\*\\* ([A-Z- ]+) \\*\\*\\*', self.hand_file): # return [ 'part1', 'splitter1', 'part2',..\n parts.append(part)\n\n for i in range(0, len(parts)):\n if i == 0:\n self.part_dict['HEADER'] = parts[i]\n if i % 2 != 0: # number is odd\n self.part_dict[parts[i]] = parts[i + 1]",
"def convertSections(tabContent):\n return PAT_RST_SECTION.sub(\n lambda match: HEADING_TEMPLATE_RST.format(template.Options.HEADING_LEVELS.index(match.group(2)[0]) + 1, match.group(1)),\n tabContent)",
"def extract_blocks(lines):\n py_block = False\n block = []\n for line in lines:\n # start of py block\n if line.strip() == '```python':\n py_block = True\n if block:\n yield block, 'md'\n block = []\n\n # exiting py block\n elif py_block and line.strip() == '```':\n py_block = False\n if block:\n yield block, 'py'\n block = []\n\n else:\n block.append(line)\n\n if block:\n yield block, 'md'",
"def split_chunk(chunk):\n if not sentinel_d.get(\"repatt2\"):\n patt2 = r\"<(t(?:ag)?)\\s*([^>]*)>([^>]*)</t(?:ag)?>\"\n sentinel_d.update(repatt2=re.compile(patt2, flags=re.IGNORECASE))\n # Chunk = collections.namedtuple('Chunk', 'tag attrs text')\n if chunk.lower().startswith(\"<t\") and chunk.endswith(\"/>\"):\n chunk_split = chunk.split(None, 1) # [1][:-2]\n tag, attrs = chunk_split[0][1:], chunk_split[1][:-2]\n options_d, font_d, case = parse_tag_attrs(attrs) # , attr=text_s) #\n text = options_d.pop(text_s, \"\")\n new_attrs = gen_tag_attrs(options=options_d, font=font_d, case=case)\n chunk = \"<{tag} {new_attrs}>{text}</{tag}>\".format(\n tag=tag, new_attrs=new_attrs, text=text\n )\n matches = sentinel_d[\"repatt2\"].findall(chunk)\n result = (\n Chunk(*matches[0])\n if len(matches) == 1\n else Chunk(\"\", \"\", chunk)\n if chunk\n else ()\n )\n return result",
"def test_chunks(year, day, part_number):\n chunks = []\n chunk_index = -1\n data_file_lines(part_number).each do |line|\n if line[0] == '#'\n chunk_index += 1\n chunks[chunk_index] = [line[1..-1].strip, []]\n elsif chunk_index >= 0\n chunks[chunk_index][1] << line\n end\n end\n chunks",
"def test_with_complex_lists(self):\n\n self.check_markdown(\n '''\n - List\n\n ??? note \"Details\"\n\n - Paragraph\n\n ??? note \"Details\"\n\n 1. Paragraph\n\n Paragraph\n ''',\n '''\n <ul>\n <li>\n <p>List</p>\n <details class=\"note\">\n <summary>Details</summary>\n <ul>\n <li>\n <p>Paragraph</p>\n <details class=\"note\">\n <summary>Details</summary>\n <ol>\n <li>\n <p>Paragraph</p>\n <p>Paragraph</p>\n </li>\n </ol>\n </details>\n </li>\n </ul>\n </details>\n </li>\n </ul>\n ''',\n True\n )"
] | [
"0.6592996",
"0.62733644",
"0.60852915",
"0.6063004",
"0.5859374",
"0.5849308",
"0.5828487",
"0.5798614",
"0.57628095",
"0.5672283",
"0.56307954",
"0.5625874",
"0.55943716",
"0.5592244",
"0.55725014",
"0.55567396",
"0.55356854",
"0.5535604",
"0.552459",
"0.55119103",
"0.54861003",
"0.5485103",
"0.54607475",
"0.54582494",
"0.54530793",
"0.54410195",
"0.54407835",
"0.5416548",
"0.5360765",
"0.5359353"
] | 0.77850777 | 0 |
Validate a redirected error response. All the URL components should match the original redirect_uri, with the exception of the parameters, which should contain an 'error' and an 'error_description' field of the provided types. | def assertValidRedirect(self, response, redirect_uri,
expected_status_code, **kwargs):
self.assertEqual(expected_status_code, response.status_code)
# Split the url into parts.
location = response.headers.get('Location')
location_url = urlparse.urlparse(location)
parameters = urlparse.parse_qs(location_url[4])
# Break out the redirect uri to compare and make sure we're headed
# back to the redirect URI with the appropriate error codes.
configured_url = urlparse.urlparse(redirect_uri)
self.assertEqual(configured_url[0], location_url[0])
self.assertEqual(configured_url[1], location_url[1])
self.assertEqual(configured_url[2], location_url[2])
self.assertEqual(configured_url[3], location_url[3])
# 4 is ignored, it contains new parameters.
self.assertEqual(configured_url[5], location_url[5])
# Make sure we have the correct error response.
self.assertEqual(len(kwargs), len(parameters))
for key, value in six.iteritems(kwargs):
self.assertIn(key, parameters)
self.assertIsNotNone(parameters[key])
self.assertEqual(value, parameters[key][0]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def error_response(self, error, **kwargs):\n oauthlib_error = error.oauthlib_error\n error_response = {\n 'error': oauthlib_error,\n 'url': '{0}?{1}'.format(oauthlib_error.redirect_uri, oauthlib_error.urlencoded)\n }\n error_response.update(kwargs)\n\n if isinstance(error, FatalClientError):\n redirect = False\n else:\n redirect = True\n\n return redirect, error_response",
"def test_http_error_raise_with_redirect(self):\n\n resp = self.r(\n HTTPError(http_status.HTTP_201_CREATED, redirect_url='http://google.com/')\n )\n\n self.assertIsInstance(\n resp, werkzeug.wrappers.Response\n )\n\n self.assertEqual(302, resp.status_code)\n self.assertEqual('http://google.com/', resp.location)",
"def _get_authorize_error_response(error, redirect_uri):\n params = error.get_body()\n uri = add_params_to_uri(redirect_uri, params)\n headers = [(\"Location\", uri)]\n response = flask.Response(\"\", status=302, headers=headers)\n return response",
"def _verify_redirect_uri(self, areq):\n try:\n _redirect_uri = unquote(areq[\"redirect_uri\"])\n\n part = urlparse(_redirect_uri)\n if part.fragment:\n raise URIError(\"Contains fragment\")\n\n (_base, _query) = splitquery(_redirect_uri)\n if _query:\n _query = parse_qs(_query)\n\n match = False\n for regbase, rquery in self.cdb[str(areq[\"client_id\"])][\"redirect_uris\"]:\n # The URI MUST exactly match one of the Redirection URI\n if _base != regbase:\n continue\n\n if not rquery and not _query:\n match = True\n break\n\n if not rquery or not _query:\n continue\n\n # every registered query component must exist in the\n # redirect_uri\n is_match_query = True\n for key, vals in _query.items():\n if key not in rquery:\n is_match_query = False\n break\n\n for val in vals:\n if val not in rquery[key]:\n is_match_query = False\n break\n\n if not is_match_query:\n break\n\n if not is_match_query:\n continue\n\n match = True\n break\n\n if not match:\n raise RedirectURIError(\"Doesn't match any registered uris\")\n # ignore query components that are not registered\n return None\n except Exception:\n logger.error(\"Faulty redirect_uri: %s\" % areq[\"redirect_uri\"])\n try:\n _cinfo = self.cdb[str(areq[\"client_id\"])]\n except KeyError:\n try:\n cid = areq[\"client_id\"]\n except KeyError:\n logger.error(\"No client id found\")\n raise UnknownClient(\"No client_id provided\")\n else:\n logger.info(\"Unknown client: %s\" % cid)\n raise UnknownClient(areq[\"client_id\"])\n else:\n logger.info(\"Registered redirect_uris: %s\" % sanitize(_cinfo))\n raise RedirectURIError(\"Faulty redirect_uri: %s\" % areq[\"redirect_uri\"])",
"def test_authorize_invalid_response_type(self):\n invalid_params = self.valid_params.copy()\n invalid_params['response_type'] = 'invalid_code'\n\n # Simple GET with invalid code parameters\n random_state = six.text_type(uuid.uuid4())\n response = self.get_json(path='/openid/authorize',\n expect_errors=True,\n state=random_state,\n **invalid_params)\n\n # Validate the error response\n self.assertValidRedirect(response=response,\n expected_status_code=302,\n redirect_uri=invalid_params['redirect_uri'],\n error='unsupported_response_type',\n error_description=e_msg.INVALID_RESPONSE_TYPE)",
"def assertRedirects(self, response, expected_url, status_code=302,\n target_status_code=200, host=None, msg_prefix=''):\n if msg_prefix:\n msg_prefix += \": \"\n\n if hasattr(response, 'redirect_chain'):\n # The request was a followed redirect\n self.failUnless(\n len(response.redirect_chain) > 0,\n msg_prefix + \"Response didn't redirect as expected: Response\"\n \" code was %d (expected %d)\" % (response.status_code, status_code)\n )\n\n self.assertEqual(\n response.redirect_chain[0][1], status_code,\n msg_prefix + \"Initial response didn't redirect as expected:\"\n \" Response code was %d (expected %d)\" %\n (response.redirect_chain[0][1], status_code)\n )\n\n url, status_code = response.redirect_chain[-1]\n\n self.assertEqual(\n response.status_code, target_status_code,\n msg_prefix + \"Response didn't redirect as expected: Final\"\n \" Response code was %d (expected %d)\" % (response.status_code, target_status_code)\n )\n\n else:\n # Not a followed redirect\n self.assertEqual(\n response.status_code, status_code,\n msg_prefix + \"Response didn't redirect as expected: Response\"\n \" code was %d (expected %d)\" % (response.status_code, status_code)\n )\n\n url = response['Location']\n scheme, netloc, path, query, fragment = urlsplit(url)\n\n redirect_response = self.get(\n urlunsplit((scheme, netloc, path, None, None)),\n QueryDict(query),\n )\n\n # Get the redirection page, using the same client that was used\n # to obtain the original response.\n self.assertEqual(\n redirect_response.status_code, target_status_code,\n msg_prefix + \"Couldn't retrieve redirection page '%s':\"\n \" response code was %d (expected %d)\" %\n (path, redirect_response.status_code, target_status_code)\n )\n\n e_scheme, e_netloc, e_path, e_query, e_fragment = urlsplit(expected_url)\n if not (e_scheme or e_netloc):\n expected_url = urlunsplit(('http', host or 'testserver', e_path, e_query, e_fragment))\n\n self.assertEqual(\n url,\n expected_url,\n msg_prefix + \"Response redirected to '%s', expected '%s'\" % (url, expected_url),\n )",
"def test_authorize_invalid_redirect_uri(self):\n invalid_params = self.valid_params.copy()\n invalid_params['redirect_uri'] = 'not_a_valid_uri'\n\n # Simple GET with invalid code parameters\n random_state = six.text_type(uuid.uuid4())\n response = self.get_json(path='/openid/authorize',\n expect_errors=True,\n state=random_state,\n **invalid_params)\n\n # Assert that this is NOT a redirect\n self.assertEqual(400, response.status_code)\n self.assertIsNotNone(response.json)\n self.assertEqual('invalid_request', response.json['error'])\n self.assertEqual(e_msg.INVALID_REDIRECT_URI,\n response.json['error_description'])",
"def _assert_redirect_url(self, response, expected_redirect_url):\n response_dict = json.loads(response.content.decode('utf-8'))\n assert 'redirect_url' in response_dict, (\n \"Response JSON unexpectedly does not have redirect_url: {!r}\".format(\n response_dict\n )\n )\n assert response_dict['redirect_url'] == expected_redirect_url",
"def expect_oauth_redirect(self, redirect_re='http://x/y\\?code=(.+)',\n args=None):\n full_args = {\n 'client_id': '123',\n 'redirect_uri': 'http://x/y',\n }\n if args:\n full_args.update(args)\n\n resp = self.get_response('/dialog/oauth', args=full_args)\n self.assertEquals('302 Moved Temporarily', resp.status)\n location = resp.headers['Location']\n match = re.match(redirect_re, location)\n assert match, location\n return urllib.unquote(match.group(1))",
"def test_invalid_response_request(self, mock_post):\n self._mock_response(mock_post, valid=False)\n\n random_state = six.text_type(uuid.uuid4())\n\n # Simple GET with various parameters\n response = self.get_json(path='/openid/authorize_return',\n expect_errors=True,\n state=random_state,\n **self.valid_params)\n\n redirect_uri = self.valid_params['sb_redirect_uri']\n # Validate the redirect response\n self.assertValidRedirect(response=response,\n expected_status_code=302,\n redirect_uri=redirect_uri,\n error='access_denied',\n error_description=e_msg.OPEN_ID_TOKEN_INVALID)",
"def validate_redirect_uri(value):\n sch, netloc, path, par, query, fra = urlparse(value)\n if not (sch and netloc):\n raise InvalidRedirectURIError()\n if sch != 'https':\n if ':' in netloc:\n netloc, port = netloc.split(':', 1)\n if not (netloc in ('localhost', '127.0.0.1') and sch == 'http'):\n raise InsecureTransportError()",
"def verify_auth_request(self, *args, **kwargs):\n if len(args) == 1:\n url = args[0]\n qs = get_query_string(url)\n response_type = qs.pop('response_type', None)\n client_id = qs.pop('client_id', None)\n redirect_uri = qs.pop('redirect_uri', None)\n scope = qs.pop('scope', None)\n state = qs.pop('state', None)\n\n elif len(args) == 2:\n response_type = args[0]\n client_id = args[1]\n\n redirect_uri = kwargs.pop('redirect_uri', None)\n scope = kwargs.pop('scope', None)\n state = kwargs.pop('state', None)\n\n if not client_id: \n return self.invalid_request(\n error_description = 'client_id is required'\n , redirect_uri = redirect_uri\n , state = state\n )\n\n if not response_type:\n return self.invalid_request(\n error_description = 'response_type is required'\n , redirect_uri = redirect_uri\n , state = state\n )\n\n is_client_id_valid = self.verify_client_id(client_id)\n\n if not is_client_id_valid:\n return self.unauthorized_client(\n redirect_uri = redirect_uri\n , state = state\n )\n\n\n if redirect_uri == None:\n redirect_uri = self.get_redirect_uri(client_id)\n\n is_redirect_uri_valid = self.verify_redirect_uri(client_id,\n redirect_uri)\n\n if not is_redirect_uri_valid:\n return self.invalid_request()\n\n is_scope_valid = self.verify_scope(scope)\n\n if not is_scope_valid:\n return self.invalid_scope(\n redirect_uri = redirect_uri\n , state = state\n )\n\n is_authenticated = self.authenticate_user()\n\n if not is_authenticated:\n return self.access_denied(\n redirect_uri = redirect_uri\n , state = state\n )\n\n if response_type == 'code':\n # We are doing 4.1.1\n code = self.generate_authorization_code()\n\n # Save information to be used to validate later requests\n self.save_auth_code(\n client_id\n , code\n , scope\n , redirect_uri\n )\n\n new_qs = {'code': code}\n\n if state:\n new_qs['state'] = state\n\n return {\n 'redirect_uri': clean_url(redirect_uri, new_qs,\n should_force_ssl=self.should_force_ssl\n )\n }\n\n elif response_type == 'token':\n # We are doing 4.2.1\n token = self.generate_access_token()\n\n self.save_auth_token(token, None)\n\n # don't issue a refresh token in this mode\n\n #TODO: If scope is different than requested, return it\n\n return {'access_token': token }\n else:\n return self.unsupported_response_type(\n redirect_uri = redirect_uri\n , state = state\n )",
"def validate_response(self, response):\n pass",
"def assertIsRedirect(self, response, path=None):\n self.assertIn(response.status_code, range(300, 400), str(response) + ' is not a redirect')\n if path:\n self.assertEqual(response['location'], path)",
"def accessibility_error_redirect_url(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"accessibility_error_redirect_url\")",
"def accessibility_error_redirect_url(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"accessibility_error_redirect_url\")",
"def error_handler(error):\n if isinstance(error, SocialAuthBaseException):\n return redirect('/socialerror')",
"def redirects_to(response, url):\n is_redirect = response.status_code == 302\n parsed_url = urlparse(response.get('Location'))\n is_url = parsed_url.path == url\n\n return is_redirect and is_url",
"def _redirectErrors(self, other):\n other.getErrorRaisedEventManager().add_listener(self._errorRedirection)",
"def http_error_302(self, req, fp, code, msg, headers):\n\n if 'Location' in headers and ':80/?ver=' in headers['Location']:\n raise QQImagePath(headers['Location'])",
"def handle_error_response(response_body):\n try:\n error_components = []\n error_data = json.loads(response_body)\n\n error_components.append(\"Error code {}\".format(error_data[\"error\"]))\n if \"error_description\" in error_data:\n error_components.append(\": {}\".format(error_data[\"error_description\"]))\n if \"error_uri\" in error_data:\n error_components.append(\" - {}\".format(error_data[\"error_uri\"]))\n error_details = \"\".join(error_components)\n # If no details could be extracted, use the response data.\n except (KeyError, ValueError):\n error_details = response_body\n\n raise exceptions.OAuthError(error_details, response_body)",
"def handle_error(e: ODPAPIError):\n\n if e.status_code == 401:\n flash('Your session has expired. Please log in again to continue.', category='error')\n return redirect(url_for('hydra.logout'))\n\n if e.status_code == 403:\n flash('You do not have permission to access that page.', category='warning')\n return redirect(request.referrer or url_for('home.index'))\n\n if e.status_code == 503:\n flash('Service unavailable. Please try again in a few minutes.', category='error')\n return\n\n try:\n detail = e.error_detail['detail']\n if e.status_code == 422 and isinstance(detail, list):\n # duplicate validation errors are returned when multiple\n # server-side dependencies validate the same input; we\n # eliminate duplicates by packing them into a dict\n errors = {\n error['loc'][1]: error['msg']\n for error in detail\n }\n for field, msg in errors.items():\n flash(f'{field}: {msg}', category='error')\n else:\n flash(detail, category='error')\n\n except (TypeError, KeyError, IndexError):\n flash(e.error_detail, category='error')",
"def test_authorize_no_response_type(self):\n invalid_params = self.valid_params.copy()\n del invalid_params['response_type']\n\n # Simple GET with invalid code parameters\n random_state = six.text_type(uuid.uuid4())\n response = self.get_json(path='/openid/authorize',\n expect_errors=True,\n state=random_state,\n **invalid_params)\n\n # Validate the error response\n self.assertValidRedirect(response=response,\n expected_status_code=302,\n redirect_uri=invalid_params['redirect_uri'],\n error='unsupported_response_type',\n error_description=e_msg.NO_RESPONSE_TYPE)",
"def get_errors(self, response: response_domain_model.Response, question_code: str) -> Sequence['ValidationError']:\n ...",
"def test_valid_response_request(self, mock_post):\n self._mock_response(mock_post, valid=True)\n\n random_state = six.text_type(uuid.uuid4())\n\n # Simple GET with various parameters\n response = self.get_json(path='/openid/authorize_return',\n expect_errors=True,\n state=random_state,\n **self.valid_params)\n\n # Try to pull the code out of the response\n location = response.headers.get('Location')\n location_url = urlparse.urlparse(location)\n parameters = urlparse.parse_qs(location_url[4])\n\n with base.HybridSessionManager():\n token = auth_api.authorization_code_get(parameters['code'])\n\n redirect_uri = self.valid_params['sb_redirect_uri']\n # Validate the redirect response\n self.assertValidRedirect(response=response,\n expected_status_code=302,\n redirect_uri=redirect_uri,\n state=token.state,\n code=token.code)",
"def validate_response(self, response: requests.Response) -> None:\n if 400 <= response.status_code < 500:\n msg = (\n f\"{response.status_code} Client Error: \"\n f\"{response.reason} for path: {self.path}. \"\n f\"Request payload: {response.request.body}\"\n )\n raise FatalAPIError(msg)\n\n elif 500 <= response.status_code < 600:\n msg = (\n f\"{response.status_code} Server Error: \"\n f\"{response.reason} for path: {self.path}\"\n )\n raise RetriableAPIError(msg)",
"def assert_has_valid_error(self, response, expected_code):\r\n assert 'error' in response\r\n assert len(response) == 1\r\n \r\n error = response['error']\r\n assert 'code' in error\r\n assert error['code'] == expected_code\r\n assert 'title' in error\r\n assert isinstance(error['title'], str)\r\n assert 'message' in error\r\n assert isinstance(error['message'], str)",
"def assert_redirect_to_provider_looks_correct(self, response):\r\n self.assertEqual(302, response.status_code)\r\n self.assertTrue(response.has_header('Location'))",
"def error_wrapper(error, errorClass):\n http_status = 0\n if error.check(TwistedWebError):\n xml_payload = error.value.response\n if error.value.status:\n http_status = int(error.value.status)\n else:\n error.raiseException()\n if http_status >= 400:\n if not xml_payload:\n error.raiseException()\n try:\n fallback_error = errorClass(\n xml_payload, error.value.status, str(error.value),\n error.value.response)\n except (ParseError, AWSResponseParseError):\n error_message = http.RESPONSES.get(http_status)\n fallback_error = TwistedWebError(\n http_status, error_message, error.value.response)\n raise fallback_error\n elif 200 <= http_status < 300:\n return str(error.value)\n else:\n error.raiseException()",
"def validate_response(response: json):\n if \"error\" in response:\n print(\"ERROR: Request returned error\")\n print_request_response(response)\n exit(1)"
] | [
"0.7122628",
"0.65730166",
"0.62523395",
"0.6000711",
"0.59799373",
"0.58929646",
"0.58686644",
"0.58485216",
"0.5775962",
"0.5717465",
"0.5591549",
"0.5570456",
"0.5543242",
"0.5461019",
"0.543455",
"0.543455",
"0.5410617",
"0.5404725",
"0.54023296",
"0.5393356",
"0.5376724",
"0.53765213",
"0.5356883",
"0.5350822",
"0.5339529",
"0.5334623",
"0.53289837",
"0.53200305",
"0.53073674",
"0.52958703"
] | 0.6597871 | 1 |
This test ensures that the authorize request against the oauth endpoint succeeds with expected values. | def test_valid_authorize_request(self):
random_state = six.text_type(uuid.uuid4())
# Simple GET with various parameters
response = self.get_json(path='/openid/authorize',
expect_errors=True,
state=random_state,
**self.valid_params)
# Assert that this is a redirect response
self.assertEqual(303, response.status_code)
# Assert that the redirect request goes to launchpad.
location = response.headers.get('Location')
location_url = urlparse.urlparse(location)
parameters = urlparse.parse_qs(location_url[4])
# Check the URL
conf_openid_url = CONF.oauth.openid_url
self.assertEqual(conf_openid_url, location[0:len(conf_openid_url)])
# Check OAuth Registration parameters
self.assertIn('fullname', parameters['openid.sreg.required'][0])
self.assertIn('email', parameters['openid.sreg.required'][0])
# Check redirect URL
redirect = parameters['openid.return_to'][0]
redirect_url = urlparse.urlparse(redirect)
redirect_params = urlparse.parse_qs(redirect_url[4])
self.assertIn('/openid/authorize_return', redirect)
self.assertEqual(random_state,
redirect_params['state'][0])
self.assertEqual(self.valid_params['redirect_uri'],
redirect_params['sb_redirect_uri'][0]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_oauth(self):\n oauth_headers = self._get_oauth_headers(self.user)\n self.client.logout()\n response = self.client.get(self.path(), **oauth_headers)\n assert response.status_code == 200\n body = {'user_id': 'staff', 'action': 'allow'}\n response = self.client.post(self.path(), body, format='json', **oauth_headers)\n assert response.status_code == 200",
"def test_validate_authorization_request_required_parameters(self):\n\n request = self.make_request()\n scopes, credentials = self.auth.validate_authorization_request(request)\n\n self.assertListEqual(scopes, request.scope.split())\n assert credentials['client_id'] == request.client_id\n assert credentials['redirect_uri'] == request.redirect_uri\n assert credentials['response_type'] == request.response_type\n assert credentials['state'] == request.state\n\n self.validator.validate_client_id\\\n .assert_called_once_with(request.client_id, request)\n self.validator.validate_redirect_uri\\\n .assert_called_once_with(request.client_id, request.redirect_uri, request)",
"def test_read_o_auth_authorize_token(self):\n pass",
"def test_create_o_auth_authorize_token(self):\n pass",
"def test_valid_access_request(self):\n\n # Generate a valid auth token\n with base.HybridSessionManager():\n authorization_code = auth_api.authorization_code_save({\n 'user_id': 2,\n 'state': 'test_state',\n 'code': 'test_valid_code'\n })\n\n content_type = 'application/x-www-form-urlencoded'\n # POST with content: application/x-www-form-urlencoded\n response = self.app.post('/v1/openid/token',\n params={\n 'code': authorization_code.code,\n 'grant_type': 'authorization_code'\n },\n content_type=content_type,\n expect_errors=True)\n\n # Assert that this is a successful response\n self.assertEqual(200, response.status_code)\n\n # Assert that the token came back in the response\n token = response.json\n self.assertIsNotNone(token['access_token'])\n self.assertIsNotNone(token['expires_in'])\n self.assertIsNotNone(token['id_token'])\n self.assertIsNotNone(token['refresh_token'])\n self.assertIsNotNone(token['token_type'])\n self.assertEqual('Bearer', token['token_type'])\n\n # Assert that the access token is in the database\n with base.HybridSessionManager():\n access_token = \\\n token_api.access_token_get_by_token(token['access_token'])\n self.assertIsNotNone(access_token)\n\n # Assert that system configured values is owned by the correct user.\n self.assertEqual(2, access_token.user_id)\n self.assertEqual(token['id_token'], access_token.user_id)\n self.assertEqual(token['expires_in'], CONF.oauth.access_token_ttl)\n self.assertEqual(token['expires_in'], access_token.expires_in)\n self.assertEqual(token['access_token'], access_token.access_token)\n\n # Assert that the refresh token is in the database\n with base.HybridSessionManager():\n refresh_token = \\\n refresh_tokens.refresh_token_get_by_token(\n token['refresh_token'])\n\n self.assertIsNotNone(refresh_token)\n\n # Assert that system configured values is owned by the correct user.\n self.assertEqual(2, refresh_token.user_id)\n self.assertEqual(CONF.oauth.refresh_token_ttl,\n refresh_token.expires_in)\n self.assertEqual(token['refresh_token'], refresh_token.refresh_token)\n\n # Assert that the authorization code is no longer in the database.\n with base.HybridSessionManager():\n none_code = \\\n auth_api.authorization_code_get(authorization_code.code)\n self.assertIsNone(none_code)",
"def test_list_o_auth_authorize_token(self):\n pass",
"def test_authorize_token_url(self):\n test_OauthObject = UAOauth2Client(**self.test_client)\n test_url, test_state = test_OauthObject.authorize_token_url(self.test_call_callback_url)\n expected_url = 'https://www.mapmyfitness.com/v7.1/oauth2/uacf/authorize/?redirect_uri=http%3A%2F%2F127.0.0.1%3A8000&response_type=code&client_id=test_client&state='\n self.assertEqual(test_url, '{}{}'.format(expected_url,test_state))",
"def test_create_o_auth_client_authorization(self):\n pass",
"def test_read_o_auth_client_authorization(self):\n pass",
"def test_authorize(self):\n account = self._get_hosting_account()\n service = account.service\n\n self.assertFalse(service.is_authorized())\n\n service.authorize('myuser', 'abc123', None)\n\n self.assertIn('password', account.data)\n self.assertNotEqual(account.data['password'], 'abc123')\n self.assertTrue(service.is_authorized())",
"def testAuthorizationClientAuthInParams(self):\n request = self.generateValidTokenRequest(arguments={\n 'grant_type': 'refresh_token',\n 'refresh_token': self._VALID_REFRESH_TOKEN,\n 'client_id': self._VALID_CLIENT.id,\n 'client_secret': self._VALID_CLIENT.secret\n })\n newAuthToken = 'tokenWithAuthInParameter'\n self._TOKEN_FACTORY.expectTokenRequest(newAuthToken, self._TOKEN_RESOURCE.authTokenLifeTime,\n self._VALID_CLIENT, self._VALID_SCOPE)\n result = self._TOKEN_RESOURCE.render_POST(request)\n self._TOKEN_FACTORY.assertAllTokensRequested()\n self.assertValidTokenResponse(\n request, result, newAuthToken,\n self._TOKEN_RESOURCE.authTokenLifeTime, expectedScope=self._VALID_SCOPE)",
"def test_patch_o_auth_authorize_token(self):\n pass",
"def test_replace_o_auth_client_authorization(self):\n pass",
"def test_replace_o_auth_authorize_token(self):\n pass",
"def test_access_token_get(self):\n client = oauth.Client(self.consumer, None)\n resp, content = client.request(self._uri('request_token'), \"GET\")\n\n self.assertEqual(int(resp['status']), 200)",
"def test_list_o_auth_client_authorization(self):\n pass",
"def test_access_token_post(self):\n client = oauth.Client(self.consumer, None)\n resp, content = client.request(self._uri('request_token'), \"POST\")\n\n self.assertEqual(int(resp['status']), 200)\n\n res = dict(parse_qsl(content))\n self.assertTrue(b'oauth_token' in res)\n self.assertTrue(b'oauth_token_secret' in res)",
"def _assertParams(self) -> None:\n params = parse_qs(self.http_client.request.call_args[1][\"data\"].decode(\"utf-8\"))\n self.assertEqual(params[\"token\"], [\"mockAccessToken\"])\n self.assertEqual(params[\"client_id\"], [CLIENT_ID])\n self.assertEqual(params[\"client_secret\"], [CLIENT_SECRET])",
"def test_auth_required(self):\n\n res = self.client.get(SERVICES_URL)\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)",
"def test_create_o_auth_access_token(self):\n pass",
"def test_patch_o_auth_client_authorization(self):\n pass",
"async def test_token_request_succeeds(hass: HomeAssistant) -> None:\n flow = config_flow.EcobeeFlowHandler()\n flow.hass = hass\n flow.hass.data[DATA_ECOBEE_CONFIG] = {}\n\n with patch(\"homeassistant.components.ecobee.config_flow.Ecobee\") as mock_ecobee:\n mock_ecobee = mock_ecobee.return_value\n mock_ecobee.request_tokens.return_value = True\n mock_ecobee.api_key = \"test-api-key\"\n mock_ecobee.refresh_token = \"test-token\"\n\n flow._ecobee = mock_ecobee\n\n result = await flow.async_step_authorize(user_input={})\n\n assert result[\"type\"] == data_entry_flow.FlowResultType.CREATE_ENTRY\n assert result[\"title\"] == DOMAIN\n assert result[\"data\"] == {\n CONF_API_KEY: \"test-api-key\",\n CONF_REFRESH_TOKEN: \"test-token\",\n }",
"def test_auth_required(self):\n res = self.client.get(RECIPE_URL)\n self.assertEqual(res.status_code,status.HTTP_401_UNAUTHORIZED)",
"def test_from_request_is_case_insensitive_checking_for_auth(self):\n url = \"http://sp.example.com/\"\n\n params = {\n 'oauth_version': \"1.0\",\n 'oauth_nonce': \"4572616e48616d6d65724c61686176\",\n 'oauth_timestamp': \"137131200\",\n 'oauth_consumer_key': \"0685bd9184jfhq22\",\n 'oauth_signature_method': \"HMAC-SHA1\",\n 'oauth_token': \"ad180jjd733klru7\",\n 'oauth_signature': \"wOJIO9A2W5mFwDgiDvZbTSMK%2FPY%3D\",\n }\n\n req = oauth.Request(\"GET\", url, params)\n headers = req.to_header()\n\n # Munge the headers\n headers['authorization'] = headers['Authorization']\n del headers['Authorization'] \n\n # Test from the headers\n req = oauth.Request.from_request(\"GET\", url, headers)\n self.assertEqual(req.method, \"GET\")\n self.assertEqual(req.url, url)\n self.assertEqual(params, req.copy())",
"def test_get_oauth2_discovery(self):\n response = self.client.get(reverse('oauth_authorization_server'))\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, \"userinfo_endpoint\")",
"def test_apis_wo_auth(self):\n\n # Order list API\n url = reverse('orders-list')\n response = self.client.get(url)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n # Order summary API\n url = reverse('order-summary-list')\n response = self.client.get(url)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n # Order create API\n url = reverse('orders-list')\n response = self.client.post(url)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n # Shares list/summary API\n url = reverse('shares-list', args=['summary'])\n response = self.client.get(url)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n url = reverse('shares-list', args=['all'])\n response = self.client.get(url)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)",
"def test_authorization(self):\n res = self.get(url=\"/products/1/pricehistory\")\n self.assertEqual(res.status_code, 401)\n self.assertException(res, exc.UnauthorizedAccess)\n res = self.get(url=\"/products/1/pricehistory\", role=\"user\")\n self.assertEqual(res.status_code, 401)\n self.assertException(res, exc.UnauthorizedAccess)",
"def check_oauth(self):\n resp = dict(self.__httprequest.POST.dict())\n orderedresp = OrderedDict(sorted(resp.items(), key=lambda t: t[0]))\n query_string = urllib.urlencode(orderedresp)\n oauth_headers = dict(signature.collect_parameters(query_string, exclude_oauth_signature=False))\n sig = oauth_headers.pop('oauth_signature')\n consumer_secret = self.get_oauthsecret_for_key(orderedresp.get('oauth_consumer_key'))\n\n oauthrequest = Oauthrequest()\n oauthrequest.params = oauth_headers.items()\n oauthrequest.uri = unicode(urllib.unquote(self.__httprequest.build_absolute_uri()))\n oauthrequest.http_method = unicode('POST')\n oauthrequest.signature = sig\n if signature.verify_hmac_sha1(request=oauthrequest, client_secret=unicode(consumer_secret)):\n return True\n return False",
"def test_authorize_no_client(self):\n invalid_params = self.valid_params.copy()\n del invalid_params['client_id']\n\n # Simple GET with invalid code parameters\n random_state = six.text_type(uuid.uuid4())\n response = self.get_json(path='/openid/authorize',\n expect_errors=True,\n state=random_state,\n **invalid_params)\n\n # Validate the error response\n self.assertValidRedirect(response=response,\n expected_status_code=302,\n redirect_uri=invalid_params['redirect_uri'],\n error='invalid_client',\n error_description=e_msg.NO_CLIENT_ID)",
"def test_authorization_one_call(self, mock_init, mock_get_token):\n creds = credentials.Credentials('file')\n # On real init we would have had access_token set to None\n creds.access_token = None\n\n auth = creds.authorization\n self.assertEqual('Bearer access_token1', auth)\n mock_get_token.assert_called_once_with(creds)"
] | [
"0.74779457",
"0.74457616",
"0.72292477",
"0.7201043",
"0.7180546",
"0.7083249",
"0.7080737",
"0.701164",
"0.70015174",
"0.6941109",
"0.69249433",
"0.68833077",
"0.67708033",
"0.6765852",
"0.6744",
"0.67292756",
"0.66937137",
"0.6668326",
"0.6645099",
"0.6577298",
"0.65717685",
"0.65608704",
"0.6470163",
"0.6466975",
"0.64472216",
"0.64369804",
"0.6413357",
"0.63765454",
"0.6371686",
"0.636632"
] | 0.8130674 | 0 |
Assert that an invalid response_type redirects back to the redirect_uri and provides the expected error response. | def test_authorize_invalid_response_type(self):
invalid_params = self.valid_params.copy()
invalid_params['response_type'] = 'invalid_code'
# Simple GET with invalid code parameters
random_state = six.text_type(uuid.uuid4())
response = self.get_json(path='/openid/authorize',
expect_errors=True,
state=random_state,
**invalid_params)
# Validate the error response
self.assertValidRedirect(response=response,
expected_status_code=302,
redirect_uri=invalid_params['redirect_uri'],
error='unsupported_response_type',
error_description=e_msg.INVALID_RESPONSE_TYPE) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_http_error_raise_with_redirect(self):\n\n resp = self.r(\n HTTPError(http_status.HTTP_201_CREATED, redirect_url='http://google.com/')\n )\n\n self.assertIsInstance(\n resp, werkzeug.wrappers.Response\n )\n\n self.assertEqual(302, resp.status_code)\n self.assertEqual('http://google.com/', resp.location)",
"def test_invalid_response_request(self, mock_post):\n self._mock_response(mock_post, valid=False)\n\n random_state = six.text_type(uuid.uuid4())\n\n # Simple GET with various parameters\n response = self.get_json(path='/openid/authorize_return',\n expect_errors=True,\n state=random_state,\n **self.valid_params)\n\n redirect_uri = self.valid_params['sb_redirect_uri']\n # Validate the redirect response\n self.assertValidRedirect(response=response,\n expected_status_code=302,\n redirect_uri=redirect_uri,\n error='access_denied',\n error_description=e_msg.OPEN_ID_TOKEN_INVALID)",
"def test_authorize_no_response_type(self):\n invalid_params = self.valid_params.copy()\n del invalid_params['response_type']\n\n # Simple GET with invalid code parameters\n random_state = six.text_type(uuid.uuid4())\n response = self.get_json(path='/openid/authorize',\n expect_errors=True,\n state=random_state,\n **invalid_params)\n\n # Validate the error response\n self.assertValidRedirect(response=response,\n expected_status_code=302,\n redirect_uri=invalid_params['redirect_uri'],\n error='unsupported_response_type',\n error_description=e_msg.NO_RESPONSE_TYPE)",
"def test_authorize_invalid_redirect_uri(self):\n invalid_params = self.valid_params.copy()\n invalid_params['redirect_uri'] = 'not_a_valid_uri'\n\n # Simple GET with invalid code parameters\n random_state = six.text_type(uuid.uuid4())\n response = self.get_json(path='/openid/authorize',\n expect_errors=True,\n state=random_state,\n **invalid_params)\n\n # Assert that this is NOT a redirect\n self.assertEqual(400, response.status_code)\n self.assertIsNotNone(response.json)\n self.assertEqual('invalid_request', response.json['error'])\n self.assertEqual(e_msg.INVALID_REDIRECT_URI,\n response.json['error_description'])",
"def assertValidRedirect(self, response, redirect_uri,\n expected_status_code, **kwargs):\n\n self.assertEqual(expected_status_code, response.status_code)\n # Split the url into parts.\n location = response.headers.get('Location')\n location_url = urlparse.urlparse(location)\n parameters = urlparse.parse_qs(location_url[4])\n\n # Break out the redirect uri to compare and make sure we're headed\n # back to the redirect URI with the appropriate error codes.\n configured_url = urlparse.urlparse(redirect_uri)\n self.assertEqual(configured_url[0], location_url[0])\n self.assertEqual(configured_url[1], location_url[1])\n self.assertEqual(configured_url[2], location_url[2])\n self.assertEqual(configured_url[3], location_url[3])\n # 4 is ignored, it contains new parameters.\n self.assertEqual(configured_url[5], location_url[5])\n\n # Make sure we have the correct error response.\n self.assertEqual(len(kwargs), len(parameters))\n for key, value in six.iteritems(kwargs):\n self.assertIn(key, parameters)\n self.assertIsNotNone(parameters[key])\n self.assertEqual(value, parameters[key][0])",
"def assertIsRedirect(self, response, path=None):\n self.assertIn(response.status_code, range(300, 400), str(response) + ' is not a redirect')\n if path:\n self.assertEqual(response['location'], path)",
"def assertRedirects(self, response, expected_url, status_code=302,\n target_status_code=200, host=None, msg_prefix=''):\n if msg_prefix:\n msg_prefix += \": \"\n\n if hasattr(response, 'redirect_chain'):\n # The request was a followed redirect\n self.failUnless(\n len(response.redirect_chain) > 0,\n msg_prefix + \"Response didn't redirect as expected: Response\"\n \" code was %d (expected %d)\" % (response.status_code, status_code)\n )\n\n self.assertEqual(\n response.redirect_chain[0][1], status_code,\n msg_prefix + \"Initial response didn't redirect as expected:\"\n \" Response code was %d (expected %d)\" %\n (response.redirect_chain[0][1], status_code)\n )\n\n url, status_code = response.redirect_chain[-1]\n\n self.assertEqual(\n response.status_code, target_status_code,\n msg_prefix + \"Response didn't redirect as expected: Final\"\n \" Response code was %d (expected %d)\" % (response.status_code, target_status_code)\n )\n\n else:\n # Not a followed redirect\n self.assertEqual(\n response.status_code, status_code,\n msg_prefix + \"Response didn't redirect as expected: Response\"\n \" code was %d (expected %d)\" % (response.status_code, status_code)\n )\n\n url = response['Location']\n scheme, netloc, path, query, fragment = urlsplit(url)\n\n redirect_response = self.get(\n urlunsplit((scheme, netloc, path, None, None)),\n QueryDict(query),\n )\n\n # Get the redirection page, using the same client that was used\n # to obtain the original response.\n self.assertEqual(\n redirect_response.status_code, target_status_code,\n msg_prefix + \"Couldn't retrieve redirection page '%s':\"\n \" response code was %d (expected %d)\" %\n (path, redirect_response.status_code, target_status_code)\n )\n\n e_scheme, e_netloc, e_path, e_query, e_fragment = urlsplit(expected_url)\n if not (e_scheme or e_netloc):\n expected_url = urlunsplit(('http', host or 'testserver', e_path, e_query, e_fragment))\n\n self.assertEqual(\n url,\n expected_url,\n msg_prefix + \"Response redirected to '%s', expected '%s'\" % (url, expected_url),\n )",
"def _assert_redirect_url(self, response, expected_redirect_url):\n response_dict = json.loads(response.content.decode('utf-8'))\n assert 'redirect_url' in response_dict, (\n \"Response JSON unexpectedly does not have redirect_url: {!r}\".format(\n response_dict\n )\n )\n assert response_dict['redirect_url'] == expected_redirect_url",
"def test_authentication_error(self):\n resp = self.client.post(\n reverse(self.provider.id + \"_callback\"),\n data={\"error\": \"misc\", \"state\": \"testingstate123\"},\n )\n assert reverse(\"apple_finish_callback\") in resp.url\n # Follow the redirect\n resp = self.client.get(resp.url)\n\n self.assertTemplateUsed(\n resp,\n \"socialaccount/authentication_error.%s\"\n % getattr(settings, \"ACCOUNT_TEMPLATE_EXTENSION\", \"html\"),\n )",
"def _get_authorize_error_response(error, redirect_uri):\n params = error.get_body()\n uri = add_params_to_uri(redirect_uri, params)\n headers = [(\"Location\", uri)]\n response = flask.Response(\"\", status=302, headers=headers)\n return response",
"def assert_exception_redirect_looks_correct(self, auth_entry=None):\r\n exception_middleware = middleware.ExceptionMiddleware()\r\n request, _ = self.get_request_and_strategy(auth_entry=auth_entry)\r\n response = exception_middleware.process_exception(\r\n request, exceptions.AuthCanceled(request.social_strategy.backend))\r\n location = response.get('Location')\r\n\r\n self.assertEqual(302, response.status_code)\r\n self.assertIn('canceled', location)\r\n self.assertIn(self.backend_name, location)\r\n\r\n if auth_entry:\r\n # Custom redirection to form.\r\n self.assertTrue(location.startswith('/' + auth_entry))\r\n else:\r\n # Stock framework redirection to root.\r\n self.assertTrue(location.startswith('/?'))",
"def assert_redirect_to_provider_looks_correct(self, response):\r\n self.assertEqual(302, response.status_code)\r\n self.assertTrue(response.has_header('Location'))",
"def test_invalid_redirect_no_email(self, mock_post):\n self._mock_response(mock_post, valid=True)\n\n random_state = six.text_type(uuid.uuid4())\n\n invalid_params = self.valid_params.copy()\n del invalid_params['openid.sreg.email']\n\n # Simple GET with various parameters\n response = self.get_json(path='/openid/authorize_return',\n expect_errors=True,\n state=random_state,\n **invalid_params)\n\n redirect_uri = self.valid_params['sb_redirect_uri']\n # Validate the redirect response\n self.assertValidRedirect(response=response,\n expected_status_code=302,\n redirect_uri=redirect_uri,\n error='invalid_request',\n error_description=e_msg.INVALID_NO_EMAIL)",
"def test_authorize_no_redirect_uri(self):\n invalid_params = self.valid_params.copy()\n del invalid_params['redirect_uri']\n\n # Simple GET with invalid code parameters\n random_state = six.text_type(uuid.uuid4())\n response = self.get_json(path='/openid/authorize',\n expect_errors=True,\n state=random_state,\n **invalid_params)\n\n # Assert that this is NOT a redirect\n self.assertEqual(400, response.status_code)\n self.assertIsNotNone(response.json)\n self.assertEqual('invalid_request', response.json['error'])\n self.assertEqual(e_msg.NO_REDIRECT_URI,\n response.json['error_description'])",
"def test_invalid_redirect_no_name(self, mock_post):\n self._mock_response(mock_post, valid=True)\n\n random_state = six.text_type(uuid.uuid4())\n\n invalid_params = self.valid_params.copy()\n del invalid_params['openid.sreg.fullname']\n\n # Simple GET with various parameters\n response = self.get_json(path='/openid/authorize_return',\n expect_errors=True,\n state=random_state,\n **invalid_params)\n\n redirect_uri = self.valid_params['sb_redirect_uri']\n # Validate the redirect response\n self.assertValidRedirect(response=response,\n expected_status_code=302,\n redirect_uri=redirect_uri,\n error='invalid_request',\n error_description=e_msg.INVALID_NO_NAME)",
"def test_valid_response_request(self, mock_post):\n self._mock_response(mock_post, valid=True)\n\n random_state = six.text_type(uuid.uuid4())\n\n # Simple GET with various parameters\n response = self.get_json(path='/openid/authorize_return',\n expect_errors=True,\n state=random_state,\n **self.valid_params)\n\n # Try to pull the code out of the response\n location = response.headers.get('Location')\n location_url = urlparse.urlparse(location)\n parameters = urlparse.parse_qs(location_url[4])\n\n with base.HybridSessionManager():\n token = auth_api.authorization_code_get(parameters['code'])\n\n redirect_uri = self.valid_params['sb_redirect_uri']\n # Validate the redirect response\n self.assertValidRedirect(response=response,\n expected_status_code=302,\n redirect_uri=redirect_uri,\n state=token.state,\n code=token.code)",
"def error_response(self, error, **kwargs):\n oauthlib_error = error.oauthlib_error\n error_response = {\n 'error': oauthlib_error,\n 'url': '{0}?{1}'.format(oauthlib_error.redirect_uri, oauthlib_error.urlencoded)\n }\n error_response.update(kwargs)\n\n if isinstance(error, FatalClientError):\n redirect = False\n else:\n redirect = True\n\n return redirect, error_response",
"def test_invalid_grant_type(self):\n\n # Generate a valid auth token\n with base.HybridSessionManager():\n authorization_code = auth_api.authorization_code_save({\n 'user_id': 2,\n 'state': 'test_state',\n 'code': 'test_valid_code',\n 'expires_in': 300\n })\n\n content_type = 'application/x-www-form-urlencoded'\n # POST with content: application/x-www-form-urlencoded\n response = self.app.post('/v1/openid/token',\n params={\n 'code': authorization_code.code,\n 'grant_type': 'invalid_grant_type'\n },\n content_type=content_type,\n expect_errors=True)\n\n # Assert that this is a successful response\n self.assertEqual(400, response.status_code)\n self.assertIsNotNone(response.json)\n self.assertEqual('unsupported_grant_type', response.json['error'])\n self.assertEqual(e_msg.INVALID_TOKEN_GRANT_TYPE,\n response.json['error_description'])",
"def assert_has_valid_error(self, response, expected_code):\r\n assert 'error' in response\r\n assert len(response) == 1\r\n \r\n error = response['error']\r\n assert 'code' in error\r\n assert error['code'] == expected_code\r\n assert 'title' in error\r\n assert isinstance(error['title'], str)\r\n assert 'message' in error\r\n assert isinstance(error['message'], str)",
"def test_client_submit_response_incorrect(self, mock_urlopen):\n mock_resp = mock.Mock()\n mock_resp.read.return_value = json.dumps(\n {'success': False, 'error-codes': ['ERROR']})\n mock_urlopen.return_value = mock_resp\n result = client.submit('a', 'a', 'a')\n self.assertFalse(result.is_valid)\n self.assertEqual(result.error_codes, ['ERROR'])",
"def test_redirect(self):\n self.app.app.preprocess_request()\n\n resp = self.r(\n ({}, # data\n 302, # status code\n None, # headers\n 'http://google.com/', # redirect_uri\n )\n )\n\n self.assertIsInstance(\n resp,\n werkzeug.wrappers.Response,\n )\n self.assertEqual(302, resp.status_code)\n self.assertEqual('http://google.com/', resp.location)",
"def assert_redirect_to_register_looks_correct(self, response):\r\n self.assertEqual(302, response.status_code)\r\n self.assertEqual('/' + pipeline.AUTH_ENTRY_REGISTER, response.get('Location'))",
"def test_http_error_raised(self):\n\n self.app.app.preprocess_request()\n\n err = HTTPError(http_status.HTTP_404_NOT_FOUND)\n\n resp = self.r(err)\n\n self.assertIn(\n err.to_data()['message_short'],\n resp[0].decode(),\n )\n self.assertEqual(\n http_status.HTTP_404_NOT_FOUND,\n resp[1],\n )",
"def assertHttpBadRequest(self, response):\r\n self.assertEqual(response.status_code, 400)",
"def test_403_response(self):\n mock = Mock()\n mock.status_code = 403\n\n with self.assertRaises(AuthError):\n check_response(mock)",
"def test_redirect(self):\n resp = flask.make_response('')\n\n self.assertIsInstance(\n self.r(resp),\n werkzeug.wrappers.Response,\n )",
"def test_redirect(self):\n resp = flask.make_response('')\n\n self.assertIsInstance(\n self.r(resp),\n werkzeug.wrappers.Response,\n )",
"def test_errors(self):\n rc = self.app.get('/this_should_not_exist', follow_redirects=True)\n assert b'404 error :(' in rc.data",
"def testInvalidContentType(self):\n request = MockRequest('POST', 'token', arguments={\n 'grant_type': 'refresh_token',\n 'refresh_token': self._VALID_REFRESH_TOKEN\n })\n request.setRequestHeader('Content-Type', 'application/not-x-www-form-urlencoded')\n result = self._TOKEN_RESOURCE.render_POST(request)\n self.assertFailedTokenRequest(\n request, result,\n MalformedRequestError('The Content-Type must be \"application/x-www-form-urlencoded\"'),\n msg='Expected the token resource to reject a request with an invalid content type.')",
"def assertRedirects(self, response, url):\n self.assert302(response)\n\n location = response.headers.get('Location')\n if url.startswith('http'):\n location = self.get_url(location)\n self.assertEqual(location, url)"
] | [
"0.75191337",
"0.72961164",
"0.7171664",
"0.70651674",
"0.6875958",
"0.67515904",
"0.6627203",
"0.6615773",
"0.65987235",
"0.6589537",
"0.6552112",
"0.6536879",
"0.6508044",
"0.6480439",
"0.63691235",
"0.6355215",
"0.6341384",
"0.6288093",
"0.62804973",
"0.62268054",
"0.6181956",
"0.6175572",
"0.61726326",
"0.61515975",
"0.6151157",
"0.61404616",
"0.61404616",
"0.6134338",
"0.6083359",
"0.6080618"
] | 0.7701483 | 0 |
Assert that a nonexistent response_type redirects back to the redirect_uri and provides the expected error response. | def test_authorize_no_response_type(self):
invalid_params = self.valid_params.copy()
del invalid_params['response_type']
# Simple GET with invalid code parameters
random_state = six.text_type(uuid.uuid4())
response = self.get_json(path='/openid/authorize',
expect_errors=True,
state=random_state,
**invalid_params)
# Validate the error response
self.assertValidRedirect(response=response,
expected_status_code=302,
redirect_uri=invalid_params['redirect_uri'],
error='unsupported_response_type',
error_description=e_msg.NO_RESPONSE_TYPE) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_authorize_invalid_response_type(self):\n invalid_params = self.valid_params.copy()\n invalid_params['response_type'] = 'invalid_code'\n\n # Simple GET with invalid code parameters\n random_state = six.text_type(uuid.uuid4())\n response = self.get_json(path='/openid/authorize',\n expect_errors=True,\n state=random_state,\n **invalid_params)\n\n # Validate the error response\n self.assertValidRedirect(response=response,\n expected_status_code=302,\n redirect_uri=invalid_params['redirect_uri'],\n error='unsupported_response_type',\n error_description=e_msg.INVALID_RESPONSE_TYPE)",
"def test_http_error_raise_with_redirect(self):\n\n resp = self.r(\n HTTPError(http_status.HTTP_201_CREATED, redirect_url='http://google.com/')\n )\n\n self.assertIsInstance(\n resp, werkzeug.wrappers.Response\n )\n\n self.assertEqual(302, resp.status_code)\n self.assertEqual('http://google.com/', resp.location)",
"def test_invalid_response_request(self, mock_post):\n self._mock_response(mock_post, valid=False)\n\n random_state = six.text_type(uuid.uuid4())\n\n # Simple GET with various parameters\n response = self.get_json(path='/openid/authorize_return',\n expect_errors=True,\n state=random_state,\n **self.valid_params)\n\n redirect_uri = self.valid_params['sb_redirect_uri']\n # Validate the redirect response\n self.assertValidRedirect(response=response,\n expected_status_code=302,\n redirect_uri=redirect_uri,\n error='access_denied',\n error_description=e_msg.OPEN_ID_TOKEN_INVALID)",
"def assertIsRedirect(self, response, path=None):\n self.assertIn(response.status_code, range(300, 400), str(response) + ' is not a redirect')\n if path:\n self.assertEqual(response['location'], path)",
"def test_authorize_invalid_redirect_uri(self):\n invalid_params = self.valid_params.copy()\n invalid_params['redirect_uri'] = 'not_a_valid_uri'\n\n # Simple GET with invalid code parameters\n random_state = six.text_type(uuid.uuid4())\n response = self.get_json(path='/openid/authorize',\n expect_errors=True,\n state=random_state,\n **invalid_params)\n\n # Assert that this is NOT a redirect\n self.assertEqual(400, response.status_code)\n self.assertIsNotNone(response.json)\n self.assertEqual('invalid_request', response.json['error'])\n self.assertEqual(e_msg.INVALID_REDIRECT_URI,\n response.json['error_description'])",
"def test_authorize_no_redirect_uri(self):\n invalid_params = self.valid_params.copy()\n del invalid_params['redirect_uri']\n\n # Simple GET with invalid code parameters\n random_state = six.text_type(uuid.uuid4())\n response = self.get_json(path='/openid/authorize',\n expect_errors=True,\n state=random_state,\n **invalid_params)\n\n # Assert that this is NOT a redirect\n self.assertEqual(400, response.status_code)\n self.assertIsNotNone(response.json)\n self.assertEqual('invalid_request', response.json['error'])\n self.assertEqual(e_msg.NO_REDIRECT_URI,\n response.json['error_description'])",
"def _assert_redirect_url(self, response, expected_redirect_url):\n response_dict = json.loads(response.content.decode('utf-8'))\n assert 'redirect_url' in response_dict, (\n \"Response JSON unexpectedly does not have redirect_url: {!r}\".format(\n response_dict\n )\n )\n assert response_dict['redirect_url'] == expected_redirect_url",
"def assert_redirect_to_provider_looks_correct(self, response):\r\n self.assertEqual(302, response.status_code)\r\n self.assertTrue(response.has_header('Location'))",
"def test_authentication_error(self):\n resp = self.client.post(\n reverse(self.provider.id + \"_callback\"),\n data={\"error\": \"misc\", \"state\": \"testingstate123\"},\n )\n assert reverse(\"apple_finish_callback\") in resp.url\n # Follow the redirect\n resp = self.client.get(resp.url)\n\n self.assertTemplateUsed(\n resp,\n \"socialaccount/authentication_error.%s\"\n % getattr(settings, \"ACCOUNT_TEMPLATE_EXTENSION\", \"html\"),\n )",
"def test_invalid_redirect_no_name(self, mock_post):\n self._mock_response(mock_post, valid=True)\n\n random_state = six.text_type(uuid.uuid4())\n\n invalid_params = self.valid_params.copy()\n del invalid_params['openid.sreg.fullname']\n\n # Simple GET with various parameters\n response = self.get_json(path='/openid/authorize_return',\n expect_errors=True,\n state=random_state,\n **invalid_params)\n\n redirect_uri = self.valid_params['sb_redirect_uri']\n # Validate the redirect response\n self.assertValidRedirect(response=response,\n expected_status_code=302,\n redirect_uri=redirect_uri,\n error='invalid_request',\n error_description=e_msg.INVALID_NO_NAME)",
"def test_invalid_redirect_no_email(self, mock_post):\n self._mock_response(mock_post, valid=True)\n\n random_state = six.text_type(uuid.uuid4())\n\n invalid_params = self.valid_params.copy()\n del invalid_params['openid.sreg.email']\n\n # Simple GET with various parameters\n response = self.get_json(path='/openid/authorize_return',\n expect_errors=True,\n state=random_state,\n **invalid_params)\n\n redirect_uri = self.valid_params['sb_redirect_uri']\n # Validate the redirect response\n self.assertValidRedirect(response=response,\n expected_status_code=302,\n redirect_uri=redirect_uri,\n error='invalid_request',\n error_description=e_msg.INVALID_NO_EMAIL)",
"def assertRedirects(self, response, expected_url, status_code=302,\n target_status_code=200, host=None, msg_prefix=''):\n if msg_prefix:\n msg_prefix += \": \"\n\n if hasattr(response, 'redirect_chain'):\n # The request was a followed redirect\n self.failUnless(\n len(response.redirect_chain) > 0,\n msg_prefix + \"Response didn't redirect as expected: Response\"\n \" code was %d (expected %d)\" % (response.status_code, status_code)\n )\n\n self.assertEqual(\n response.redirect_chain[0][1], status_code,\n msg_prefix + \"Initial response didn't redirect as expected:\"\n \" Response code was %d (expected %d)\" %\n (response.redirect_chain[0][1], status_code)\n )\n\n url, status_code = response.redirect_chain[-1]\n\n self.assertEqual(\n response.status_code, target_status_code,\n msg_prefix + \"Response didn't redirect as expected: Final\"\n \" Response code was %d (expected %d)\" % (response.status_code, target_status_code)\n )\n\n else:\n # Not a followed redirect\n self.assertEqual(\n response.status_code, status_code,\n msg_prefix + \"Response didn't redirect as expected: Response\"\n \" code was %d (expected %d)\" % (response.status_code, status_code)\n )\n\n url = response['Location']\n scheme, netloc, path, query, fragment = urlsplit(url)\n\n redirect_response = self.get(\n urlunsplit((scheme, netloc, path, None, None)),\n QueryDict(query),\n )\n\n # Get the redirection page, using the same client that was used\n # to obtain the original response.\n self.assertEqual(\n redirect_response.status_code, target_status_code,\n msg_prefix + \"Couldn't retrieve redirection page '%s':\"\n \" response code was %d (expected %d)\" %\n (path, redirect_response.status_code, target_status_code)\n )\n\n e_scheme, e_netloc, e_path, e_query, e_fragment = urlsplit(expected_url)\n if not (e_scheme or e_netloc):\n expected_url = urlunsplit(('http', host or 'testserver', e_path, e_query, e_fragment))\n\n self.assertEqual(\n url,\n expected_url,\n msg_prefix + \"Response redirected to '%s', expected '%s'\" % (url, expected_url),\n )",
"def assert_exception_redirect_looks_correct(self, auth_entry=None):\r\n exception_middleware = middleware.ExceptionMiddleware()\r\n request, _ = self.get_request_and_strategy(auth_entry=auth_entry)\r\n response = exception_middleware.process_exception(\r\n request, exceptions.AuthCanceled(request.social_strategy.backend))\r\n location = response.get('Location')\r\n\r\n self.assertEqual(302, response.status_code)\r\n self.assertIn('canceled', location)\r\n self.assertIn(self.backend_name, location)\r\n\r\n if auth_entry:\r\n # Custom redirection to form.\r\n self.assertTrue(location.startswith('/' + auth_entry))\r\n else:\r\n # Stock framework redirection to root.\r\n self.assertTrue(location.startswith('/?'))",
"def assertValidRedirect(self, response, redirect_uri,\n expected_status_code, **kwargs):\n\n self.assertEqual(expected_status_code, response.status_code)\n # Split the url into parts.\n location = response.headers.get('Location')\n location_url = urlparse.urlparse(location)\n parameters = urlparse.parse_qs(location_url[4])\n\n # Break out the redirect uri to compare and make sure we're headed\n # back to the redirect URI with the appropriate error codes.\n configured_url = urlparse.urlparse(redirect_uri)\n self.assertEqual(configured_url[0], location_url[0])\n self.assertEqual(configured_url[1], location_url[1])\n self.assertEqual(configured_url[2], location_url[2])\n self.assertEqual(configured_url[3], location_url[3])\n # 4 is ignored, it contains new parameters.\n self.assertEqual(configured_url[5], location_url[5])\n\n # Make sure we have the correct error response.\n self.assertEqual(len(kwargs), len(parameters))\n for key, value in six.iteritems(kwargs):\n self.assertIn(key, parameters)\n self.assertIsNotNone(parameters[key])\n self.assertEqual(value, parameters[key][0])",
"def test_errors(self):\n rc = self.app.get('/this_should_not_exist', follow_redirects=True)\n assert b'404 error :(' in rc.data",
"def _get_authorize_error_response(error, redirect_uri):\n params = error.get_body()\n uri = add_params_to_uri(redirect_uri, params)\n headers = [(\"Location\", uri)]\n response = flask.Response(\"\", status=302, headers=headers)\n return response",
"def assert404(self, response):\n self.assertEqual(response.status_code, 404)",
"def test_redirect(self):\n resp = flask.make_response('')\n\n self.assertIsInstance(\n self.r(resp),\n werkzeug.wrappers.Response,\n )",
"def test_redirect(self):\n resp = flask.make_response('')\n\n self.assertIsInstance(\n self.r(resp),\n werkzeug.wrappers.Response,\n )",
"def assert404(self, response):\n self.assertTrue(response.status_code == 404)",
"def test_http_error_raised(self):\n\n self.app.app.preprocess_request()\n\n err = HTTPError(http_status.HTTP_404_NOT_FOUND)\n\n resp = self.r(err)\n\n self.assertIn(\n err.to_data()['message_short'],\n resp[0].decode(),\n )\n self.assertEqual(\n http_status.HTTP_404_NOT_FOUND,\n resp[1],\n )",
"def error_response(self, error, **kwargs):\n oauthlib_error = error.oauthlib_error\n error_response = {\n 'error': oauthlib_error,\n 'url': '{0}?{1}'.format(oauthlib_error.redirect_uri, oauthlib_error.urlencoded)\n }\n error_response.update(kwargs)\n\n if isinstance(error, FatalClientError):\n redirect = False\n else:\n redirect = True\n\n return redirect, error_response",
"def test_errors(self):\n response = self.client.get(reverse('users:resend_confirmation_email'))\n self.assertEqual(response.status_code, 404)\n\n response = self.client.get(\n reverse(\n 'users:resend_confirmation_email',\n ),\n data={\n 'redirect_to': reverse('users:login'),\n 'username': 'wrong_username',\n }\n )\n self.assertEqual(response.status_code, 404)",
"def assert_redirect_to_register_looks_correct(self, response):\r\n self.assertEqual(302, response.status_code)\r\n self.assertEqual('/' + pipeline.AUTH_ENTRY_REGISTER, response.get('Location'))",
"def assertRedirects(self, response, url):\n server_name = self.app.config.get('SERVER_NAME') or 'localhost'\n redirect_url = response.headers.get('Location', None)\n target_url = urljoin('http://{}'.format(server_name), url)\n self.assertEqual(redirect_url, target_url)",
"def test_client_submit_response_incorrect(self, mock_urlopen):\n mock_resp = mock.Mock()\n mock_resp.read.return_value = json.dumps(\n {'success': False, 'error-codes': ['ERROR']})\n mock_urlopen.return_value = mock_resp\n result = client.submit('a', 'a', 'a')\n self.assertFalse(result.is_valid)\n self.assertEqual(result.error_codes, ['ERROR'])",
"def test_403_response(self):\n mock = Mock()\n mock.status_code = 403\n\n with self.assertRaises(AuthError):\n check_response(mock)",
"def test_redirect(self):\n self.app.app.preprocess_request()\n\n resp = self.r(\n ({}, # data\n 302, # status code\n None, # headers\n 'http://google.com/', # redirect_uri\n )\n )\n\n self.assertIsInstance(\n resp,\n werkzeug.wrappers.Response,\n )\n self.assertEqual(302, resp.status_code)\n self.assertEqual('http://google.com/', resp.location)",
"def test_valid_response_request(self, mock_post):\n self._mock_response(mock_post, valid=True)\n\n random_state = six.text_type(uuid.uuid4())\n\n # Simple GET with various parameters\n response = self.get_json(path='/openid/authorize_return',\n expect_errors=True,\n state=random_state,\n **self.valid_params)\n\n # Try to pull the code out of the response\n location = response.headers.get('Location')\n location_url = urlparse.urlparse(location)\n parameters = urlparse.parse_qs(location_url[4])\n\n with base.HybridSessionManager():\n token = auth_api.authorization_code_get(parameters['code'])\n\n redirect_uri = self.valid_params['sb_redirect_uri']\n # Validate the redirect response\n self.assertValidRedirect(response=response,\n expected_status_code=302,\n redirect_uri=redirect_uri,\n state=token.state,\n code=token.code)",
"def assertRedirects(self, response, url):\n self.assert302(response)\n\n location = response.headers.get('Location')\n if url.startswith('http'):\n location = self.get_url(location)\n self.assertEqual(location, url)"
] | [
"0.7420597",
"0.7164122",
"0.6910888",
"0.6648324",
"0.6642587",
"0.6474151",
"0.64672065",
"0.6405829",
"0.6382179",
"0.6363759",
"0.63207835",
"0.6319008",
"0.62895614",
"0.6289121",
"0.62787527",
"0.6247881",
"0.6192791",
"0.61116356",
"0.61116356",
"0.60881925",
"0.6087155",
"0.6077399",
"0.6062357",
"0.6032242",
"0.6012859",
"0.60040593",
"0.6002961",
"0.5993456",
"0.59884995",
"0.59796256"
] | 0.73475444 | 1 |
Assert that an invalid scope redirects back to the redirect_uri and provides the expected error response. | def test_authorize_invalid_scope(self):
invalid_params = self.valid_params.copy()
invalid_params['scope'] = 'invalid_scope'
# Simple GET with invalid code parameters
random_state = six.text_type(uuid.uuid4())
response = self.get_json(path='/openid/authorize',
expect_errors=True,
state=random_state,
**invalid_params)
# Validate the error response
self.assertValidRedirect(response=response,
expected_status_code=302,
redirect_uri=invalid_params['redirect_uri'],
error='invalid_scope',
error_description=e_msg.INVALID_SCOPE) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_authorize_invalid_redirect_uri(self):\n invalid_params = self.valid_params.copy()\n invalid_params['redirect_uri'] = 'not_a_valid_uri'\n\n # Simple GET with invalid code parameters\n random_state = six.text_type(uuid.uuid4())\n response = self.get_json(path='/openid/authorize',\n expect_errors=True,\n state=random_state,\n **invalid_params)\n\n # Assert that this is NOT a redirect\n self.assertEqual(400, response.status_code)\n self.assertIsNotNone(response.json)\n self.assertEqual('invalid_request', response.json['error'])\n self.assertEqual(e_msg.INVALID_REDIRECT_URI,\n response.json['error_description'])",
"def test_http_error_raise_with_redirect(self):\n\n resp = self.r(\n HTTPError(http_status.HTTP_201_CREATED, redirect_url='http://google.com/')\n )\n\n self.assertIsInstance(\n resp, werkzeug.wrappers.Response\n )\n\n self.assertEqual(302, resp.status_code)\n self.assertEqual('http://google.com/', resp.location)",
"def test_invalid_response_request(self, mock_post):\n self._mock_response(mock_post, valid=False)\n\n random_state = six.text_type(uuid.uuid4())\n\n # Simple GET with various parameters\n response = self.get_json(path='/openid/authorize_return',\n expect_errors=True,\n state=random_state,\n **self.valid_params)\n\n redirect_uri = self.valid_params['sb_redirect_uri']\n # Validate the redirect response\n self.assertValidRedirect(response=response,\n expected_status_code=302,\n redirect_uri=redirect_uri,\n error='access_denied',\n error_description=e_msg.OPEN_ID_TOKEN_INVALID)",
"def test_authorize_no_scope(self):\n invalid_params = self.valid_params.copy()\n del invalid_params['scope']\n\n # Simple GET with invalid code parameters\n random_state = six.text_type(uuid.uuid4())\n response = self.get_json(path='/openid/authorize',\n expect_errors=True,\n state=random_state,\n **invalid_params)\n\n # Validate the error response\n self.assertValidRedirect(response=response,\n expected_status_code=302,\n redirect_uri=invalid_params['redirect_uri'],\n error='invalid_scope',\n error_description=e_msg.NO_SCOPE)",
"def test_invalid_redirect_no_name(self, mock_post):\n self._mock_response(mock_post, valid=True)\n\n random_state = six.text_type(uuid.uuid4())\n\n invalid_params = self.valid_params.copy()\n del invalid_params['openid.sreg.fullname']\n\n # Simple GET with various parameters\n response = self.get_json(path='/openid/authorize_return',\n expect_errors=True,\n state=random_state,\n **invalid_params)\n\n redirect_uri = self.valid_params['sb_redirect_uri']\n # Validate the redirect response\n self.assertValidRedirect(response=response,\n expected_status_code=302,\n redirect_uri=redirect_uri,\n error='invalid_request',\n error_description=e_msg.INVALID_NO_NAME)",
"def test_authorize_no_redirect_uri(self):\n invalid_params = self.valid_params.copy()\n del invalid_params['redirect_uri']\n\n # Simple GET with invalid code parameters\n random_state = six.text_type(uuid.uuid4())\n response = self.get_json(path='/openid/authorize',\n expect_errors=True,\n state=random_state,\n **invalid_params)\n\n # Assert that this is NOT a redirect\n self.assertEqual(400, response.status_code)\n self.assertIsNotNone(response.json)\n self.assertEqual('invalid_request', response.json['error'])\n self.assertEqual(e_msg.NO_REDIRECT_URI,\n response.json['error_description'])",
"def test_authorize_invalid_response_type(self):\n invalid_params = self.valid_params.copy()\n invalid_params['response_type'] = 'invalid_code'\n\n # Simple GET with invalid code parameters\n random_state = six.text_type(uuid.uuid4())\n response = self.get_json(path='/openid/authorize',\n expect_errors=True,\n state=random_state,\n **invalid_params)\n\n # Validate the error response\n self.assertValidRedirect(response=response,\n expected_status_code=302,\n redirect_uri=invalid_params['redirect_uri'],\n error='unsupported_response_type',\n error_description=e_msg.INVALID_RESPONSE_TYPE)",
"def test_invalid_redirect_no_email(self, mock_post):\n self._mock_response(mock_post, valid=True)\n\n random_state = six.text_type(uuid.uuid4())\n\n invalid_params = self.valid_params.copy()\n del invalid_params['openid.sreg.email']\n\n # Simple GET with various parameters\n response = self.get_json(path='/openid/authorize_return',\n expect_errors=True,\n state=random_state,\n **invalid_params)\n\n redirect_uri = self.valid_params['sb_redirect_uri']\n # Validate the redirect response\n self.assertValidRedirect(response=response,\n expected_status_code=302,\n redirect_uri=redirect_uri,\n error='invalid_request',\n error_description=e_msg.INVALID_NO_EMAIL)",
"def _get_authorize_error_response(error, redirect_uri):\n params = error.get_body()\n uri = add_params_to_uri(redirect_uri, params)\n headers = [(\"Location\", uri)]\n response = flask.Response(\"\", status=302, headers=headers)\n return response",
"def test_authorize_invalid_client(self):\n invalid_params = self.valid_params.copy()\n invalid_params['client_id'] = 'invalid_client'\n\n # Simple GET with invalid code parameters\n random_state = six.text_type(uuid.uuid4())\n response = self.get_json(path='/openid/authorize',\n expect_errors=True,\n state=random_state,\n **invalid_params)\n\n # Validate the error response\n self.assertValidRedirect(response=response,\n expected_status_code=302,\n redirect_uri=invalid_params['redirect_uri'],\n error='unauthorized_client',\n error_description=e_msg.INVALID_CLIENT_ID)",
"def assertValidRedirect(self, response, redirect_uri,\n expected_status_code, **kwargs):\n\n self.assertEqual(expected_status_code, response.status_code)\n # Split the url into parts.\n location = response.headers.get('Location')\n location_url = urlparse.urlparse(location)\n parameters = urlparse.parse_qs(location_url[4])\n\n # Break out the redirect uri to compare and make sure we're headed\n # back to the redirect URI with the appropriate error codes.\n configured_url = urlparse.urlparse(redirect_uri)\n self.assertEqual(configured_url[0], location_url[0])\n self.assertEqual(configured_url[1], location_url[1])\n self.assertEqual(configured_url[2], location_url[2])\n self.assertEqual(configured_url[3], location_url[3])\n # 4 is ignored, it contains new parameters.\n self.assertEqual(configured_url[5], location_url[5])\n\n # Make sure we have the correct error response.\n self.assertEqual(len(kwargs), len(parameters))\n for key, value in six.iteritems(kwargs):\n self.assertIn(key, parameters)\n self.assertIsNotNone(parameters[key])\n self.assertEqual(value, parameters[key][0])",
"def assert_exception_redirect_looks_correct(self, auth_entry=None):\r\n exception_middleware = middleware.ExceptionMiddleware()\r\n request, _ = self.get_request_and_strategy(auth_entry=auth_entry)\r\n response = exception_middleware.process_exception(\r\n request, exceptions.AuthCanceled(request.social_strategy.backend))\r\n location = response.get('Location')\r\n\r\n self.assertEqual(302, response.status_code)\r\n self.assertIn('canceled', location)\r\n self.assertIn(self.backend_name, location)\r\n\r\n if auth_entry:\r\n # Custom redirection to form.\r\n self.assertTrue(location.startswith('/' + auth_entry))\r\n else:\r\n # Stock framework redirection to root.\r\n self.assertTrue(location.startswith('/?'))",
"def test_authentication_error(self):\n resp = self.client.post(\n reverse(self.provider.id + \"_callback\"),\n data={\"error\": \"misc\", \"state\": \"testingstate123\"},\n )\n assert reverse(\"apple_finish_callback\") in resp.url\n # Follow the redirect\n resp = self.client.get(resp.url)\n\n self.assertTemplateUsed(\n resp,\n \"socialaccount/authentication_error.%s\"\n % getattr(settings, \"ACCOUNT_TEMPLATE_EXTENSION\", \"html\"),\n )",
"def test_errors(self):\n response = self.client.get(reverse('users:resend_confirmation_email'))\n self.assertEqual(response.status_code, 404)\n\n response = self.client.get(\n reverse(\n 'users:resend_confirmation_email',\n ),\n data={\n 'redirect_to': reverse('users:login'),\n 'username': 'wrong_username',\n }\n )\n self.assertEqual(response.status_code, 404)",
"def assert_redirect_to_provider_looks_correct(self, response):\r\n self.assertEqual(302, response.status_code)\r\n self.assertTrue(response.has_header('Location'))",
"def test_invalid_return_url(self):\r\n self.attempt_login(403, return_to=\"http://apps.cs50.edx.or\")",
"def test_valid_response_request(self, mock_post):\n self._mock_response(mock_post, valid=True)\n\n random_state = six.text_type(uuid.uuid4())\n\n # Simple GET with various parameters\n response = self.get_json(path='/openid/authorize_return',\n expect_errors=True,\n state=random_state,\n **self.valid_params)\n\n # Try to pull the code out of the response\n location = response.headers.get('Location')\n location_url = urlparse.urlparse(location)\n parameters = urlparse.parse_qs(location_url[4])\n\n with base.HybridSessionManager():\n token = auth_api.authorization_code_get(parameters['code'])\n\n redirect_uri = self.valid_params['sb_redirect_uri']\n # Validate the redirect response\n self.assertValidRedirect(response=response,\n expected_status_code=302,\n redirect_uri=redirect_uri,\n state=token.state,\n code=token.code)",
"def test_http_error_raised(self):\n\n self.app.app.preprocess_request()\n\n err = HTTPError(http_status.HTTP_404_NOT_FOUND)\n\n resp = self.r(err)\n\n self.assertIn(\n err.to_data()['message_short'],\n resp[0].decode(),\n )\n self.assertEqual(\n http_status.HTTP_404_NOT_FOUND,\n resp[1],\n )",
"def test_redirect(self):\n self.app.app.preprocess_request()\n\n resp = self.r(\n ({}, # data\n 302, # status code\n None, # headers\n 'http://google.com/', # redirect_uri\n )\n )\n\n self.assertIsInstance(\n resp,\n werkzeug.wrappers.Response,\n )\n self.assertEqual(302, resp.status_code)\n self.assertEqual('http://google.com/', resp.location)",
"def test_authorize_no_response_type(self):\n invalid_params = self.valid_params.copy()\n del invalid_params['response_type']\n\n # Simple GET with invalid code parameters\n random_state = six.text_type(uuid.uuid4())\n response = self.get_json(path='/openid/authorize',\n expect_errors=True,\n state=random_state,\n **invalid_params)\n\n # Validate the error response\n self.assertValidRedirect(response=response,\n expected_status_code=302,\n redirect_uri=invalid_params['redirect_uri'],\n error='unsupported_response_type',\n error_description=e_msg.NO_RESPONSE_TYPE)",
"def test_social_auth_exception(self):\n self._setup_provider_response_with_body(200, json.dumps(\"false\"))\n response = self.client.post(self.url, self.data())\n self._assert_access_token_error(response, \"The provided access_token is not valid.\", \"tpa-invalid-access-token\")\n self._verify_user_existence(user_exists=False, social_link_exists=False)",
"def test_errors(self):\n rc = self.app.get('/this_should_not_exist', follow_redirects=True)\n assert b'404 error :(' in rc.data",
"def test_for_bad_request_errors(self):\n # Invalid token:\n response = self.client.get(\n reverse(\n 'users:recover_password',\n kwargs={\n 'token': 'invalid_token',\n },\n ),\n follow=True,\n )\n\n self.assertEqual(response.status_code, 400)",
"def test_authorize_no_client(self):\n invalid_params = self.valid_params.copy()\n del invalid_params['client_id']\n\n # Simple GET with invalid code parameters\n random_state = six.text_type(uuid.uuid4())\n response = self.get_json(path='/openid/authorize',\n expect_errors=True,\n state=random_state,\n **invalid_params)\n\n # Validate the error response\n self.assertValidRedirect(response=response,\n expected_status_code=302,\n redirect_uri=invalid_params['redirect_uri'],\n error='invalid_client',\n error_description=e_msg.NO_CLIENT_ID)",
"def assertIsRedirect(self, response, path=None):\n self.assertIn(response.status_code, range(300, 400), str(response) + ' is not a redirect')\n if path:\n self.assertEqual(response['location'], path)",
"def assertRedirects(self, response, expected_url, status_code=302,\n target_status_code=200, host=None, msg_prefix=''):\n if msg_prefix:\n msg_prefix += \": \"\n\n if hasattr(response, 'redirect_chain'):\n # The request was a followed redirect\n self.failUnless(\n len(response.redirect_chain) > 0,\n msg_prefix + \"Response didn't redirect as expected: Response\"\n \" code was %d (expected %d)\" % (response.status_code, status_code)\n )\n\n self.assertEqual(\n response.redirect_chain[0][1], status_code,\n msg_prefix + \"Initial response didn't redirect as expected:\"\n \" Response code was %d (expected %d)\" %\n (response.redirect_chain[0][1], status_code)\n )\n\n url, status_code = response.redirect_chain[-1]\n\n self.assertEqual(\n response.status_code, target_status_code,\n msg_prefix + \"Response didn't redirect as expected: Final\"\n \" Response code was %d (expected %d)\" % (response.status_code, target_status_code)\n )\n\n else:\n # Not a followed redirect\n self.assertEqual(\n response.status_code, status_code,\n msg_prefix + \"Response didn't redirect as expected: Response\"\n \" code was %d (expected %d)\" % (response.status_code, status_code)\n )\n\n url = response['Location']\n scheme, netloc, path, query, fragment = urlsplit(url)\n\n redirect_response = self.get(\n urlunsplit((scheme, netloc, path, None, None)),\n QueryDict(query),\n )\n\n # Get the redirection page, using the same client that was used\n # to obtain the original response.\n self.assertEqual(\n redirect_response.status_code, target_status_code,\n msg_prefix + \"Couldn't retrieve redirection page '%s':\"\n \" response code was %d (expected %d)\" %\n (path, redirect_response.status_code, target_status_code)\n )\n\n e_scheme, e_netloc, e_path, e_query, e_fragment = urlsplit(expected_url)\n if not (e_scheme or e_netloc):\n expected_url = urlunsplit(('http', host or 'testserver', e_path, e_query, e_fragment))\n\n self.assertEqual(\n url,\n expected_url,\n msg_prefix + \"Response redirected to '%s', expected '%s'\" % (url, expected_url),\n )",
"def test_validate_callback_invalid_status(self):\n with patch('requests.get') as mock:\n mock.return_value.raise_for_status.side_effect = requests.exceptions.HTTPError\n with self.assertRaises(InvalidProxyCallback):\n ProxyGrantingTicket.objects.validate_callback('http://www.example.com/', 'https://www.example.org/',\n self.pgtid, self.pgtiou)",
"def test_valid_authorize_request(self):\n\n random_state = six.text_type(uuid.uuid4())\n\n # Simple GET with various parameters\n response = self.get_json(path='/openid/authorize',\n expect_errors=True,\n state=random_state,\n **self.valid_params)\n\n # Assert that this is a redirect response\n self.assertEqual(303, response.status_code)\n\n # Assert that the redirect request goes to launchpad.\n location = response.headers.get('Location')\n location_url = urlparse.urlparse(location)\n parameters = urlparse.parse_qs(location_url[4])\n\n # Check the URL\n conf_openid_url = CONF.oauth.openid_url\n self.assertEqual(conf_openid_url, location[0:len(conf_openid_url)])\n\n # Check OAuth Registration parameters\n self.assertIn('fullname', parameters['openid.sreg.required'][0])\n self.assertIn('email', parameters['openid.sreg.required'][0])\n\n # Check redirect URL\n redirect = parameters['openid.return_to'][0]\n redirect_url = urlparse.urlparse(redirect)\n redirect_params = urlparse.parse_qs(redirect_url[4])\n\n self.assertIn('/openid/authorize_return', redirect)\n self.assertEqual(random_state,\n redirect_params['state'][0])\n self.assertEqual(self.valid_params['redirect_uri'],\n redirect_params['sb_redirect_uri'][0])",
"def assertHttpBadRequest(self, response):\r\n self.assertEqual(response.status_code, 400)",
"def test_invalid_access_token(self):\n\n content_type = 'application/x-www-form-urlencoded'\n # POST with content: application/x-www-form-urlencoded\n response = self.app.post('/v1/openid/token',\n params={\n 'code': 'invalid_access_token',\n 'grant_type': 'invalid_grant_type'\n },\n content_type=content_type,\n expect_errors=True)\n\n # Assert that this is a successful response\n self.assertEqual(400, response.status_code)\n self.assertIsNotNone(response.json)\n self.assertEqual('unsupported_grant_type', response.json['error'])\n self.assertEqual(e_msg.INVALID_TOKEN_GRANT_TYPE,\n response.json['error_description'])"
] | [
"0.75373936",
"0.7251968",
"0.7147685",
"0.7066093",
"0.6928313",
"0.6903567",
"0.66213906",
"0.65763414",
"0.65235853",
"0.6513123",
"0.6497357",
"0.626261",
"0.61775184",
"0.61602324",
"0.61488956",
"0.6142485",
"0.6129963",
"0.6106142",
"0.60907656",
"0.5985967",
"0.5980295",
"0.5972227",
"0.5946921",
"0.59225553",
"0.5910761",
"0.59027326",
"0.5871673",
"0.5866611",
"0.5856889",
"0.58515024"
] | 0.7770297 | 0 |
Assert that a nonexistent scope redirects back to the redirect_uri and provides the expected error response. | def test_authorize_no_scope(self):
invalid_params = self.valid_params.copy()
del invalid_params['scope']
# Simple GET with invalid code parameters
random_state = six.text_type(uuid.uuid4())
response = self.get_json(path='/openid/authorize',
expect_errors=True,
state=random_state,
**invalid_params)
# Validate the error response
self.assertValidRedirect(response=response,
expected_status_code=302,
redirect_uri=invalid_params['redirect_uri'],
error='invalid_scope',
error_description=e_msg.NO_SCOPE) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_authorize_invalid_scope(self):\n invalid_params = self.valid_params.copy()\n invalid_params['scope'] = 'invalid_scope'\n\n # Simple GET with invalid code parameters\n random_state = six.text_type(uuid.uuid4())\n response = self.get_json(path='/openid/authorize',\n expect_errors=True,\n state=random_state,\n **invalid_params)\n\n # Validate the error response\n self.assertValidRedirect(response=response,\n expected_status_code=302,\n redirect_uri=invalid_params['redirect_uri'],\n error='invalid_scope',\n error_description=e_msg.INVALID_SCOPE)",
"def test_authorize_invalid_redirect_uri(self):\n invalid_params = self.valid_params.copy()\n invalid_params['redirect_uri'] = 'not_a_valid_uri'\n\n # Simple GET with invalid code parameters\n random_state = six.text_type(uuid.uuid4())\n response = self.get_json(path='/openid/authorize',\n expect_errors=True,\n state=random_state,\n **invalid_params)\n\n # Assert that this is NOT a redirect\n self.assertEqual(400, response.status_code)\n self.assertIsNotNone(response.json)\n self.assertEqual('invalid_request', response.json['error'])\n self.assertEqual(e_msg.INVALID_REDIRECT_URI,\n response.json['error_description'])",
"def test_http_error_raise_with_redirect(self):\n\n resp = self.r(\n HTTPError(http_status.HTTP_201_CREATED, redirect_url='http://google.com/')\n )\n\n self.assertIsInstance(\n resp, werkzeug.wrappers.Response\n )\n\n self.assertEqual(302, resp.status_code)\n self.assertEqual('http://google.com/', resp.location)",
"def test_authorize_no_redirect_uri(self):\n invalid_params = self.valid_params.copy()\n del invalid_params['redirect_uri']\n\n # Simple GET with invalid code parameters\n random_state = six.text_type(uuid.uuid4())\n response = self.get_json(path='/openid/authorize',\n expect_errors=True,\n state=random_state,\n **invalid_params)\n\n # Assert that this is NOT a redirect\n self.assertEqual(400, response.status_code)\n self.assertIsNotNone(response.json)\n self.assertEqual('invalid_request', response.json['error'])\n self.assertEqual(e_msg.NO_REDIRECT_URI,\n response.json['error_description'])",
"def test_invalid_redirect_no_name(self, mock_post):\n self._mock_response(mock_post, valid=True)\n\n random_state = six.text_type(uuid.uuid4())\n\n invalid_params = self.valid_params.copy()\n del invalid_params['openid.sreg.fullname']\n\n # Simple GET with various parameters\n response = self.get_json(path='/openid/authorize_return',\n expect_errors=True,\n state=random_state,\n **invalid_params)\n\n redirect_uri = self.valid_params['sb_redirect_uri']\n # Validate the redirect response\n self.assertValidRedirect(response=response,\n expected_status_code=302,\n redirect_uri=redirect_uri,\n error='invalid_request',\n error_description=e_msg.INVALID_NO_NAME)",
"def test_invalid_response_request(self, mock_post):\n self._mock_response(mock_post, valid=False)\n\n random_state = six.text_type(uuid.uuid4())\n\n # Simple GET with various parameters\n response = self.get_json(path='/openid/authorize_return',\n expect_errors=True,\n state=random_state,\n **self.valid_params)\n\n redirect_uri = self.valid_params['sb_redirect_uri']\n # Validate the redirect response\n self.assertValidRedirect(response=response,\n expected_status_code=302,\n redirect_uri=redirect_uri,\n error='access_denied',\n error_description=e_msg.OPEN_ID_TOKEN_INVALID)",
"def test_invalid_redirect_no_email(self, mock_post):\n self._mock_response(mock_post, valid=True)\n\n random_state = six.text_type(uuid.uuid4())\n\n invalid_params = self.valid_params.copy()\n del invalid_params['openid.sreg.email']\n\n # Simple GET with various parameters\n response = self.get_json(path='/openid/authorize_return',\n expect_errors=True,\n state=random_state,\n **invalid_params)\n\n redirect_uri = self.valid_params['sb_redirect_uri']\n # Validate the redirect response\n self.assertValidRedirect(response=response,\n expected_status_code=302,\n redirect_uri=redirect_uri,\n error='invalid_request',\n error_description=e_msg.INVALID_NO_EMAIL)",
"def test_errors(self):\n rc = self.app.get('/this_should_not_exist', follow_redirects=True)\n assert b'404 error :(' in rc.data",
"def test_errors(self):\n response = self.client.get(reverse('users:resend_confirmation_email'))\n self.assertEqual(response.status_code, 404)\n\n response = self.client.get(\n reverse(\n 'users:resend_confirmation_email',\n ),\n data={\n 'redirect_to': reverse('users:login'),\n 'username': 'wrong_username',\n }\n )\n self.assertEqual(response.status_code, 404)",
"def test_anonymous_required_failure(self):\n rv = self.client.get('/required', follow_redirects=True)\n self.assertNotEqual(b'required', rv.data)",
"def _get_authorize_error_response(error, redirect_uri):\n params = error.get_body()\n uri = add_params_to_uri(redirect_uri, params)\n headers = [(\"Location\", uri)]\n response = flask.Response(\"\", status=302, headers=headers)\n return response",
"def test_http_error_raised(self):\n\n self.app.app.preprocess_request()\n\n err = HTTPError(http_status.HTTP_404_NOT_FOUND)\n\n resp = self.r(err)\n\n self.assertIn(\n err.to_data()['message_short'],\n resp[0].decode(),\n )\n self.assertEqual(\n http_status.HTTP_404_NOT_FOUND,\n resp[1],\n )",
"def test_authorize_invalid_client(self):\n invalid_params = self.valid_params.copy()\n invalid_params['client_id'] = 'invalid_client'\n\n # Simple GET with invalid code parameters\n random_state = six.text_type(uuid.uuid4())\n response = self.get_json(path='/openid/authorize',\n expect_errors=True,\n state=random_state,\n **invalid_params)\n\n # Validate the error response\n self.assertValidRedirect(response=response,\n expected_status_code=302,\n redirect_uri=invalid_params['redirect_uri'],\n error='unauthorized_client',\n error_description=e_msg.INVALID_CLIENT_ID)",
"def test_invalid_return_url(self):\r\n self.attempt_login(403, return_to=\"http://apps.cs50.edx.or\")",
"def test_authorize_invalid_response_type(self):\n invalid_params = self.valid_params.copy()\n invalid_params['response_type'] = 'invalid_code'\n\n # Simple GET with invalid code parameters\n random_state = six.text_type(uuid.uuid4())\n response = self.get_json(path='/openid/authorize',\n expect_errors=True,\n state=random_state,\n **invalid_params)\n\n # Validate the error response\n self.assertValidRedirect(response=response,\n expected_status_code=302,\n redirect_uri=invalid_params['redirect_uri'],\n error='unsupported_response_type',\n error_description=e_msg.INVALID_RESPONSE_TYPE)",
"def test_authorize_no_client(self):\n invalid_params = self.valid_params.copy()\n del invalid_params['client_id']\n\n # Simple GET with invalid code parameters\n random_state = six.text_type(uuid.uuid4())\n response = self.get_json(path='/openid/authorize',\n expect_errors=True,\n state=random_state,\n **invalid_params)\n\n # Validate the error response\n self.assertValidRedirect(response=response,\n expected_status_code=302,\n redirect_uri=invalid_params['redirect_uri'],\n error='invalid_client',\n error_description=e_msg.NO_CLIENT_ID)",
"def assert_redirect_to_provider_looks_correct(self, response):\r\n self.assertEqual(302, response.status_code)\r\n self.assertTrue(response.has_header('Location'))",
"def assert_exception_redirect_looks_correct(self, auth_entry=None):\r\n exception_middleware = middleware.ExceptionMiddleware()\r\n request, _ = self.get_request_and_strategy(auth_entry=auth_entry)\r\n response = exception_middleware.process_exception(\r\n request, exceptions.AuthCanceled(request.social_strategy.backend))\r\n location = response.get('Location')\r\n\r\n self.assertEqual(302, response.status_code)\r\n self.assertIn('canceled', location)\r\n self.assertIn(self.backend_name, location)\r\n\r\n if auth_entry:\r\n # Custom redirection to form.\r\n self.assertTrue(location.startswith('/' + auth_entry))\r\n else:\r\n # Stock framework redirection to root.\r\n self.assertTrue(location.startswith('/?'))",
"def test_redirect(self):\n self.app.app.preprocess_request()\n\n resp = self.r(\n ({}, # data\n 302, # status code\n None, # headers\n 'http://google.com/', # redirect_uri\n )\n )\n\n self.assertIsInstance(\n resp,\n werkzeug.wrappers.Response,\n )\n self.assertEqual(302, resp.status_code)\n self.assertEqual('http://google.com/', resp.location)",
"def test_authentication_error(self):\n resp = self.client.post(\n reverse(self.provider.id + \"_callback\"),\n data={\"error\": \"misc\", \"state\": \"testingstate123\"},\n )\n assert reverse(\"apple_finish_callback\") in resp.url\n # Follow the redirect\n resp = self.client.get(resp.url)\n\n self.assertTemplateUsed(\n resp,\n \"socialaccount/authentication_error.%s\"\n % getattr(settings, \"ACCOUNT_TEMPLATE_EXTENSION\", \"html\"),\n )",
"def bad_callback(_request, _uri, headers):\n return (404, headers, 'NOT AN ASSERTION')",
"def test_authorize_no_response_type(self):\n invalid_params = self.valid_params.copy()\n del invalid_params['response_type']\n\n # Simple GET with invalid code parameters\n random_state = six.text_type(uuid.uuid4())\n response = self.get_json(path='/openid/authorize',\n expect_errors=True,\n state=random_state,\n **invalid_params)\n\n # Validate the error response\n self.assertValidRedirect(response=response,\n expected_status_code=302,\n redirect_uri=invalid_params['redirect_uri'],\n error='unsupported_response_type',\n error_description=e_msg.NO_RESPONSE_TYPE)",
"def test_make_request_error(self):\n response = Helper.make_request(self.url_404)\n self.assertEqual(response.status_code, 404)",
"def test_http_error_raised(self):\n resp = self.r(HTTPError(http_status.HTTP_404_NOT_FOUND))\n\n msg = HTTPError.error_msgs[http_status.HTTP_404_NOT_FOUND]\n\n self.assertEqual(\n (\n {\n 'code': http_status.HTTP_404_NOT_FOUND,\n 'referrer': None,\n 'message_short': msg['message_short'],\n 'message_long': msg['message_long'],\n },\n http_status.HTTP_404_NOT_FOUND,\n ),\n (json.loads(resp[0]), http_status.HTTP_404_NOT_FOUND, ),\n )",
"def assertValidRedirect(self, response, redirect_uri,\n expected_status_code, **kwargs):\n\n self.assertEqual(expected_status_code, response.status_code)\n # Split the url into parts.\n location = response.headers.get('Location')\n location_url = urlparse.urlparse(location)\n parameters = urlparse.parse_qs(location_url[4])\n\n # Break out the redirect uri to compare and make sure we're headed\n # back to the redirect URI with the appropriate error codes.\n configured_url = urlparse.urlparse(redirect_uri)\n self.assertEqual(configured_url[0], location_url[0])\n self.assertEqual(configured_url[1], location_url[1])\n self.assertEqual(configured_url[2], location_url[2])\n self.assertEqual(configured_url[3], location_url[3])\n # 4 is ignored, it contains new parameters.\n self.assertEqual(configured_url[5], location_url[5])\n\n # Make sure we have the correct error response.\n self.assertEqual(len(kwargs), len(parameters))\n for key, value in six.iteritems(kwargs):\n self.assertIn(key, parameters)\n self.assertIsNotNone(parameters[key])\n self.assertEqual(value, parameters[key][0])",
"def test_social_auth_exception(self):\n self._setup_provider_response_with_body(200, json.dumps(\"false\"))\n response = self.client.post(self.url, self.data())\n self._assert_access_token_error(response, \"The provided access_token is not valid.\", \"tpa-invalid-access-token\")\n self._verify_user_existence(user_exists=False, social_link_exists=False)",
"def not_valid(request, redirect=None):\r\n if redirect is None:\r\n raise HTTPForbidden('Deactivated Account')\r\n else:\r\n raise HTTPFound(location=request.route_url(redirect))",
"def assertIsRedirect(self, response, path=None):\n self.assertIn(response.status_code, range(300, 400), str(response) + ' is not a redirect')\n if path:\n self.assertEqual(response['location'], path)",
"def test_redirect(self):\n resp = flask.make_response('')\n\n self.assertIsInstance(\n self.r(resp),\n werkzeug.wrappers.Response,\n )",
"def test_redirect(self):\n resp = flask.make_response('')\n\n self.assertIsInstance(\n self.r(resp),\n werkzeug.wrappers.Response,\n )"
] | [
"0.75156254",
"0.7148414",
"0.7028421",
"0.7019475",
"0.7017566",
"0.67602414",
"0.6492892",
"0.6403846",
"0.6372125",
"0.62288857",
"0.6219358",
"0.6178828",
"0.6163945",
"0.61515033",
"0.6126656",
"0.6096851",
"0.60844594",
"0.6039896",
"0.6037854",
"0.6014855",
"0.5999588",
"0.59989023",
"0.5984848",
"0.59509474",
"0.5912521",
"0.5906886",
"0.5900167",
"0.58710814",
"0.5863543",
"0.5863543"
] | 0.72937465 | 1 |
Assert that an invalid redirect_uri returns a 400 message with the appropriate error message encoded in the body of the response. | def test_authorize_invalid_redirect_uri(self):
invalid_params = self.valid_params.copy()
invalid_params['redirect_uri'] = 'not_a_valid_uri'
# Simple GET with an invalid redirect_uri parameter
random_state = six.text_type(uuid.uuid4())
response = self.get_json(path='/openid/authorize',
expect_errors=True,
state=random_state,
**invalid_params)
# Assert that this is NOT a redirect
self.assertEqual(400, response.status_code)
self.assertIsNotNone(response.json)
self.assertEqual('invalid_request', response.json['error'])
self.assertEqual(e_msg.INVALID_REDIRECT_URI,
response.json['error_description']) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_invalid_response_request(self, mock_post):\n self._mock_response(mock_post, valid=False)\n\n random_state = six.text_type(uuid.uuid4())\n\n # Simple GET with various parameters\n response = self.get_json(path='/openid/authorize_return',\n expect_errors=True,\n state=random_state,\n **self.valid_params)\n\n redirect_uri = self.valid_params['sb_redirect_uri']\n # Validate the redirect response\n self.assertValidRedirect(response=response,\n expected_status_code=302,\n redirect_uri=redirect_uri,\n error='access_denied',\n error_description=e_msg.OPEN_ID_TOKEN_INVALID)",
"def assertHttpBadRequest(self, response):\r\n self.assertEqual(response.status_code, 400)",
"def test_http_error_raise_with_redirect(self):\n\n resp = self.r(\n HTTPError(http_status.HTTP_201_CREATED, redirect_url='http://google.com/')\n )\n\n self.assertIsInstance(\n resp, werkzeug.wrappers.Response\n )\n\n self.assertEqual(302, resp.status_code)\n self.assertEqual('http://google.com/', resp.location)",
"def assertValidRedirect(self, response, redirect_uri,\n expected_status_code, **kwargs):\n\n self.assertEqual(expected_status_code, response.status_code)\n # Split the url into parts.\n location = response.headers.get('Location')\n location_url = urlparse.urlparse(location)\n parameters = urlparse.parse_qs(location_url[4])\n\n # Break out the redirect uri to compare and make sure we're headed\n # back to the redirect URI with the appropriate error codes.\n configured_url = urlparse.urlparse(redirect_uri)\n self.assertEqual(configured_url[0], location_url[0])\n self.assertEqual(configured_url[1], location_url[1])\n self.assertEqual(configured_url[2], location_url[2])\n self.assertEqual(configured_url[3], location_url[3])\n # 4 is ignored, it contains new parameters.\n self.assertEqual(configured_url[5], location_url[5])\n\n # Make sure we have the correct error response.\n self.assertEqual(len(kwargs), len(parameters))\n for key, value in six.iteritems(kwargs):\n self.assertIn(key, parameters)\n self.assertIsNotNone(parameters[key])\n self.assertEqual(value, parameters[key][0])",
"def test_for_bad_request_errors(self):\n # Invalid token:\n response = self.client.get(\n reverse(\n 'users:recover_password',\n kwargs={\n 'token': 'invalid_token',\n },\n ),\n follow=True,\n )\n\n self.assertEqual(response.status_code, 400)",
"def test_authorize_no_redirect_uri(self):\n invalid_params = self.valid_params.copy()\n del invalid_params['redirect_uri']\n\n # Simple GET with invalid code parameters\n random_state = six.text_type(uuid.uuid4())\n response = self.get_json(path='/openid/authorize',\n expect_errors=True,\n state=random_state,\n **invalid_params)\n\n # Assert that this is NOT a redirect\n self.assertEqual(400, response.status_code)\n self.assertIsNotNone(response.json)\n self.assertEqual('invalid_request', response.json['error'])\n self.assertEqual(e_msg.NO_REDIRECT_URI,\n response.json['error_description'])",
"def test_bad_requests_give_400(self):\n self.assertEqual(self._request({}), 400)",
"def test_400_response(self):\n mock = Mock()\n mock.status_code = 400\n\n with self.assertRaises(RequestError):\n check_response(mock)",
"def test_invalid_redirect_no_email(self, mock_post):\n self._mock_response(mock_post, valid=True)\n\n random_state = six.text_type(uuid.uuid4())\n\n invalid_params = self.valid_params.copy()\n del invalid_params['openid.sreg.email']\n\n # Simple GET with various parameters\n response = self.get_json(path='/openid/authorize_return',\n expect_errors=True,\n state=random_state,\n **invalid_params)\n\n redirect_uri = self.valid_params['sb_redirect_uri']\n # Validate the redirect response\n self.assertValidRedirect(response=response,\n expected_status_code=302,\n redirect_uri=redirect_uri,\n error='invalid_request',\n error_description=e_msg.INVALID_NO_EMAIL)",
"def assertHttpBadRequest(self, resp):\r\n return self.assertEqual(resp.status_code, 400)",
"def test_invalid_redirect_no_name(self, mock_post):\n self._mock_response(mock_post, valid=True)\n\n random_state = six.text_type(uuid.uuid4())\n\n invalid_params = self.valid_params.copy()\n del invalid_params['openid.sreg.fullname']\n\n # Simple GET with various parameters\n response = self.get_json(path='/openid/authorize_return',\n expect_errors=True,\n state=random_state,\n **invalid_params)\n\n redirect_uri = self.valid_params['sb_redirect_uri']\n # Validate the redirect response\n self.assertValidRedirect(response=response,\n expected_status_code=302,\n redirect_uri=redirect_uri,\n error='invalid_request',\n error_description=e_msg.INVALID_NO_NAME)",
"def test_400_bad_request(self):\n # create route to abort the request with the 400\n @self.app.route('/400')\n def bad_request_error():\n abort(400)\n response = self.client.get('/400')\n self.assertEqual(response.status_code, 400)",
"def test_authorize_invalid_response_type(self):\n invalid_params = self.valid_params.copy()\n invalid_params['response_type'] = 'invalid_code'\n\n # Simple GET with invalid code parameters\n random_state = six.text_type(uuid.uuid4())\n response = self.get_json(path='/openid/authorize',\n expect_errors=True,\n state=random_state,\n **invalid_params)\n\n # Validate the error response\n self.assertValidRedirect(response=response,\n expected_status_code=302,\n redirect_uri=invalid_params['redirect_uri'],\n error='unsupported_response_type',\n error_description=e_msg.INVALID_RESPONSE_TYPE)",
"def bad_callback(_request, _uri, headers):\n return (404, headers, 'NOT AN ASSERTION')",
"def test_authorize_invalid_client(self):\n invalid_params = self.valid_params.copy()\n invalid_params['client_id'] = 'invalid_client'\n\n # Simple GET with invalid code parameters\n random_state = six.text_type(uuid.uuid4())\n response = self.get_json(path='/openid/authorize',\n expect_errors=True,\n state=random_state,\n **invalid_params)\n\n # Validate the error response\n self.assertValidRedirect(response=response,\n expected_status_code=302,\n redirect_uri=invalid_params['redirect_uri'],\n error='unauthorized_client',\n error_description=e_msg.INVALID_CLIENT_ID)",
"def test_400_bad_request(app, client):\n\n @app.route(\"/400\")\n def bad_request():\n abort(400)\n\n response = client.get(\"/400\")\n assert response.status_code == 400\n assert \"400 Bad Request\" in str(response.data)",
"def test_http_error_raised(self):\n\n self.app.app.preprocess_request()\n\n err = HTTPError(http_status.HTTP_404_NOT_FOUND)\n\n resp = self.r(err)\n\n self.assertIn(\n err.to_data()['message_short'],\n resp[0].decode(),\n )\n self.assertEqual(\n http_status.HTTP_404_NOT_FOUND,\n resp[1],\n )",
"def assert_has_valid_error(self, response, expected_code):\r\n assert 'error' in response\r\n assert len(response) == 1\r\n \r\n error = response['error']\r\n assert 'code' in error\r\n assert error['code'] == expected_code\r\n assert 'title' in error\r\n assert isinstance(error['title'], str)\r\n assert 'message' in error\r\n assert isinstance(error['message'], str)",
"def _get_authorize_error_response(error, redirect_uri):\n params = error.get_body()\n uri = add_params_to_uri(redirect_uri, params)\n headers = [(\"Location\", uri)]\n response = flask.Response(\"\", status=302, headers=headers)\n return response",
"def test_invalid_request_url(self):\r\n self.launch_uri = self.uri + 'wrong_lti_endpoint'\r\n response = requests.post(self.launch_uri, data=self.payload)\r\n self.assertIn('Invalid request URL', response.content)",
"def raise_for_status(self):\n if self.status >= 400:\n request_info = mock.Mock(real_url=\"http://example.com\")\n raise ClientResponseError(\n request_info=request_info,\n history=None,\n status=self.status,\n headers=self.headers,\n )",
"def test_invalid_request(client, auth_token, sample_project):\n # Given\n project_id = sample_project[\"uid\"];\n\n # When\n response = client.post(\"/projects/%s\" % project_id,\n data={\"invalid_field\": \"value\"},\n headers={'token': auth_token},\n follow_redirects=True)\n\n # Then\n assert 400 == response.status_code",
"def check_status_code(resp, expectedStatusCode):\n if resp.status_code != expectedStatusCode:\n raise MiteError(f\"Invalid status code. Expected: {expectedStatusCode}, Actual: {resp.status_code} \")",
"def _assert_redirect_url(self, response, expected_redirect_url):\n response_dict = json.loads(response.content.decode('utf-8'))\n assert 'redirect_url' in response_dict, (\n \"Response JSON unexpectedly does not have redirect_url: {!r}\".format(\n response_dict\n )\n )\n assert response_dict['redirect_url'] == expected_redirect_url",
"def bad_request(message):\n return error_response(400, message)",
"def test_400_ans(self):\r\n self.assertEqual(unpack_answ(\r\n {RESPONSE: 400, ERROR: 'Bad Request'}), '400 : Bad Request')",
"def _assert_access_token_error(self, response, expected_error_message, error_code):\n assert response.status_code == 400\n response_json = json.loads(response.content.decode('utf-8'))\n self.assertDictEqual(\n response_json,\n {\n \"access_token\": [{\"user_message\": expected_error_message}],\n \"error_code\": error_code\n }\n )",
"def invalid_response():\n return Response(\n '{\"error\": \"Invalid request\"}',\n status=400,\n mimetype='application/json'\n )",
"def test_errors(self):\n rc = self.app.get('/this_should_not_exist', follow_redirects=True)\n assert b'404 error :(' in rc.data",
"def test_invalid_usage_exception(exception_app):\n request, response = exception_app.test_client.get('/invalid')\n assert response.status == 400"
] | [
"0.7055511",
"0.70326936",
"0.6937571",
"0.6726953",
"0.6695739",
"0.6667875",
"0.6663773",
"0.6658823",
"0.66554034",
"0.6631071",
"0.6492055",
"0.6474287",
"0.6418554",
"0.6386279",
"0.6319697",
"0.6296205",
"0.6216002",
"0.6185745",
"0.61735564",
"0.6173064",
"0.6152858",
"0.61346096",
"0.61147",
"0.61084294",
"0.60929894",
"0.6090047",
"0.6080305",
"0.60783136",
"0.60739726",
"0.60629135"
] | 0.76964027 | 0 |
Assert that a nonexistent redirect_uri returns a 400 message with the appropriate error message encoded in the body of the response. | def test_authorize_no_redirect_uri(self):
invalid_params = self.valid_params.copy()
del invalid_params['redirect_uri']
# Simple GET with the redirect_uri parameter removed
random_state = six.text_type(uuid.uuid4())
response = self.get_json(path='/openid/authorize',
expect_errors=True,
state=random_state,
**invalid_params)
# Assert that this is NOT a redirect
self.assertEqual(400, response.status_code)
self.assertIsNotNone(response.json)
self.assertEqual('invalid_request', response.json['error'])
self.assertEqual(e_msg.NO_REDIRECT_URI,
response.json['error_description']) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_authorize_invalid_redirect_uri(self):\n invalid_params = self.valid_params.copy()\n invalid_params['redirect_uri'] = 'not_a_valid_uri'\n\n # Simple GET with invalid code parameters\n random_state = six.text_type(uuid.uuid4())\n response = self.get_json(path='/openid/authorize',\n expect_errors=True,\n state=random_state,\n **invalid_params)\n\n # Assert that this is NOT a redirect\n self.assertEqual(400, response.status_code)\n self.assertIsNotNone(response.json)\n self.assertEqual('invalid_request', response.json['error'])\n self.assertEqual(e_msg.INVALID_REDIRECT_URI,\n response.json['error_description'])",
"def test_invalid_response_request(self, mock_post):\n self._mock_response(mock_post, valid=False)\n\n random_state = six.text_type(uuid.uuid4())\n\n # Simple GET with various parameters\n response = self.get_json(path='/openid/authorize_return',\n expect_errors=True,\n state=random_state,\n **self.valid_params)\n\n redirect_uri = self.valid_params['sb_redirect_uri']\n # Validate the redirect response\n self.assertValidRedirect(response=response,\n expected_status_code=302,\n redirect_uri=redirect_uri,\n error='access_denied',\n error_description=e_msg.OPEN_ID_TOKEN_INVALID)",
"def test_invalid_redirect_no_email(self, mock_post):\n self._mock_response(mock_post, valid=True)\n\n random_state = six.text_type(uuid.uuid4())\n\n invalid_params = self.valid_params.copy()\n del invalid_params['openid.sreg.email']\n\n # Simple GET with various parameters\n response = self.get_json(path='/openid/authorize_return',\n expect_errors=True,\n state=random_state,\n **invalid_params)\n\n redirect_uri = self.valid_params['sb_redirect_uri']\n # Validate the redirect response\n self.assertValidRedirect(response=response,\n expected_status_code=302,\n redirect_uri=redirect_uri,\n error='invalid_request',\n error_description=e_msg.INVALID_NO_EMAIL)",
"def test_http_error_raise_with_redirect(self):\n\n resp = self.r(\n HTTPError(http_status.HTTP_201_CREATED, redirect_url='http://google.com/')\n )\n\n self.assertIsInstance(\n resp, werkzeug.wrappers.Response\n )\n\n self.assertEqual(302, resp.status_code)\n self.assertEqual('http://google.com/', resp.location)",
"def test_invalid_redirect_no_name(self, mock_post):\n self._mock_response(mock_post, valid=True)\n\n random_state = six.text_type(uuid.uuid4())\n\n invalid_params = self.valid_params.copy()\n del invalid_params['openid.sreg.fullname']\n\n # Simple GET with various parameters\n response = self.get_json(path='/openid/authorize_return',\n expect_errors=True,\n state=random_state,\n **invalid_params)\n\n redirect_uri = self.valid_params['sb_redirect_uri']\n # Validate the redirect response\n self.assertValidRedirect(response=response,\n expected_status_code=302,\n redirect_uri=redirect_uri,\n error='invalid_request',\n error_description=e_msg.INVALID_NO_NAME)",
"def assertHttpBadRequest(self, response):\r\n self.assertEqual(response.status_code, 400)",
"def bad_callback(_request, _uri, headers):\n return (404, headers, 'NOT AN ASSERTION')",
"def test_errors(self):\n rc = self.app.get('/this_should_not_exist', follow_redirects=True)\n assert b'404 error :(' in rc.data",
"def test_400_response(self):\n mock = Mock()\n mock.status_code = 400\n\n with self.assertRaises(RequestError):\n check_response(mock)",
"def test_bad_requests_give_400(self):\n self.assertEqual(self._request({}), 400)",
"def test_for_bad_request_errors(self):\n # Invalid token:\n response = self.client.get(\n reverse(\n 'users:recover_password',\n kwargs={\n 'token': 'invalid_token',\n },\n ),\n follow=True,\n )\n\n self.assertEqual(response.status_code, 400)",
"def assertHttpBadRequest(self, resp):\r\n return self.assertEqual(resp.status_code, 400)",
"def assertValidRedirect(self, response, redirect_uri,\n expected_status_code, **kwargs):\n\n self.assertEqual(expected_status_code, response.status_code)\n # Split the url into parts.\n location = response.headers.get('Location')\n location_url = urlparse.urlparse(location)\n parameters = urlparse.parse_qs(location_url[4])\n\n # Break out the redirect uri to compare and make sure we're headed\n # back to the redirect URI with the appropriate error codes.\n configured_url = urlparse.urlparse(redirect_uri)\n self.assertEqual(configured_url[0], location_url[0])\n self.assertEqual(configured_url[1], location_url[1])\n self.assertEqual(configured_url[2], location_url[2])\n self.assertEqual(configured_url[3], location_url[3])\n # 4 is ignored, it contains new parameters.\n self.assertEqual(configured_url[5], location_url[5])\n\n # Make sure we have the correct error response.\n self.assertEqual(len(kwargs), len(parameters))\n for key, value in six.iteritems(kwargs):\n self.assertIn(key, parameters)\n self.assertIsNotNone(parameters[key])\n self.assertEqual(value, parameters[key][0])",
"def test_400_bad_request(self):\n # create route to abort the request with the 400\n @self.app.route('/400')\n def bad_request_error():\n abort(400)\n response = self.client.get('/400')\n self.assertEqual(response.status_code, 400)",
"def test_http_error_raised(self):\n\n self.app.app.preprocess_request()\n\n err = HTTPError(http_status.HTTP_404_NOT_FOUND)\n\n resp = self.r(err)\n\n self.assertIn(\n err.to_data()['message_short'],\n resp[0].decode(),\n )\n self.assertEqual(\n http_status.HTTP_404_NOT_FOUND,\n resp[1],\n )",
"def test_404(self):\n response = self.make_call(origin='Milano Lambrate', destination='Milano Cadorna')\n self.assert400(response)",
"def test_authorize_invalid_client(self):\n invalid_params = self.valid_params.copy()\n invalid_params['client_id'] = 'invalid_client'\n\n # Simple GET with invalid code parameters\n random_state = six.text_type(uuid.uuid4())\n response = self.get_json(path='/openid/authorize',\n expect_errors=True,\n state=random_state,\n **invalid_params)\n\n # Validate the error response\n self.assertValidRedirect(response=response,\n expected_status_code=302,\n redirect_uri=invalid_params['redirect_uri'],\n error='unauthorized_client',\n error_description=e_msg.INVALID_CLIENT_ID)",
"def test_invalid_url(self):\n self._environ['PATH_INFO'] = '/_ah/img/'\n self.mox.ReplayAll()\n self.assertResponse('400 %s' % httplib.responses[400], [], '', self.app,\n self._environ)",
"def test_http_error_raised(self):\n resp = self.r(HTTPError(http_status.HTTP_404_NOT_FOUND))\n\n msg = HTTPError.error_msgs[http_status.HTTP_404_NOT_FOUND]\n\n self.assertEqual(\n (\n {\n 'code': http_status.HTTP_404_NOT_FOUND,\n 'referrer': None,\n 'message_short': msg['message_short'],\n 'message_long': msg['message_long'],\n },\n http_status.HTTP_404_NOT_FOUND,\n ),\n (json.loads(resp[0]), http_status.HTTP_404_NOT_FOUND, ),\n )",
"def test_authorize_invalid_response_type(self):\n invalid_params = self.valid_params.copy()\n invalid_params['response_type'] = 'invalid_code'\n\n # Simple GET with invalid code parameters\n random_state = six.text_type(uuid.uuid4())\n response = self.get_json(path='/openid/authorize',\n expect_errors=True,\n state=random_state,\n **invalid_params)\n\n # Validate the error response\n self.assertValidRedirect(response=response,\n expected_status_code=302,\n redirect_uri=invalid_params['redirect_uri'],\n error='unsupported_response_type',\n error_description=e_msg.INVALID_RESPONSE_TYPE)",
"def test_invalid_request_url(self):\r\n self.launch_uri = self.uri + 'wrong_lti_endpoint'\r\n response = requests.post(self.launch_uri, data=self.payload)\r\n self.assertIn('Invalid request URL', response.content)",
"def _assert_redirect_url(self, response, expected_redirect_url):\n response_dict = json.loads(response.content.decode('utf-8'))\n assert 'redirect_url' in response_dict, (\n \"Response JSON unexpectedly does not have redirect_url: {!r}\".format(\n response_dict\n )\n )\n assert response_dict['redirect_url'] == expected_redirect_url",
"def test_make_request_error(self):\n response = Helper.make_request(self.url_404)\n self.assertEqual(response.status_code, 404)",
"def test_400_bad_request(app, client):\n\n @app.route(\"/400\")\n def bad_request():\n abort(400)\n\n response = client.get(\"/400\")\n assert response.status_code == 400\n assert \"400 Bad Request\" in str(response.data)",
"def test_invalid_route_is_status_404(self):\n response = self.client.get(\"/bad\")\n self.assertTrue(response.status_code == 404)",
"def test_bad_http(self):\n # Setup the mocked response\n responses.add(responses.GET, self.api_url, json=self.error_response,\n status=404, match_querystring=False)\n\n acme = ACMEAccount(client=self.client)\n self.assertRaises(HTTPError, acme.all, self.org_id)\n\n # Verify all the query information\n self.assertEqual(len(responses.calls), 1)\n self.match_url_with_qs(responses.calls[0].request.url)",
"def test_invalid_request(client, auth_token, sample_project):\n # Given\n project_id = sample_project[\"uid\"];\n\n # When\n response = client.post(\"/projects/%s\" % project_id,\n data={\"invalid_field\": \"value\"},\n headers={'token': auth_token},\n follow_redirects=True)\n\n # Then\n assert 400 == response.status_code",
"def raise_for_status(self):\n if self.status >= 400:\n request_info = mock.Mock(real_url=\"http://example.com\")\n raise ClientResponseError(\n request_info=request_info,\n history=None,\n status=self.status,\n headers=self.headers,\n )",
"def _get_authorize_error_response(error, redirect_uri):\n params = error.get_body()\n uri = add_params_to_uri(redirect_uri, params)\n headers = [(\"Location\", uri)]\n response = flask.Response(\"\", status=302, headers=headers)\n return response",
"def test_get_fail(self):\n response = self.second_client.get(self.url)\n self.assertEquals(response.status_code, 400)"
] | [
"0.7718685",
"0.7033793",
"0.6927159",
"0.69097126",
"0.69086397",
"0.6898485",
"0.6685903",
"0.66302705",
"0.65627855",
"0.652894",
"0.6524669",
"0.649647",
"0.6418248",
"0.6401584",
"0.6385468",
"0.6310627",
"0.6294222",
"0.62791204",
"0.6268177",
"0.6264218",
"0.62368065",
"0.62340933",
"0.6203982",
"0.61967266",
"0.61848813",
"0.61766547",
"0.6166928",
"0.6136197",
"0.6111841",
"0.6108369"
] | 0.7158007 | 1 |
Set the mock response from the openid endpoint to either true or false. | def _mock_response(self, mock_post, valid=True):
mock_post.return_value.status_code = 200
if valid:
mock_post.return_value.content = \
'is_valid:true\nns:http://specs.openid.net/auth/2.0\n'
else:
mock_post.return_value.content = \
'is_valid:false\nns:http://specs.openid.net/auth/2.0\n' | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_boolean(self):\n endpoint = self.api.boolean\n assert endpoint.openapi_types['body'] == (bool,)\n assert endpoint.settings['response_type'] == (bool,)",
"def test_update_true(self, mock_decorator):\n BceInstitutionRepository.create(\n uai='0802145Z', is_institution=False)\n response = self.client.put(\n '/api/bce_institutions/0802145Z',\n content_type='application/json',\n headers={'Authorization': 'Bearer token'},\n data=json.dumps({\n 'is_institution': True,\n 'id_esr': 4\n })\n )\n self.assertEqual(response.status_code, 200)\n response_json = json.loads(response.data.decode('utf8'))\n self.assertEqual(\n response_json,\n {'institution':\n {'uai': '0802145Z', 'is_institution': True}}\n )\n institution = BceInstitutionRepository.get(uai='0802145Z')\n self.assertEqual(institution.is_institution, True)",
"def test_update_false_success(self, mock_decorator, mock_request):\n BceInstitutionRepository.create(\n uai='0802145Z', is_institution=True)\n headers = {'Authorization': 'Bearer token'}\n response = self.client.put(\n '/api/bce_institutions/0802145Z',\n content_type='application/json',\n headers=headers,\n data=json.dumps({\n 'is_institution': False,\n 'id_esr': 4\n })\n )\n self.assertEqual(mock_request.called, True)\n url = ((os.getenv('INSTITUTION_URL')) + 'institutions/4',)\n args, kwargs = mock_request.call_args\n self.assertEqual(args, url)\n self.assertEqual(response.status_code, 200)\n\n institution = BceInstitutionRepository.get(uai='0802145Z')\n self.assertEqual(institution.is_institution, False)",
"def test_response_ok(self):\n r = mock.Mock(spec=requests.Response)\n r.status_code = 200\n r.content = '{\"normal\": \"resource\"}'\n\n f = Fitbit(**self.client_kwargs)\n f.client._request = lambda *args, **kwargs: r\n f.user_profile_get()\n\n r.status_code = 202\n f.user_profile_get()\n\n r.status_code = 204\n f.user_profile_get()",
"def testReponse(question, reponse):\r\n if reponse == question[5]:\r\n return True\r\n else:\r\n return False",
"def fake_opgepakt(self, value: bool) -> None:\n self._fake_opgepakt = value",
"def test_update_false_failed(self, mock_decorator, mock_request):\n BceInstitutionRepository.create(uai='0802145Z', is_institution=True)\n headers = {'Authorization': 'Bearer token'}\n response = self.client.put(\n '/api/bce_institutions/0802145Z',\n content_type='application/json',\n headers=headers,\n data=json.dumps({\n 'is_institution': False,\n 'id_esr': 4\n })\n )\n\n self.assertEqual(mock_request.called, True)\n url = ((os.getenv('INSTITUTION_URL')) + 'institutions/4',)\n args, kwargs = mock_request.call_args\n self.assertEqual(args, url)\n self.assertEqual(response.status_code, 400)\n\n institution = BceInstitutionRepository.get(uai='0802145Z')\n self.assertEqual(institution.is_institution, True)",
"def should_return(self, value):\n \n return self.request.is_response(value)",
"def should_refresh_client_fnc(response):\n return not response",
"def verify(self, response):",
"def test_ask_yesno_yes(self):\n skill = create_skill()\n skill.get_response = mock.Mock()\n skill.get_response.return_value = 'yes'\n\n response = skill.ask_yesno('Do you like breakfast')\n self.assertEqual(response, 'yes')",
"async def autoresponses(self, ctx, value: bool):\n await queries.update_setting(ctx, \"guild_settings\", \"autoresponses\", value)\n self.bot.cache.autoresponse[str(ctx.guild.id)] = value\n if value:\n await util.send_success(ctx, \"Automatic responses are now **enabled**\")\n else:\n await util.send_success(ctx, \"Automatic responses are now **disabled**\")",
"def test_ask_yesno_no(self):\n skill = create_skill()\n skill.get_response = mock.Mock()\n skill.get_response.return_value = 'nope'\n\n response = skill.ask_yesno('Do you like breakfast')\n self.assertEqual(response, 'no')",
"def answer(self, signatory):\n is_valid = signatory.verify(self.assoc_handle, self.signed)\n # Now invalidate that assoc_handle so it this checkAuth message cannot\n # be replayed.\n signatory.invalidate(self.assoc_handle, dumb=True)\n response = OpenIDResponse(self)\n valid_str = (is_valid and \"true\") or \"false\"\n response.fields.setArg(OPENID_NS, 'is_valid', valid_str)\n\n if self.invalidate_handle:\n assoc = signatory.getAssociation(\n self.invalidate_handle, dumb=False)\n if not assoc:\n response.fields.setArg(OPENID_NS, 'invalidate_handle',\n self.invalidate_handle)\n return response",
"def test_set_boolean(self):\n setting_name = 'project_bool_setting'\n url = reverse(\n 'projectroles:api_project_setting_set',\n kwargs={'project': self.project.sodar_uuid},\n )\n post_data = {\n 'app_name': EX_APP_NAME,\n 'setting_name': setting_name,\n 'value': True,\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 200, msg=response.content)\n obj = AppSetting.objects.get(name=setting_name, project=self.project)\n self.assertEqual(obj.get_value(), True)",
"def test_get_request_normal_response(self, mock_get):\n\n # Arrange\n # Construct our mock response object, giving it relevant expected behaviours\n mock_resp_instance = MockResponse({\"msg\": \"success\"}, 200, content=\"abc\")\n mock_get.return_value = mock_resp_instance\n\n # Act\n response = get_request_data(self.url, json_resp=False)\n\n # Assert that the request-response cycle completed successfully.\n self.assertEqual(mock_resp_instance.status_code, 200)\n self.assertEqual(response, mock_resp_instance)",
"def test_accept_answer(self):\n self.app.post(\"/api/v2/answers/1/user_preferred\", headers=self.headers,\n data=json.dumps(self.answer)) \n response = self.app.patch(\n \"/api/v2/answers/1/user_preferred\", headers=self.headers, data=json.dumps(self.answer))\n result = json.loads(response.data)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(result['status'], 200)",
"def mock_not_onboarded():\n with patch(\n \"homeassistant.components.hassio.http.async_is_onboarded\", return_value=False\n ):\n yield",
"def is_response_correct(self, response):\n for answer in self.my_osid_object.get_answers():\n if self._is_match(response, answer):\n return True\n return False",
"def set_mock_response_data(self, sdc):\n data = {\n 'entities': {\n self.mid: sdc\n },\n 'success': 1\n }\n self.mock_site._simple_request.return_value.submit.return_value = data",
"def test_ask_yesno_other(self):\n skill = create_skill()\n skill.get_response = mock.Mock()\n skill.get_response.return_value = 'I am a fish'\n\n response = skill.ask_yesno('Do you like breakfast')\n self.assertEqual(response, 'I am a fish')",
"def test_optional_honor(self):\r\n self.url_params['honor_code'] = ''\r\n response = self.client.post(self.url, self.url_params)\r\n self.assertEqual(response.status_code, 200)\r\n obj = json.loads(response.content)\r\n self.assertEqual(obj['success'], True)",
"def test_response_200_on_get(self):\n pass",
"def handle_toggle_simulation(self, req):\n self.simulate = req.data\n if self.simulate:\n msg = \"Vehicle #%i will now be simulated.\" % self.vehicle_id\n else:\n msg = \"Vehicle #%i will stop to be simulated.\" % self.vehicle_id\n return srvs.SetBoolResponse(True, msg)",
"def testBoolValue(self):\n objectID = uuid4()\n user = createUser(u'username', u'password', u'User',\n u'[email protected]')\n namespace = createNamespace(user, u'name')\n tag = createTag(user, namespace, u'tag')\n self.store.add(TagValue(user.id, tag.id, objectID, True))",
"async def test_handler_light(mock_aioresponse, deconz_session, deconz_called_with):\n lights = deconz_session.lights.lights\n\n mock_aioresponse.put(\"http://host:80/api/apikey/lights/0/state\")\n await lights.set_state(\n id=\"0\",\n alert=LightAlert.SHORT,\n brightness=200,\n color_loop_speed=10,\n color_temperature=400,\n effect=LightEffect.COLOR_LOOP,\n fan_speed=LightFanSpeed.OFF,\n hue=1000,\n on=True,\n on_time=100,\n saturation=150,\n transition_time=250,\n xy=(0.1, 0.1),\n )\n assert deconz_called_with(\n \"put\",\n path=\"/lights/0/state\",\n json={\n \"alert\": \"select\",\n \"bri\": 200,\n \"colorloopspeed\": 10,\n \"ct\": 400,\n \"effect\": \"colorloop\",\n \"hue\": 1000,\n \"on\": True,\n \"ontime\": 100,\n \"speed\": 0,\n \"sat\": 150,\n \"transitiontime\": 250,\n \"xy\": (0.1, 0.1),\n },\n )\n\n mock_aioresponse.put(\"http://host:80/api/apikey/lights/0/state\")\n await lights.set_state(\"0\", on=False)\n assert deconz_called_with(\n \"put\",\n path=\"/lights/0/state\",\n json={\"on\": False},\n )",
"def test_stub(self):\n self.assertEqual(self._value, True)",
"def test_process_response(self):\n t = self.create_request_object()\n response_content = u\"\"\" <Response ReferenceNumber=\"82e942b0-48e8-4cf4-b299-51e2b6a89a1b\"\n InboundODMFileOID=\"\"\n IsTransactionSuccessful=\"1\"\n SuccessStatistics=\"Rave objects touched: Subjects=0; Folders=0; Forms=0; Fields=0; LogLines=0\" NewRecords=\"\">\n </Response>\n \"\"\"\n req = mock.Mock(requests.Request, text=response_content)\n response = t.result(req)\n self.assertTrue(isinstance(response, RWSResponse))",
"def verify(self, response):\n\n from requests import Response\n wrapped_response = Response()\n wrapped_response.headers = response.headers\n wrapped_response.status_code = response._status_code\n wrapped_response._content = response.get_data()\n\n return super(FlaskResponse, self).verify(wrapped_response)",
"async def test_handler_fan(mock_aioresponse, deconz_session, deconz_called_with):\n lights = deconz_session.lights.lights\n\n mock_aioresponse.put(\"http://host:80/api/apikey/lights/0/state\")\n await lights.set_state(\"0\", fan_speed=LightFanSpeed.OFF)\n assert deconz_called_with(\"put\", path=\"/lights/0/state\", json={\"speed\": 0})\n\n mock_aioresponse.put(\"http://host:80/api/apikey/lights/0/state\")\n await lights.set_state(\"0\", fan_speed=LightFanSpeed.PERCENT_25)\n assert deconz_called_with(\"put\", path=\"/lights/0/state\", json={\"speed\": 1})\n\n mock_aioresponse.put(\"http://host:80/api/apikey/lights/0/state\")\n await lights.set_state(\"0\", fan_speed=LightFanSpeed.PERCENT_50)\n assert deconz_called_with(\"put\", path=\"/lights/0/state\", json={\"speed\": 2})\n\n mock_aioresponse.put(\"http://host:80/api/apikey/lights/0/state\")\n await lights.set_state(\"0\", fan_speed=LightFanSpeed.PERCENT_75)\n assert deconz_called_with(\"put\", path=\"/lights/0/state\", json={\"speed\": 3})\n\n mock_aioresponse.put(\"http://host:80/api/apikey/lights/0/state\")\n await lights.set_state(\"0\", fan_speed=LightFanSpeed.PERCENT_100)\n assert deconz_called_with(\"put\", path=\"/lights/0/state\", json={\"speed\": 4})\n\n mock_aioresponse.put(\"http://host:80/api/apikey/lights/0/state\")\n await lights.set_state(\"0\", fan_speed=LightFanSpeed.AUTO)\n assert deconz_called_with(\"put\", path=\"/lights/0/state\", json={\"speed\": 5})\n\n mock_aioresponse.put(\"http://host:80/api/apikey/lights/0/state\")\n await lights.set_state(\"0\", fan_speed=LightFanSpeed.COMFORT_BREEZE)\n assert deconz_called_with(\"put\", path=\"/lights/0/state\", json={\"speed\": 6})"
] | [
"0.6240062",
"0.6088881",
"0.6007175",
"0.59424764",
"0.5702211",
"0.5642949",
"0.56002766",
"0.5597038",
"0.5544386",
"0.5502624",
"0.5480442",
"0.5476116",
"0.54389155",
"0.54300946",
"0.54158604",
"0.5402787",
"0.53936625",
"0.538996",
"0.53734565",
"0.5373242",
"0.5370819",
"0.5356365",
"0.53544885",
"0.5353641",
"0.5347561",
"0.5335334",
"0.5333445",
"0.53313684",
"0.52964836",
"0.52950704"
] | 0.6738863 | 0 |
This test ensures that the access token request may execute properly with a valid token. | def test_valid_access_request(self):
# Generate a valid auth token
with base.HybridSessionManager():
authorization_code = auth_api.authorization_code_save({
'user_id': 2,
'state': 'test_state',
'code': 'test_valid_code'
})
content_type = 'application/x-www-form-urlencoded'
# POST with content: application/x-www-form-urlencoded
response = self.app.post('/v1/openid/token',
params={
'code': authorization_code.code,
'grant_type': 'authorization_code'
},
content_type=content_type,
expect_errors=True)
# Assert that this is a successful response
self.assertEqual(200, response.status_code)
# Assert that the token came back in the response
token = response.json
self.assertIsNotNone(token['access_token'])
self.assertIsNotNone(token['expires_in'])
self.assertIsNotNone(token['id_token'])
self.assertIsNotNone(token['refresh_token'])
self.assertIsNotNone(token['token_type'])
self.assertEqual('Bearer', token['token_type'])
# Assert that the access token is in the database
with base.HybridSessionManager():
access_token = \
token_api.access_token_get_by_token(token['access_token'])
self.assertIsNotNone(access_token)
# Assert that system-configured values are owned by the correct user.
self.assertEqual(2, access_token.user_id)
self.assertEqual(token['id_token'], access_token.user_id)
self.assertEqual(token['expires_in'], CONF.oauth.access_token_ttl)
self.assertEqual(token['expires_in'], access_token.expires_in)
self.assertEqual(token['access_token'], access_token.access_token)
# Assert that the refresh token is in the database
with base.HybridSessionManager():
refresh_token = \
refresh_tokens.refresh_token_get_by_token(
token['refresh_token'])
self.assertIsNotNone(refresh_token)
# Assert that system-configured values are owned by the correct user.
self.assertEqual(2, refresh_token.user_id)
self.assertEqual(CONF.oauth.refresh_token_ttl,
refresh_token.expires_in)
self.assertEqual(token['refresh_token'], refresh_token.refresh_token)
# Assert that the authorization code is no longer in the database.
with base.HybridSessionManager():
none_code = \
auth_api.authorization_code_get(authorization_code.code)
self.assertIsNone(none_code) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_read_o_auth_access_token(self):\n pass",
"def test_create_o_auth_access_token(self):\n pass",
"def test_access_token_get(self):\n client = oauth.Client(self.consumer, None)\n resp, content = client.request(self._uri('request_token'), \"GET\")\n\n self.assertEqual(int(resp['status']), 200)",
"def test_access_token_post(self):\n client = oauth.Client(self.consumer, None)\n resp, content = client.request(self._uri('request_token'), \"POST\")\n\n self.assertEqual(int(resp['status']), 200)\n\n res = dict(parse_qsl(content))\n self.assertTrue(b'oauth_token' in res)\n self.assertTrue(b'oauth_token_secret' in res)",
"def test_list_o_auth_access_token(self):\n pass",
"def test_valid_access_token_time(self):\n\n # Store the old TZ info, if it exists.\n old_tz = None\n if 'TZ' in os.environ:\n old_tz = os.environ['TZ']\n\n # Convert now into every possible timezone out there :)\n for name in self.tested_timezones:\n\n # Override the 'default timezone' for the current runtime.\n os.environ['TZ'] = name\n\n # Create a token.\n with base.HybridSessionManager():\n authorization_code = auth_api.authorization_code_save({\n 'user_id': 2,\n 'state': 'test_state',\n 'code': 'test_valid_code',\n 'expires_in': 300\n })\n\n content_type = 'application/x-www-form-urlencoded'\n response = self.app.post('/v1/openid/token',\n params={\n 'code': authorization_code.code,\n 'grant_type': 'authorization_code'\n },\n content_type=content_type,\n expect_errors=True)\n\n # Assert that this is a valid call.\n self.assertEqual(200, response.status_code)\n\n # Reset the timezone.\n if old_tz:\n os.environ['TZ'] = old_tz\n else:\n del os.environ['TZ']",
"def testGetToken(self):\n # Token is base64 for a json object so always starts with '{\"'\n self.assertTrue(self.dl_object._access_token.startswith('eyJ'))\n self.assertTrue(len(self.dl_object._access_token) > 100)",
"def test_patch_o_auth_access_token(self):\n pass",
"def test_get_token_failure(self):\n url = '/api-token-auth/'\n data = {'username': 'adam', 'password': '321'}\n\n response = Client().post(url, data)\n self.assertEqual(response.status_code, 400)",
"def test_access_methods_with_token(self):\n\n print(\" --------------------------- Test 5 - Try Access with token ----------------------------\")\n\n user_id = uuid.uuid4()\n password = \"my-precious\"\n currency = \"EUR\"\n\n register_user(user_id, password, currency)\n response = login_user(user_id, password)\n\n self.assertTrue(response.json()['message']['auth_token'])\n\n auth_token = response.json()['message']['auth_token']\n headers = {'Content-Type': \"application/json\", 'Authorization': auth_token}\n data = \"{\\\"amount\\\" : 20.0}\"\n response = requests.post('http://192.168.85-208/account/amount', headers=headers, data=data)\n\n self.assertTrue(response.json()['message']['status'] == 'success')\n self.assertTrue(response.json()['message']['message'] == 'The amount was added.')\n self.assertEqual(response.json()['code'], 200)",
"def test_invalid_access_token(self):\n\n content_type = 'application/x-www-form-urlencoded'\n # POST with content: application/x-www-form-urlencoded\n response = self.app.post('/v1/openid/token',\n params={\n 'code': 'invalid_access_token',\n 'grant_type': 'invalid_grant_type'\n },\n content_type=content_type,\n expect_errors=True)\n\n # Assert that this is a successful response\n self.assertEqual(400, response.status_code)\n self.assertIsNotNone(response.json)\n self.assertEqual('unsupported_grant_type', response.json['error'])\n self.assertEqual(e_msg.INVALID_TOKEN_GRANT_TYPE,\n response.json['error_description'])",
"def test_fail_token(client, request):\n res = client.get('/token?uid=1')\n\n assert res.status_code == 400\n assert 'User does not exist' in res.data.decode('utf-8')",
"def test_replace_o_auth_access_token(self):\n pass",
"def test_access_token_setting(self):\n client = Client()\n assert not client.is_access_token_set()\n client.set_client_access_token(\"FAKE-TOKEN\")\n assert client.is_access_token_set()",
"def test__parse_access_token():\n for input_data, expected_output in (\n ({'access_token': ''}, ''),\n ({'access_token': 'a'}, 'a'),\n ):\n output = parse_access_token(input_data)\n vampytest.assert_eq(output, expected_output)",
"def test_access_token_in_session_after_login(self, client, valid_otp_data):\n\n resp = client.post(self.url, json=valid_otp_data)\n assert resp.status_code == 200\n\n session_resp = client.get(\"/view_session\")\n assert \"access_token\" in session_resp.json()",
"async def test_token_request_succeeds(hass: HomeAssistant) -> None:\n flow = config_flow.EcobeeFlowHandler()\n flow.hass = hass\n flow.hass.data[DATA_ECOBEE_CONFIG] = {}\n\n with patch(\"homeassistant.components.ecobee.config_flow.Ecobee\") as mock_ecobee:\n mock_ecobee = mock_ecobee.return_value\n mock_ecobee.request_tokens.return_value = True\n mock_ecobee.api_key = \"test-api-key\"\n mock_ecobee.refresh_token = \"test-token\"\n\n flow._ecobee = mock_ecobee\n\n result = await flow.async_step_authorize(user_input={})\n\n assert result[\"type\"] == data_entry_flow.FlowResultType.CREATE_ENTRY\n assert result[\"title\"] == DOMAIN\n assert result[\"data\"] == {\n CONF_API_KEY: \"test-api-key\",\n CONF_REFRESH_TOKEN: \"test-token\",\n }",
"def test_access_token_returns_create_token_response():\n request = mock.Mock()\n\n response_data = views.access_token(request)\n\n request.create_token_response.assert_called_with()\n assert response_data == request.create_token_response.return_value",
"def _validate_token(self):\n if not self.token:\n self.login()\n if not self.token:\n # TODO: create exception for this\n # Access is denied!!\n raise Exception(\"AccessDenied\")",
"def test_verifies_bearer_token(self):\n\n badgr = self.get_badgr_setup()\n\n # _token_data isn't meant to be exposed; pylint: disable=W0212\n self.assertEqual(badgr._token_data['token_type'], \"Bearer\")\n self.assertEqual(badgr._token_data['access_token'],\n self._sample_token)",
"def test_read_o_auth_authorize_token(self):\n pass",
"def test_get_tokens():\n tokens = get_tokens()\n assert tokens[\"token_type\"] == \"Bearer\"\n assert tokens[\"access_token\"] is not None\n assert tokens[\"expires_at\"] is not None\n assert tokens[\"expires_in\"] is not None\n assert tokens[\"refresh_token\"] is not None\n\n assert \"token_type\" in tokens\n assert \"access_token\" in tokens\n assert \"expires_at\" in tokens\n assert \"expires_in\" in tokens\n assert \"refresh_token\" in tokens\n\n assert tokens[\"expires_at\"] > int(time.time())",
"def test_delete_o_auth_access_token(self):\n pass",
"def test_authtoken_is_valid(self):\n auth_client = self.fixtures.auth_client\n # scenario 1: when validity is unlimited (0)\n tomriddle = models.User(username='voldemort', fullname='Tom Riddle')\n scope = ['id', 'email']\n tomriddle_token = models.AuthToken(\n auth_client=auth_client, user=tomriddle, scope=scope, validity=0\n )\n self.assertTrue(tomriddle_token.is_valid())\n\n # scenario 2: when validity has not been given\n draco = models.User(username='draco', fullname='Draco Malfoy')\n draco_token = models.AuthToken(auth_client=auth_client, user=draco, scope=scope)\n with self.assertRaises(TypeError):\n draco_token.is_valid()\n\n # scenario 3: when validity is limited\n harry = models.User(username='harry', fullname='Harry Potter')\n harry_token = models.AuthToken(\n auth_client=auth_client,\n user=harry,\n scope=scope,\n validity=3600,\n created_at=utcnow(),\n )\n self.assertTrue(harry_token.is_valid())\n\n # scenario 4: when validity is limited *and* the token has expired\n cedric = models.User(username='cedric', fullname='Cedric Diggory')\n cedric_token = models.AuthToken(\n auth_client=auth_client,\n user=cedric,\n scope=scope,\n validity=1,\n created_at=utcnow() - timedelta(1),\n )\n self.assertFalse(cedric_token.is_valid())",
"def test_create_token_for_not_user(self):\n\n credentials = {'email': '[email protected]', 'password': 'Testpass12'}\n response = self.client.post(URL_TOKEN, credentials)\n\n # Check that the response is HTTP 400, and does not contain a token.\n self.assertNotIn('token', response.data)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)",
"def test_get_token_response_is_correct(self):\n resp = self.client.get('/v3/auth/tokens')\n\n self.assert_status(resp, 201)\n self.assertEquals(resp.content_type, 'application/json')",
"async def test_token_request_fails(hass: HomeAssistant) -> None:\n flow = config_flow.EcobeeFlowHandler()\n flow.hass = hass\n flow.hass.data[DATA_ECOBEE_CONFIG] = {}\n\n with patch(\"homeassistant.components.ecobee.config_flow.Ecobee\") as mock_ecobee:\n mock_ecobee = mock_ecobee.return_value\n mock_ecobee.request_tokens.return_value = False\n mock_ecobee.pin = \"test-pin\"\n\n flow._ecobee = mock_ecobee\n\n result = await flow.async_step_authorize(user_input={})\n\n assert result[\"type\"] == data_entry_flow.FlowResultType.FORM\n assert result[\"step_id\"] == \"authorize\"\n assert result[\"errors\"][\"base\"] == \"token_request_failed\"\n assert result[\"description_placeholders\"] == {\"pin\": \"test-pin\"}",
"def validate_access_token(cmd, namespace):\n n = namespace\n\n if not n.access_token:\n n.access_token = get_config_value(cmd, 'communication', 'access_token', None)",
"def test_valid_token(self, mock_check_token_not_revoked,\n mock_get_issuer_public_key):\n # Mock the external call to retrieve the IAM public key\n # used in the _verify_token and valid_token_to_id call\n mock_get_issuer_public_key.return_value = PUBLIC_KEY\n # Mock the external call to check the token has not been rejected\n # used in the valid_token_to_id call\n mock_check_token_not_revoked.return_value = CLIENT_ID\n\n # This payload will be valid as we will sign it with PRIVATE_KEY\n payload = self._standard_token()\n\n token = self._create_token(payload, PRIVATE_KEY)\n\n with self.settings(IAM_HOSTNAME_LIST=['iam-test.idc.eu']):\n client_id = payload['sub']\n self.assertEqual(\n self._token_checker.valid_token_to_id(token), client_id,\n \"Token with payload %s should be accepted!\" % payload\n )",
"def test_create_o_auth_authorize_token(self):\n pass"
] | [
"0.8008994",
"0.79901123",
"0.7986423",
"0.7827258",
"0.76730573",
"0.7581791",
"0.7542795",
"0.7538852",
"0.7517634",
"0.7500351",
"0.7455751",
"0.7445823",
"0.7411423",
"0.73875546",
"0.7371738",
"0.7253156",
"0.72491294",
"0.72450924",
"0.72399",
"0.7238495",
"0.72369736",
"0.7233416",
"0.720556",
"0.7131353",
"0.71204466",
"0.7107075",
"0.7096129",
"0.70946765",
"0.7072255",
"0.7039355"
] | 0.82178074 | 0 |
Assert that a newly created access token is valid if storyboard is installed in a multitude of timezones. | def test_valid_access_token_time(self):
# Store the old TZ info, if it exists.
old_tz = None
if 'TZ' in os.environ:
old_tz = os.environ['TZ']
# Convert now into every possible timezone out there :)
for name in self.tested_timezones:
# Override the 'default timezone' for the current runtime.
os.environ['TZ'] = name
# Create a token.
with base.HybridSessionManager():
authorization_code = auth_api.authorization_code_save({
'user_id': 2,
'state': 'test_state',
'code': 'test_valid_code',
'expires_in': 300
})
content_type = 'application/x-www-form-urlencoded'
response = self.app.post('/v1/openid/token',
params={
'code': authorization_code.code,
'grant_type': 'authorization_code'
},
content_type=content_type,
expect_errors=True)
# Assert that this is a valid call.
self.assertEqual(200, response.status_code)
# Reset the timezone.
if old_tz:
os.environ['TZ'] = old_tz
else:
del os.environ['TZ'] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_expired_access_token_time(self):\n\n expired = datetime.datetime.now(pytz.utc) - datetime.timedelta(\n minutes=6)\n\n # Store the old TZ info, if it exists.\n old_tz = None\n if 'TZ' in os.environ:\n old_tz = os.environ['TZ']\n\n # Convert now into every possible timezone out there :)\n for name in self.tested_timezones:\n\n # Override the 'default timezone' for the current runtime.\n os.environ['TZ'] = name\n\n # Create a token.\n with base.HybridSessionManager():\n authorization_code = auth_api.authorization_code_save({\n 'user_id': 2,\n 'state': 'test_state',\n 'code': 'test_valid_code',\n 'expires_in': 300,\n 'created_at': expired\n })\n\n content_type = 'application/x-www-form-urlencoded'\n # POST with content: application/x-www-form-urlencoded\n response = self.app.post('/v1/openid/token',\n params={\n 'code': authorization_code.code,\n 'grant_type': 'authorization_code'\n },\n content_type=content_type,\n expect_errors=True)\n\n # Assert that this is a valid call.\n self.assertEqual(401, response.status_code)\n\n # Reset the timezone.\n if old_tz:\n os.environ['TZ'] = old_tz\n else:\n del os.environ['TZ']",
"def test_create_o_auth_access_token(self):\n pass",
"def test_guest_timezone(self):\n self.check_guest_timezone()",
"def test_guest_timezone(self):\n self.check_guest_timezone()",
"def test_guest_timezone(self):\n self.check_guest_timezone()",
"def test_guest_timezone(self):\n self.check_guest_timezone()",
"def test_guest_timezone(self):\n self.check_guest_timezone()",
"def test_guest_timezone(self):\n self.check_guest_timezone()",
"def test_guest_timezone(self):\n self.check_guest_timezone()",
"def test_guest_timezone(self):\n self.check_guest_timezone()",
"def test_guest_timezone(self):\n self.check_guest_timezone()",
"def test_guest_timezone(self):\n self.check_guest_timezone()",
"def test_guest_timezone(self):\n self.check_guest_timezone()",
"def test_guest_timezone(self):\n self.check_guest_timezone()",
"def test_guest_timezone(self):\n self.check_guest_timezone()",
"def test_guest_timezone(self):\n self.check_guest_timezone()",
"def test_guest_timezone(self):\n self.check_guest_timezone()",
"def test_guest_timezone(self):\n self.check_guest_timezone()",
"def test_expires_soon(self):\n now = timezone.now()\n window = SparkSettings().RENEW_TOKEN_WINDOW\n cur = self.factory.build(access_token='good',\n expires_at=now + timedelta(seconds=window*2))\n exp = self.factory.build(access_token='expired',\n expires_at=now + timedelta(seconds=window/2))\n self.assertFalse(cur.expires_soon())\n self.assertTrue(exp.expires_soon())",
"def test_access_token(self):\n exp = self.factory.create(access_token='expired', expires_at=self.expired_dt)\n cur = self.factory.create(access_token=ACCESS_TOKEN, expires_at=self.current_dt)\n old = self.factory.create(access_token='old', expires_at=self.old_dt)\n with HTTMock(spark_cloud_mock):\n token = CloudCredentials.objects._access_token()\n self.assertEqual(token, ACCESS_TOKEN)\n CloudCredentials.objects.all().delete()",
"def test_authtoken_is_valid(self):\n auth_client = self.fixtures.auth_client\n # scenario 1: when validity is unlimited (0)\n tomriddle = models.User(username='voldemort', fullname='Tom Riddle')\n scope = ['id', 'email']\n tomriddle_token = models.AuthToken(\n auth_client=auth_client, user=tomriddle, scope=scope, validity=0\n )\n self.assertTrue(tomriddle_token.is_valid())\n\n # scenario 2: when validity has not been given\n draco = models.User(username='draco', fullname='Draco Malfoy')\n draco_token = models.AuthToken(auth_client=auth_client, user=draco, scope=scope)\n with self.assertRaises(TypeError):\n draco_token.is_valid()\n\n # scenario 3: when validity is limited\n harry = models.User(username='harry', fullname='Harry Potter')\n harry_token = models.AuthToken(\n auth_client=auth_client,\n user=harry,\n scope=scope,\n validity=3600,\n created_at=utcnow(),\n )\n self.assertTrue(harry_token.is_valid())\n\n # scenario 4: when validity is limited *and* the token has expired\n cedric = models.User(username='cedric', fullname='Cedric Diggory')\n cedric_token = models.AuthToken(\n auth_client=auth_client,\n user=cedric,\n scope=scope,\n validity=1,\n created_at=utcnow() - timedelta(1),\n )\n self.assertFalse(cedric_token.is_valid())",
"def test_access_token_in_session_after_login(self, client, valid_otp_data):\n\n resp = client.post(self.url, json=valid_otp_data)\n assert resp.status_code == 200\n\n session_resp = client.get(\"/view_session\")\n assert \"access_token\" in session_resp.json()",
"def test_time_zone() -> None:\n schema = vol.Schema(cv.time_zone)\n\n with pytest.raises(vol.MultipleInvalid):\n schema(\"America/Do_Not_Exist\")\n\n schema(\"America/Los_Angeles\")\n schema(\"UTC\")",
"def test_get_tokens():\n tokens = get_tokens()\n assert tokens[\"token_type\"] == \"Bearer\"\n assert tokens[\"access_token\"] is not None\n assert tokens[\"expires_at\"] is not None\n assert tokens[\"expires_in\"] is not None\n assert tokens[\"refresh_token\"] is not None\n\n assert \"token_type\" in tokens\n assert \"access_token\" in tokens\n assert \"expires_at\" in tokens\n assert \"expires_in\" in tokens\n assert \"refresh_token\" in tokens\n\n assert tokens[\"expires_at\"] > int(time.time())",
"def test_expired_thread_token_is_valid(self):\n self.token.modified = self.days_ago(const.THREAD_TOKEN_EXPIRY + 1)\n assert not self.token.is_valid()",
"def test_access_token_all_expired(self):\n exp = self.factory.create(access_token='expired', expires_at=self.expired_dt)\n with HTTMock(spark_cloud_mock):\n token = CloudCredentials.objects._access_token()\n self.assertEqual(token, None)\n exp.delete()",
"def test_create_valid_user_success(self):\n payload = {\n 'email': '[email protected]',\n 'password': '123asd123123',\n 'name': 'Test Name',\n 'time_zone': 'Europe/London'\n }\n\n res = self.client.post(CREATE_USER_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n self.assertEqual(res.data['time_zone'], payload['time_zone'])",
"def test_patch_o_auth_access_token(self):\n pass",
"def validate_availability_zones(self, context, resource_type,\n availability_zones):",
"def test_valid_access_request(self):\n\n # Generate a valid auth token\n with base.HybridSessionManager():\n authorization_code = auth_api.authorization_code_save({\n 'user_id': 2,\n 'state': 'test_state',\n 'code': 'test_valid_code'\n })\n\n content_type = 'application/x-www-form-urlencoded'\n # POST with content: application/x-www-form-urlencoded\n response = self.app.post('/v1/openid/token',\n params={\n 'code': authorization_code.code,\n 'grant_type': 'authorization_code'\n },\n content_type=content_type,\n expect_errors=True)\n\n # Assert that this is a successful response\n self.assertEqual(200, response.status_code)\n\n # Assert that the token came back in the response\n token = response.json\n self.assertIsNotNone(token['access_token'])\n self.assertIsNotNone(token['expires_in'])\n self.assertIsNotNone(token['id_token'])\n self.assertIsNotNone(token['refresh_token'])\n self.assertIsNotNone(token['token_type'])\n self.assertEqual('Bearer', token['token_type'])\n\n # Assert that the access token is in the database\n with base.HybridSessionManager():\n access_token = \\\n token_api.access_token_get_by_token(token['access_token'])\n self.assertIsNotNone(access_token)\n\n # Assert that system configured values is owned by the correct user.\n self.assertEqual(2, access_token.user_id)\n self.assertEqual(token['id_token'], access_token.user_id)\n self.assertEqual(token['expires_in'], CONF.oauth.access_token_ttl)\n self.assertEqual(token['expires_in'], access_token.expires_in)\n self.assertEqual(token['access_token'], access_token.access_token)\n\n # Assert that the refresh token is in the database\n with base.HybridSessionManager():\n refresh_token = \\\n refresh_tokens.refresh_token_get_by_token(\n token['refresh_token'])\n\n self.assertIsNotNone(refresh_token)\n\n # Assert that system configured values is owned by the correct user.\n self.assertEqual(2, refresh_token.user_id)\n self.assertEqual(CONF.oauth.refresh_token_ttl,\n refresh_token.expires_in)\n self.assertEqual(token['refresh_token'], refresh_token.refresh_token)\n\n # Assert that the authorization code is no longer in the database.\n with base.HybridSessionManager():\n none_code = \\\n auth_api.authorization_code_get(authorization_code.code)\n self.assertIsNone(none_code)"
] | [
"0.6694041",
"0.60501355",
"0.5970943",
"0.5970943",
"0.5970943",
"0.5970943",
"0.5970943",
"0.5970943",
"0.5970943",
"0.5970943",
"0.5970943",
"0.5970943",
"0.5970943",
"0.5970943",
"0.5970943",
"0.5970943",
"0.5970943",
"0.5970943",
"0.5809005",
"0.5798218",
"0.56382155",
"0.56312096",
"0.5609488",
"0.56000835",
"0.55932444",
"0.55858314",
"0.5551735",
"0.55378973",
"0.5535765",
"0.55205727"
] | 0.74466926 | 0 |
This test ensures that an access token is seen as expired regardless of the timezone in which storyboard is installed. | def test_expired_access_token_time(self):
expired = datetime.datetime.now(pytz.utc) - datetime.timedelta(
minutes=6)
# Store the old TZ info, if it exists.
old_tz = None
if 'TZ' in os.environ:
old_tz = os.environ['TZ']
# Convert now into every possible timezone out there :)
for name in self.tested_timezones:
# Override the 'default timezone' for the current runtime.
os.environ['TZ'] = name
# Create a token.
with base.HybridSessionManager():
authorization_code = auth_api.authorization_code_save({
'user_id': 2,
'state': 'test_state',
'code': 'test_valid_code',
'expires_in': 300,
'created_at': expired
})
content_type = 'application/x-www-form-urlencoded'
# POST with content: application/x-www-form-urlencoded
response = self.app.post('/v1/openid/token',
params={
'code': authorization_code.code,
'grant_type': 'authorization_code'
},
content_type=content_type,
expect_errors=True)
            # Assert that the expired authorization code is rejected.
self.assertEqual(401, response.status_code)
# Reset the timezone.
if old_tz:
os.environ['TZ'] = old_tz
else:
del os.environ['TZ'] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_valid_access_token_time(self):\n\n # Store the old TZ info, if it exists.\n old_tz = None\n if 'TZ' in os.environ:\n old_tz = os.environ['TZ']\n\n # Convert now into every possible timezone out there :)\n for name in self.tested_timezones:\n\n # Override the 'default timezone' for the current runtime.\n os.environ['TZ'] = name\n\n # Create a token.\n with base.HybridSessionManager():\n authorization_code = auth_api.authorization_code_save({\n 'user_id': 2,\n 'state': 'test_state',\n 'code': 'test_valid_code',\n 'expires_in': 300\n })\n\n content_type = 'application/x-www-form-urlencoded'\n response = self.app.post('/v1/openid/token',\n params={\n 'code': authorization_code.code,\n 'grant_type': 'authorization_code'\n },\n content_type=content_type,\n expect_errors=True)\n\n # Assert that this is a valid call.\n self.assertEqual(200, response.status_code)\n\n # Reset the timezone.\n if old_tz:\n os.environ['TZ'] = old_tz\n else:\n del os.environ['TZ']",
"def test_expires_soon(self):\n now = timezone.now()\n window = SparkSettings().RENEW_TOKEN_WINDOW\n cur = self.factory.build(access_token='good',\n expires_at=now + timedelta(seconds=window*2))\n exp = self.factory.build(access_token='expired',\n expires_at=now + timedelta(seconds=window/2))\n self.assertFalse(cur.expires_soon())\n self.assertTrue(exp.expires_soon())",
"def test_access_token_all_expired(self):\n exp = self.factory.create(access_token='expired', expires_at=self.expired_dt)\n with HTTMock(spark_cloud_mock):\n token = CloudCredentials.objects._access_token()\n self.assertEqual(token, None)\n exp.delete()",
"def test_legacy_client_expired_access_token(self):\n self.legacy_client._client._expires_at = 1\n response = self.legacy_client.request('GET', urljoin(BASE_URL, MOCK_ENDPOINT))\n self.assertEquals(response.content, RESPONSE)",
"def test_access_token(self):\n exp = self.factory.create(access_token='expired', expires_at=self.expired_dt)\n cur = self.factory.create(access_token=ACCESS_TOKEN, expires_at=self.current_dt)\n old = self.factory.create(access_token='old', expires_at=self.old_dt)\n with HTTMock(spark_cloud_mock):\n token = CloudCredentials.objects._access_token()\n self.assertEqual(token, ACCESS_TOKEN)\n CloudCredentials.objects.all().delete()",
"def test_expired_credentials():\n pass",
"def test_replace_o_auth_access_token(self):\n pass",
"def test_mail_client_expired_access_token(self):\n self.mail_client._client._expires_at = 1\n response = self.mail_client.request('GET', urljoin(BASE_URL, MOCK_ENDPOINT))\n self.assertEquals(response.content, RESPONSE)",
"async def test_invalid_token_expiry_in_config_entry(\n hass: HomeAssistant,\n component_setup: ComponentSetup,\n aioclient_mock: AiohttpClientMocker,\n) -> None:\n\n # The token is refreshed and new expiration values are returned\n expires_in = 86400\n expires_at = time.time() + expires_in\n aioclient_mock.post(\n \"https://oauth2.googleapis.com/token\",\n json={\n \"refresh_token\": \"some-refresh-token\",\n \"access_token\": \"some-updated-token\",\n \"expires_at\": expires_at,\n \"expires_in\": expires_in,\n },\n )\n\n assert await component_setup()\n\n # Verify token expiration values are updated\n entries = hass.config_entries.async_entries(DOMAIN)\n assert len(entries) == 1\n assert entries[0].state is ConfigEntryState.LOADED\n assert entries[0].data[\"token\"][\"access_token\"] == \"some-updated-token\"\n assert entries[0].data[\"token\"][\"expires_in\"] == expires_in",
"def test_jwt_refresh_with_expired_token(self):\n\n # We make sure that the refresh token is not in the window\n # allowed by the expiration delta. This is much easier using\n # freezegun.\n orig_iat = datetime.utcfromtimestamp(self.payload['orig_iat']) -\\\n settings.JWT_REFRESH_EXPIRATION_DELTA -\\\n timedelta(days=1)\n\n self.payload['orig_iat'] = timegm(orig_iat.utctimetuple())\n\n data = {\n 'token': utils.jwt_encode_handler(self.payload)\n }\n\n response = self.client.post(\n '/refresh-token/',\n json.dumps(data),\n content_type='application/json'\n )\n\n self.assertEqual(response.status_code, 400)",
"def test_rejects_expired_token(self):\n config.set(xsrf_token_key='abcdef')\n tool = utils.XsrfTool()\n token = tool.generate_token(12345, 'test_action')\n utils.set_utcnow_for_test(XsrfToolTests.TEST_NOW +\n datetime.timedelta(hours=4, minutes=1))\n self.assertFalse(tool.verify_token(token, 12345, 'test_action'))",
"def test_refreshes_token_when_expired(self):\n\n badgr = self.get_badgr_setup()\n\n # _token_data isn't meant to be exposed; pylint: disable=W0212\n original_token = badgr._token_data['access_token']\n with vcr.use_cassette('tests/vcr_cassettes/expired_auth_token.yaml'):\n badgr.get_from_server(self._sample_url)\n self.assertNotEqual(original_token,\n badgr._token_data['access_token'])",
"def test_create_o_auth_access_token(self):\n pass",
"def test_expired_thread_token_is_valid(self):\n self.token.modified = self.days_ago(const.THREAD_TOKEN_EXPIRY + 1)\n assert not self.token.is_valid()",
"def test_reset_tenant_token_now(self):\n self._check_reset_token(invalidate=True)",
"def test_guest_timezone(self):\n self.check_guest_timezone()",
"def test_guest_timezone(self):\n self.check_guest_timezone()",
"def test_guest_timezone(self):\n self.check_guest_timezone()",
"def test_guest_timezone(self):\n self.check_guest_timezone()",
"def test_guest_timezone(self):\n self.check_guest_timezone()",
"def test_guest_timezone(self):\n self.check_guest_timezone()",
"def test_guest_timezone(self):\n self.check_guest_timezone()",
"def test_guest_timezone(self):\n self.check_guest_timezone()",
"def test_guest_timezone(self):\n self.check_guest_timezone()",
"def test_guest_timezone(self):\n self.check_guest_timezone()",
"def test_guest_timezone(self):\n self.check_guest_timezone()",
"def test_guest_timezone(self):\n self.check_guest_timezone()",
"def test_guest_timezone(self):\n self.check_guest_timezone()",
"def test_guest_timezone(self):\n self.check_guest_timezone()",
"def test_guest_timezone(self):\n self.check_guest_timezone()"
] | [
"0.77153397",
"0.68467736",
"0.68020415",
"0.66638994",
"0.655659",
"0.6532942",
"0.6463671",
"0.64408976",
"0.64147025",
"0.64067024",
"0.6335219",
"0.63339216",
"0.6268353",
"0.62255085",
"0.621401",
"0.6190267",
"0.6190267",
"0.6190267",
"0.6190267",
"0.6190267",
"0.6190267",
"0.6190267",
"0.6190267",
"0.6190267",
"0.6190267",
"0.6190267",
"0.6190267",
"0.6190267",
"0.6190267",
"0.6190267"
] | 0.7908679 | 0 |
This test ensures that invalid grant_type parameters get the appropriate error response. | def test_invalid_grant_type(self):
# Generate a valid auth token
with base.HybridSessionManager():
authorization_code = auth_api.authorization_code_save({
'user_id': 2,
'state': 'test_state',
'code': 'test_valid_code',
'expires_in': 300
})
content_type = 'application/x-www-form-urlencoded'
# POST with content: application/x-www-form-urlencoded
response = self.app.post('/v1/openid/token',
params={
'code': authorization_code.code,
'grant_type': 'invalid_grant_type'
},
content_type=content_type,
expect_errors=True)
        # Assert that the request is rejected as an unsupported grant type.
self.assertEqual(400, response.status_code)
self.assertIsNotNone(response.json)
self.assertEqual('unsupported_grant_type', response.json['error'])
self.assertEqual(e_msg.INVALID_TOKEN_GRANT_TYPE,
response.json['error_description']) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def testInvalidGrantType(self):\n request = self.generateValidTokenRequest(arguments={'grant_type': b'grantType\\xFF\\xFF'},\n authentication=self._VALID_CLIENT)\n result = self._TOKEN_RESOURCE.render_POST(request)\n self.assertFailedTokenRequest(\n request, result, InvalidParameterError('grant_type'),\n msg='Expected the token resource to reject a request with an invalid grant type.')",
"def testNoGrantType(self):\n request = self.generateValidTokenRequest(authentication=self._VALID_CLIENT)\n result = self._TOKEN_RESOURCE.render_POST(request)\n self.assertFailedTokenRequest(\n request, result, MissingParameterError(name='grant_type'),\n msg='Expected the token resource to reject a request without a grant type.')",
"def test_invalid_access_token(self):\n\n content_type = 'application/x-www-form-urlencoded'\n # POST with content: application/x-www-form-urlencoded\n response = self.app.post('/v1/openid/token',\n params={\n 'code': 'invalid_access_token',\n 'grant_type': 'invalid_grant_type'\n },\n content_type=content_type,\n expect_errors=True)\n\n # Assert that this is a successful response\n self.assertEqual(400, response.status_code)\n self.assertIsNotNone(response.json)\n self.assertEqual('unsupported_grant_type', response.json['error'])\n self.assertEqual(e_msg.INVALID_TOKEN_GRANT_TYPE,\n response.json['error_description'])",
"def testInvalidContentType(self):\n request = MockRequest('POST', 'token', arguments={\n 'grant_type': 'refresh_token',\n 'refresh_token': self._VALID_REFRESH_TOKEN\n })\n request.setRequestHeader('Content-Type', 'application/not-x-www-form-urlencoded')\n result = self._TOKEN_RESOURCE.render_POST(request)\n self.assertFailedTokenRequest(\n request, result,\n MalformedRequestError('The Content-Type must be \"application/x-www-form-urlencoded\"'),\n msg='Expected the token resource to reject a request with an invalid content type.')",
"def test_authorize_invalid_response_type(self):\n invalid_params = self.valid_params.copy()\n invalid_params['response_type'] = 'invalid_code'\n\n # Simple GET with invalid code parameters\n random_state = six.text_type(uuid.uuid4())\n response = self.get_json(path='/openid/authorize',\n expect_errors=True,\n state=random_state,\n **invalid_params)\n\n # Validate the error response\n self.assertValidRedirect(response=response,\n expected_status_code=302,\n redirect_uri=invalid_params['redirect_uri'],\n error='unsupported_response_type',\n error_description=e_msg.INVALID_RESPONSE_TYPE)",
"def testDuplicatedGrantType(self):\n validArguments = {'grant_type': 'refresh_token', 'refresh_token': self._VALID_REFRESH_TOKEN}\n request = self.generateValidTokenRequest(\n urlQuery='grant_type=' + validArguments['grant_type'],\n arguments=validArguments, authentication=self._VALID_CLIENT)\n result = self._TOKEN_RESOURCE.render_POST(request)\n self.assertFailedTokenRequest(request, result, MultipleParameterError('grant_type'),\n msg='Expected the token resource to reject a request '\n 'with multiple grant_type parameters')\n request = self.generateValidTokenRequest(urlQuery='grant_type=1', arguments=validArguments,\n authentication=self._VALID_CLIENT)\n result = self._TOKEN_RESOURCE.render_POST(request)\n self.assertFailedTokenRequest(\n request, result, MultipleParameterError('grant_type'),\n msg='Expected the token resource to reject a request with multiple grant_type '\n 'parameters, even if one parameter is an unknown grant type.')",
"def testUnsupportedGrantType(self):\n grantType = 'extendedFunctionalityGrantType'\n request = self.generateValidTokenRequest(arguments={'grant_type': grantType},\n authentication=self._VALID_CLIENT)\n result = self._TOKEN_RESOURCE.render_POST(request)\n self.assertFailedTokenRequest(\n request, result, UnsupportedGrantTypeError(grantType),\n msg='Expected the token resource to reject a request with an unknown grant type.')\n tokenResource = TokenResource(\n self._TOKEN_FACTORY, self._PERSISTENT_STORAGE, self._REFRESH_TOKEN_STORAGE,\n self._AUTH_TOKEN_STORAGE, self._CLIENT_STORAGE, grantTypes=[grantType])\n result = tokenResource.render_POST(request)\n self.assertFailedTokenRequest(\n request, result, UnsupportedGrantTypeError(grantType),\n msg='Expected the token resource to reject a request with an unsupported grant type.')",
"def test_fail_token(client, request):\n res = client.get('/token?uid=1')\n\n assert res.status_code == 400\n assert 'User does not exist' in res.data.decode('utf-8')",
"def test_get_token_failure(self):\n url = '/api-token-auth/'\n data = {'username': 'adam', 'password': '321'}\n\n response = Client().post(url, data)\n self.assertEqual(response.status_code, 400)",
"def test_signup_invalid_params(self):\n url = '/0/chefs'\n\n # No data\n data = {}\n resp = self.client.post(url, data=data)\n self.assertEqual(resp.status_code, 400)\n self.assertEqual(resp.data['code'], 400)\n self.assertEqual(resp.data['message'], 'Invalid parameters')\n self.assertIn('raw', resp.data)\n error_keys = [e['field'] for e in resp.data['raw'] if 'field' in e]\n self.assertEqual(set(['email', 'name', 'language']), set(error_keys))\n\n # Everything but password or fb_access_token\n data = {\n 'email': '[email protected]',\n 'name': 'John',\n 'surname': 'Doe',\n 'language': 'es',\n }\n resp = self.client.post(url, data=data)\n self.assertEqual(resp.status_code, 400)\n self.assertEqual(resp.data['code'], 400)\n self.assertEqual(resp.data['message'], 'Invalid parameters')\n self.assertEqual(len(resp.data['raw']), 1)",
"def test_for_bad_request_errors(self):\n # Invalid token:\n response = self.client.get(\n reverse(\n 'users:recover_password',\n kwargs={\n 'token': 'invalid_token',\n },\n ),\n follow=True,\n )\n\n self.assertEqual(response.status_code, 400)",
"def test_authorize_no_response_type(self):\n invalid_params = self.valid_params.copy()\n del invalid_params['response_type']\n\n # Simple GET with invalid code parameters\n random_state = six.text_type(uuid.uuid4())\n response = self.get_json(path='/openid/authorize',\n expect_errors=True,\n state=random_state,\n **invalid_params)\n\n # Validate the error response\n self.assertValidRedirect(response=response,\n expected_status_code=302,\n redirect_uri=invalid_params['redirect_uri'],\n error='unsupported_response_type',\n error_description=e_msg.NO_RESPONSE_TYPE)",
"def test_invalid_refresh_token(self):\n\n content_type = 'application/x-www-form-urlencoded'\n # Generate an auth and a refresh token.\n resp_1 = self.app.post('/v1/openid/token',\n params={\n 'refresh_token': 'invalid_refresh_token',\n 'grant_type': 'refresh_token'\n },\n content_type=content_type,\n expect_errors=True)\n\n # Assert that this is a correct response\n self.assertEqual(401, resp_1.status_code)\n self.assertIsNotNone(resp_1.json)\n self.assertEqual('invalid_grant', resp_1.json['error'])",
"def test_create_token_invalid_credentials(self):\n # create user\n create_user(email='[email protected]', password='abcd1234')\n payload = {\n 'email': '[email protected]',\n 'password': 'wrong'\n }\n # We do not expect a token and should get a HTTP 400\n response = self.client.post(TOKEN_URL, payload)\n\n self.assertNotIn('token', response.data)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)",
"def test_confirm_add_flow_request_invalid_consent(self):\n self.client.login(username='duck', password='duck')\n res = self.client.get(\n '/v1/flow_requests/consents_confirmed/?success=true&consent_confirm_id=aaaaa')\n self.assertEqual(res.status_code, 400)\n self.assertEqual(res.content.decode('utf-8'), ERRORS_MESSAGE['INVALID_DATA'])",
"def test_request_token_backend_failure(self):\n\n req = new_req_session_bad()\n\n req.current_route_url(\n uri=oauth1_utils.CustomApiClient.OAUTH1_SERVER_REQUEST_TOKEN\n )\n provider = oauth1_utils.new_oauth1Provider(req)\n result = provider.endpoint__request_token(dbSessionCommit=req.dbSession)\n assert result.status_code == 400\n assert (\n result.text\n == \"error=invalid_request&error_description=Missing+mandatory+OAuth+parameters.\"\n )\n\n req.headers = {\"Authorization\": OAUTH_EXAMPLE_AUTH}\n provider = oauth1_utils.new_oauth1Provider(req)\n result = provider.endpoint__request_token(dbSessionCommit=req.dbSession)\n assert result.status_code == 400\n assert (\n result.text\n == \"error=invalid_request&error_description=Timestamp+given+is+invalid%2C+differ+from+allowed+by+over+600+seconds.\"\n )\n\n req.headers = {\"Authorization\": OAUTH_EXAMPLE_AUTH}\n provider = oauth1_utils.new_oauth1Provider(req)\n result = provider.endpoint__request_token(dbSessionCommit=req.dbSession)\n assert result.status_code == 400\n assert (\n result.text\n == \"error=invalid_request&error_description=Timestamp+given+is+invalid%2C+differ+from+allowed+by+over+600+seconds.\"\n )\n\n req.headers = {\n \"Authorization\": OAUTH_EXAMPLE_AUTH.replace(\n \"1533856374\", oauth1_utils.oauth_time_now()\n )\n }\n provider = oauth1_utils.new_oauth1Provider(req)\n result = provider.endpoint__request_token(dbSessionCommit=req.dbSession)\n assert result.status_code == 500\n assert (\n result.text\n == \"error=internal_system_failure&error_description=Internal+System+Failure\"\n )",
"def test_invalid_auth_inputs(login_inputs, expected_result, expected_status_code):\n with requests.Session() as session:\n create_next_admin(session)\n create_test_user(session, USER_INPUT)\n response = session.post(\n \"http://rbac-server:8000/api/authorization/\", json=login_inputs\n )\n assert response.json()[\"message\"] == expected_result\n assert response.json()[\"code\"] == expected_status_code",
"def test_auth_code_negative(self, api):\n resp = api.login_user(\"QWERTY\", \"QWERTY\")\n assert resp.status_code == 400",
"def test_create_token_invalid_credentials(self):\n payload = {\n 'email': '[email protected]',\n 'password': 'testpass'\n }\n create_user(**payload)\n wrong_payload = {\n 'email': '[email protected]',\n 'password': 'wrong'\n }\n res = self.client.post(TOKEN_URI, wrong_payload)\n self.assertNotIn('token', res.data)\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)",
"def test_type_error_raised(self):\n with self.assertRaises(TypeError):\n authenticate(username=\"test\", password=\"test\")",
"def test_incorrect_type(self):\n body = json.dumps({\n \"first_name\": 200,\n \"last_name\": \"Holmes\",\n \"email\": \"[email protected]\",\n \"password\": \"ilovek@ndA!\"\n })\n\n errorObject = {\n \"error\": \"Bad request\",\n \"field_errors\": {\n \"first_name\": [\"Invalid field type\"]\n }\n }\n\n result = self.simulate_post('/', body=body, headers=headers)\n\n self.assertEqual(result.status_code, 400)\n self.assertEqual(result.json, errorObject)",
"def wrong_oauth_config_bad_auth_type(url_base):\n return {\n \"credentials\": {\n \"client_secret\": \"test_client_secret\",\n \"client_id\": \"test_client_id\",\n \"refresh_token\": \"test_refresh_token\",\n },\n \"base_url\": url_base,\n }",
"def test_add_flow_requests_wrong_content_type(self):\n headers = self._get_oauth_header()\n res = self.client.post('/v1/flow_requests/', data=self.flow_request, **headers)\n self.assertEqual(res.status_code, 415)",
"def test_create_token_invalid_credentials(self):\n sigin_in_user(email='[email protected]', password=\"hellohello\")\n data = {\n 'email': '[email protected]', \n 'password': \"testtest\"\n }\n res = self.client.post(TOKEN_URL, data)\n self.assertNotIn('token', res.data)\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)",
"def test_create_token_invalid_credantials(self):\n create_user(email='[email protected]', password='testpass')\n payload = {'email': '[email protected]', 'password': 'wrong'}\n res = self.client.post(TOKEN_URL, payload)\n\n self.assertNotIn('token', res.data)\n self.assertEquals(res.status_code, status.HTTP_400_BAD_REQUEST)",
"def test_social_auth_exception(self):\n self._setup_provider_response_with_body(200, json.dumps(\"false\"))\n response = self.client.post(self.url, self.data())\n self._assert_access_token_error(response, \"The provided access_token is not valid.\", \"tpa-invalid-access-token\")\n self._verify_user_existence(user_exists=False, social_link_exists=False)",
"def test_create_token_invalid_credentials(self):\r\n create_user(email='[email protected]', password='testpass')\r\n payload = {\r\n 'email': '[email protected]',\r\n 'password': 'Wrongpass',\r\n 'name': 'Maks'\r\n }\r\n\r\n res = self.client.post(TOKEN_URL, payload)\r\n\r\n self.assertNotIn('token', res.data)\r\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)",
"async def test_token_request_fails(hass: HomeAssistant) -> None:\n flow = config_flow.EcobeeFlowHandler()\n flow.hass = hass\n flow.hass.data[DATA_ECOBEE_CONFIG] = {}\n\n with patch(\"homeassistant.components.ecobee.config_flow.Ecobee\") as mock_ecobee:\n mock_ecobee = mock_ecobee.return_value\n mock_ecobee.request_tokens.return_value = False\n mock_ecobee.pin = \"test-pin\"\n\n flow._ecobee = mock_ecobee\n\n result = await flow.async_step_authorize(user_input={})\n\n assert result[\"type\"] == data_entry_flow.FlowResultType.FORM\n assert result[\"step_id\"] == \"authorize\"\n assert result[\"errors\"][\"base\"] == \"token_request_failed\"\n assert result[\"description_placeholders\"] == {\"pin\": \"test-pin\"}",
"def test_create_consent_fail_on_incorrect_type(client, session, tokens):\n data = {\n \"type\": \"gp_dr\",\n \"category\": \"newsletter\",\n \"status\": \"accepted\",\n }\n response = client.post(\n \"/consent\",\n json=data,\n headers={\"Authorization\": f\"Bearer {tokens['write']}\"},\n )\n assert response.status_code == 422",
"def test_create_token_invalid_credentials(setup_client):\n client = setup_client\n payload = {\n 'email': '[email protected]',\n 'password': 'testpass',\n }\n create_user(**payload, **{'role': 'Supplier'})\n payload[\"password\"] = \"Something else\"\n res = client.post(TOKEN_URL, payload)\n assert \"token\" not in res.data\n assert res.status_code == status.HTTP_400_BAD_REQUEST"
] | [
"0.772801",
"0.72654927",
"0.726257",
"0.70829815",
"0.68463784",
"0.677205",
"0.6727337",
"0.6633874",
"0.651238",
"0.6486372",
"0.6486188",
"0.64548945",
"0.6451076",
"0.6325722",
"0.62595737",
"0.6252418",
"0.6246032",
"0.6237852",
"0.6236558",
"0.6236232",
"0.61953956",
"0.61742216",
"0.6161994",
"0.6155454",
"0.6142567",
"0.6135207",
"0.61091846",
"0.61075276",
"0.6098867",
"0.6077605"
] | 0.8002551 | 0 |
This test ensures that a valid refresh token can be converted into a valid access token, and cleans up after itself. | def test_valid_refresh_token(self):
# Generate a valid access code
with base.HybridSessionManager():
authorization_code = auth_api.authorization_code_save({
'user_id': 2,
'state': 'test_state',
'code': 'test_valid_code'
})
content_type = 'application/x-www-form-urlencoded'
# Generate an auth and a refresh token.
resp_1 = self.app.post('/v1/openid/token',
params={
'code': authorization_code.code,
'grant_type': 'authorization_code'
},
content_type=content_type,
expect_errors=True)
# Assert that this is a successful response
self.assertEqual(200, resp_1.status_code)
# Assert that the token came back in the response
t1 = resp_1.json
# Assert that both are in the database.
with base.HybridSessionManager():
access_token = \
token_api.access_token_get_by_token(t1['access_token'])
self.assertIsNotNone(access_token)
with base.HybridSessionManager():
refresh_token = refresh_tokens.refresh_token_get_by_token(
t1['refresh_token'])
self.assertIsNotNone(refresh_token)
content_type = 'application/x-www-form-urlencoded'
# Issue a refresh token request.
resp_2 = self.app.post('/v1/openid/token',
params={
'refresh_token': t1['refresh_token'],
'grant_type': 'refresh_token'
},
content_type=content_type,
expect_errors=True)
# Assert that the response is good.
self.assertEqual(200, resp_2.status_code)
# Assert that the token came back in the response
t2 = resp_2.json
self.assertIsNotNone(t2['access_token'])
self.assertIsNotNone(t2['expires_in'])
self.assertIsNotNone(t2['id_token'])
self.assertIsNotNone(t2['refresh_token'])
self.assertIsNotNone(t2['token_type'])
self.assertEqual('Bearer', t2['token_type'])
# Assert that the access token is in the database
with base.HybridSessionManager():
new_access_token = \
token_api.access_token_get_by_token(t2['access_token'])
self.assertIsNotNone(new_access_token)
# Assert that system configured values is owned by the correct user.
self.assertEqual(2, new_access_token.user_id)
self.assertEqual(t2['id_token'], new_access_token.user_id)
self.assertEqual(t2['expires_in'], CONF.oauth.access_token_ttl)
self.assertEqual(t2['expires_in'], new_access_token.expires_in)
self.assertEqual(t2['access_token'],
new_access_token.access_token)
# Assert that the refresh token is in the database
with base.HybridSessionManager():
new_refresh_token = refresh_tokens.refresh_token_get_by_token(
t2['refresh_token'])
self.assertIsNotNone(new_refresh_token)
# Assert that system configured values is owned by the correct user.
self.assertEqual(2, new_refresh_token.user_id)
self.assertEqual(CONF.oauth.refresh_token_ttl,
new_refresh_token.expires_in)
self.assertEqual(t2['refresh_token'],
new_refresh_token.refresh_token)
# Assert that the old access tokens are no longer in the database and
# have been cleaned up.
with base.HybridSessionManager():
no_access_token = \
token_api.access_token_get_by_token(t1['access_token'])
with base.HybridSessionManager():
no_refresh_token = \
refresh_tokens.refresh_token_get_by_token(t1['refresh_token'])
self.assertIsNone(no_refresh_token)
self.assertIsNone(no_access_token) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_authtoken_refresh(self):\n hagrid = models.User(username='hagrid', fullname='Rubeus Hagrid')\n auth_token = models.AuthToken(user=hagrid, algorithm='hmac-sha-1')\n existing_token = auth_token.token\n existing_secret = auth_token.secret\n auth_token.refresh()\n self.assertNotEqual(existing_token, auth_token.token)\n self.assertNotEqual(existing_secret, auth_token.secret)",
"def test_invalid_refresh_token(self):\n\n content_type = 'application/x-www-form-urlencoded'\n # Generate an auth and a refresh token.\n resp_1 = self.app.post('/v1/openid/token',\n params={\n 'refresh_token': 'invalid_refresh_token',\n 'grant_type': 'refresh_token'\n },\n content_type=content_type,\n expect_errors=True)\n\n # Assert that this is a correct response\n self.assertEqual(401, resp_1.status_code)\n self.assertIsNotNone(resp_1.json)\n self.assertEqual('invalid_grant', resp_1.json['error'])",
"def test_legacy_client_invalid_refresh_token_expired_access_token(self):\n self.legacy_client._client._expires_at = 1\n self.legacy_client.token['refresh_token'] = 'invalidrefreshtoken'\n response = self.legacy_client.request('GET', urljoin(BASE_URL, MOCK_ENDPOINT))\n self.assertEquals(response.content, RESPONSE)",
"def test_refreshes_token_when_expired(self):\n\n badgr = self.get_badgr_setup()\n\n # _token_data isn't meant to be exposed; pylint: disable=W0212\n original_token = badgr._token_data['access_token']\n with vcr.use_cassette('tests/vcr_cassettes/expired_auth_token.yaml'):\n badgr.get_from_server(self._sample_url)\n self.assertNotEqual(original_token,\n badgr._token_data['access_token'])",
"def test_mail_client_invalid_refresh_token_expired_access_token(self):\n self.mail_client._client._expires_at = 1\n self.mail_client.token['refresh_token'] = 'invalidrefreshtoken'\n with self.assertRaises(InvalidGrantError):\n self.mail_client.request('GET', urljoin(BASE_URL, MOCK_ENDPOINT))",
"def test_good_token(self):\n self.assertEqual(CloudCredentials.objects.count(), 0)\n cred = self.factory.create(access_token='good_token', expires_at=self.current_dt)\n with HTTMock(spark_cloud_mock):\n refresh_access_token()\n self.assertEqual(CloudCredentials.objects.count(), 1)\n self.assertEqual(CloudCredentials.objects._access_token(), 'good_token')\n cred.delete()",
"def test_legacy_client_invalid_refresh_token(self):\n self.legacy_client._client.access_token = 'invalidaccesstoken'\n self.legacy_client.token['refresh_token'] = 'invalidrefreshtoken'\n response = self.legacy_client.request('GET', urljoin(BASE_URL, MOCK_ENDPOINT))\n self.assertEquals(response.content, RESPONSE)",
"def test_evicts_invalid_refresh_token():\n\n tenant_id = \"tenant-id\"\n client_id = \"client-id\"\n invalid_token = \"invalid-refresh-token\"\n\n cache = TokenCache()\n cache.add({\"response\": build_aad_response(uid=\"id1\", utid=\"tid1\", access_token=\"*\", refresh_token=invalid_token)})\n cache.add({\"response\": build_aad_response(uid=\"id2\", utid=\"tid2\", access_token=\"*\", refresh_token=\"...\")})\n assert len(cache.find(TokenCache.CredentialType.REFRESH_TOKEN)) == 2\n assert len(cache.find(TokenCache.CredentialType.REFRESH_TOKEN, query={\"secret\": invalid_token})) == 1\n\n def send(request, **_):\n assert request.data[\"refresh_token\"] == invalid_token\n return mock_response(json_payload={\"error\": \"invalid_grant\"}, status_code=400)\n\n transport = Mock(send=Mock(wraps=send))\n\n client = AadClient(tenant_id, client_id, transport=transport, cache=cache)\n with pytest.raises(ClientAuthenticationError):\n client.obtain_token_by_refresh_token(scopes=(\"scope\",), refresh_token=invalid_token)\n\n assert transport.send.call_count == 1\n assert len(cache.find(TokenCache.CredentialType.REFRESH_TOKEN)) == 1\n assert len(cache.find(TokenCache.CredentialType.REFRESH_TOKEN, query={\"secret\": invalid_token})) == 0",
"def test_renews_token(self):\n self.assertEqual(CloudCredentials.objects.count(), 0)\n old = self.factory.create(access_token='old_token', expires_at=self.expired_dt)\n with HTTMock(spark_cloud_mock):\n refresh_access_token()\n self.assertEqual(CloudCredentials.objects.count(), 2)\n self.assertEqual(CloudCredentials.objects._access_token(), ACCESS_TOKEN)\n CloudCredentials.objects.all().delete()",
"def test_jwt_refresh_with_expired_token(self):\n\n # We make sure that the refresh token is not in the window\n # allowed by the expiration delta. This is much easier using\n # freezegun.\n orig_iat = datetime.utcfromtimestamp(self.payload['orig_iat']) -\\\n settings.JWT_REFRESH_EXPIRATION_DELTA -\\\n timedelta(days=1)\n\n self.payload['orig_iat'] = timegm(orig_iat.utctimetuple())\n\n data = {\n 'token': utils.jwt_encode_handler(self.payload)\n }\n\n response = self.client.post(\n '/refresh-token/',\n json.dumps(data),\n content_type='application/json'\n )\n\n self.assertEqual(response.status_code, 400)",
"def test_authenticate_refresh(app, client, session, models):\n user = models[\"user\"][0]\n # Authenticate to receive a refresh token\n response = client.post(\n \"/authenticate/local\",\n data={\"email\": user.email, \"password\": \"hunter2\"},\n )\n refresh_token = json.loads(response.data)[\"refresh_token\"]\n\n # Check that token values are as expected\n assert len(refresh_token[\"val\"]) == 64\n assert datetime.fromtimestamp(refresh_token[\"exp\"]) > datetime.now()\n assert datetime.fromtimestamp(refresh_token[\"exp\"]) < (\n datetime.now() + app.config[\"REFRESH_TOKEN_VALIDITY\"]\n )\n\n # Check that the returned token is now stored in the database\n assert refresh_token[\"val\"] == user.refresh_tokens[0].token\n\n # Expect refreshing token to succeed\n response = client.post(\n \"/refresh\", data={\"refresh_token\": refresh_token[\"val\"]}\n )\n assert response.status_code == 200\n raw_jwt_token = json.loads(response.data)[\"jwt\"]\n\n # Expect that the new claims are equal to the user claims, except for the\n # expiry which will have refreshed\n refresh_claims = jwt.decode(\n raw_jwt_token, app.config[\"RSA_PUBLIC_KEY\"], app.config[\"ALGORITHM\"],\n )\n del refresh_claims[\"exp\"]\n assert user.claims == refresh_claims\n\n # Expect refreshing an expired token to fail\n token = user.refresh_tokens[0]\n token.expiry = datetime.now() - timedelta(seconds=1)\n response = client.post(\"/refresh\", data={\"refresh_token\": token.token})\n assert response.status_code == 401",
"def test_mail_client_invalid_refresh_token(self):\n self.mail_client._client.access_token = 'invalidaccesstoken'\n self.mail_client.token['refresh_token'] = 'invalidrefreshtoken'\n with self.assertRaises(InvalidGrantError):\n self.mail_client.request('GET', urljoin(BASE_URL, MOCK_ENDPOINT))",
"def test_replace_o_auth_access_token(self):\n pass",
"def test_delete_o_auth_access_token(self):\n pass",
"async def test_expired_token_requires_reauth(\n hass: HomeAssistant,\n component_setup: ComponentSetup,\n aioclient_mock: AiohttpClientMocker,\n) -> None:\n\n aioclient_mock.post(\n \"https://oauth2.googleapis.com/token\",\n status=http.HTTPStatus.BAD_REQUEST,\n )\n\n await component_setup()\n\n entries = hass.config_entries.async_entries(DOMAIN)\n assert len(entries) == 1\n assert entries[0].state is ConfigEntryState.SETUP_ERROR\n\n flows = hass.config_entries.flow.async_progress()\n assert len(flows) == 1\n assert flows[0][\"step_id\"] == \"reauth_confirm\"",
"async def test_expired_token_refresh_internal_error(\n hass: HomeAssistant,\n component_setup: ComponentSetup,\n aioclient_mock: AiohttpClientMocker,\n) -> None:\n\n aioclient_mock.post(\n \"https://oauth2.googleapis.com/token\",\n status=http.HTTPStatus.INTERNAL_SERVER_ERROR,\n )\n\n await component_setup()\n\n entries = hass.config_entries.async_entries(DOMAIN)\n assert len(entries) == 1\n assert entries[0].state is ConfigEntryState.SETUP_RETRY",
"def refresh_token():\n json_request = request.json\n refresh_token = json_request.get('refresh_token')\n if not refresh_token:\n return msg.errors.bad_request(\n 'You should provide refresh token for this call')\n refresh_token_obj = RefreshToken.valid_token(refresh_token)\n if not refresh_token_obj:\n return msg.errors.unauthorized('Provided refresh token is not valid')\n access_token = generate_token(refresh_token_obj.user_id)\n return msg.success(\n message='New access token generated',\n access_token=access_token)",
"def test_expired_access_token_time(self):\n\n expired = datetime.datetime.now(pytz.utc) - datetime.timedelta(\n minutes=6)\n\n # Store the old TZ info, if it exists.\n old_tz = None\n if 'TZ' in os.environ:\n old_tz = os.environ['TZ']\n\n # Convert now into every possible timezone out there :)\n for name in self.tested_timezones:\n\n # Override the 'default timezone' for the current runtime.\n os.environ['TZ'] = name\n\n # Create a token.\n with base.HybridSessionManager():\n authorization_code = auth_api.authorization_code_save({\n 'user_id': 2,\n 'state': 'test_state',\n 'code': 'test_valid_code',\n 'expires_in': 300,\n 'created_at': expired\n })\n\n content_type = 'application/x-www-form-urlencoded'\n # POST with content: application/x-www-form-urlencoded\n response = self.app.post('/v1/openid/token',\n params={\n 'code': authorization_code.code,\n 'grant_type': 'authorization_code'\n },\n content_type=content_type,\n expect_errors=True)\n\n # Assert that this is a valid call.\n self.assertEqual(401, response.status_code)\n\n # Reset the timezone.\n if old_tz:\n os.environ['TZ'] = old_tz\n else:\n del os.environ['TZ']",
"def test_refresh_token(self):\n self.assertEqual(CloudCredentials.objects.count(), 0)\n with HTTMock(spark_cloud_mock):\n CloudCredentials.objects.refresh_token()\n self.assertEqual(CloudCredentials.objects.count(), 1)\n self.assertEqual(CloudCredentials.objects._access_token(), ACCESS_TOKEN)\n CloudCredentials.objects.all().delete()",
"def test_valid_access_token_time(self):\n\n # Store the old TZ info, if it exists.\n old_tz = None\n if 'TZ' in os.environ:\n old_tz = os.environ['TZ']\n\n # Convert now into every possible timezone out there :)\n for name in self.tested_timezones:\n\n # Override the 'default timezone' for the current runtime.\n os.environ['TZ'] = name\n\n # Create a token.\n with base.HybridSessionManager():\n authorization_code = auth_api.authorization_code_save({\n 'user_id': 2,\n 'state': 'test_state',\n 'code': 'test_valid_code',\n 'expires_in': 300\n })\n\n content_type = 'application/x-www-form-urlencoded'\n response = self.app.post('/v1/openid/token',\n params={\n 'code': authorization_code.code,\n 'grant_type': 'authorization_code'\n },\n content_type=content_type,\n expect_errors=True)\n\n # Assert that this is a valid call.\n self.assertEqual(200, response.status_code)\n\n # Reset the timezone.\n if old_tz:\n os.environ['TZ'] = old_tz\n else:\n del os.environ['TZ']",
"def test_patch_o_auth_access_token(self):\n pass",
"def test_revoke_refresh_token(client, tokens):\n response = client.delete(\n \"/auth/refresh-token/\",\n headers={\"Authorization\": \"Bearer {}\".format(tokens[\"refresh\"])},\n )\n\n payload = response.get_json()\n assert response.status_code == HTTPStatus.OK\n assert payload[\"msg\"] == \"Refresh token successfully revoked\"",
"def test_access_token_all_expired(self):\n exp = self.factory.create(access_token='expired', expires_at=self.expired_dt)\n with HTTMock(spark_cloud_mock):\n token = CloudCredentials.objects._access_token()\n self.assertEqual(token, None)\n exp.delete()",
"def test_access_token(self):\n exp = self.factory.create(access_token='expired', expires_at=self.expired_dt)\n cur = self.factory.create(access_token=ACCESS_TOKEN, expires_at=self.current_dt)\n old = self.factory.create(access_token='old', expires_at=self.old_dt)\n with HTTMock(spark_cloud_mock):\n token = CloudCredentials.objects._access_token()\n self.assertEqual(token, ACCESS_TOKEN)\n CloudCredentials.objects.all().delete()",
"def test_legacy_client_expired_access_token(self):\n self.legacy_client._client._expires_at = 1\n response = self.legacy_client.request('GET', urljoin(BASE_URL, MOCK_ENDPOINT))\n self.assertEquals(response.content, RESPONSE)",
"def refresh_token():\n current_user = get_jwt_identity()\n if current_user is None:\n return abort(401)\n response = deepcopy(AUTH_OKAY)\n response['payload']['access_token'] = create_access_token(\n identity=current_user,\n expires_delta=EXPIRY_DURATION\n )\n response['payload']['expires_in'] = EXPIRY_DURATION.seconds\n response['payload']['not_before'] = int(time() + EXPIRY_DURATION.seconds)\n return jsonify(response['payload']), response['status_code']",
"def test_reset_tenant_token_now(self):\n self._check_reset_token(invalidate=True)",
"async def test_invalid_token_expiry_in_config_entry(\n hass: HomeAssistant,\n component_setup: ComponentSetup,\n aioclient_mock: AiohttpClientMocker,\n) -> None:\n\n # The token is refreshed and new expiration values are returned\n expires_in = 86400\n expires_at = time.time() + expires_in\n aioclient_mock.post(\n \"https://oauth2.googleapis.com/token\",\n json={\n \"refresh_token\": \"some-refresh-token\",\n \"access_token\": \"some-updated-token\",\n \"expires_at\": expires_at,\n \"expires_in\": expires_in,\n },\n )\n\n assert await component_setup()\n\n # Verify token expiration values are updated\n entries = hass.config_entries.async_entries(DOMAIN)\n assert len(entries) == 1\n assert entries[0].state is ConfigEntryState.LOADED\n assert entries[0].data[\"token\"][\"access_token\"] == \"some-updated-token\"\n assert entries[0].data[\"token\"][\"expires_in\"] == expires_in",
"def testAuthorizationWithoutClientAuth(self):\n request = self.generateValidTokenRequest(arguments={\n 'grant_type': 'refresh_token',\n 'refresh_token': self._VALID_REFRESH_TOKEN\n })\n result = self._TOKEN_RESOURCE.render_POST(request)\n self.assertFailedTokenRequest(\n request, result, NoClientAuthenticationError(),\n msg='Expected the token resource to reject a request without any authentication.')\n request = self.generateValidTokenRequest(arguments={\n 'grant_type': 'refresh_token',\n 'refresh_token': self._VALID_REFRESH_TOKEN,\n 'client_id': self._VALID_CLIENT.id,\n })\n result = self._TOKEN_RESOURCE.render_POST(request)\n self.assertFailedTokenRequest(\n request, result, NoClientAuthenticationError(),\n msg='Expected the token resource to reject a request without client authentication.')",
"def test_reset_tenant_token_later(self):\n new_token, orig_token = self._check_reset_token(invalidate=False)\n self.assertEqual(new_token.previous, orig_token.valid)"
] | [
"0.75595343",
"0.74704844",
"0.7360822",
"0.7301422",
"0.7252117",
"0.72328687",
"0.72013396",
"0.7191407",
"0.71329045",
"0.7123395",
"0.71109587",
"0.7087008",
"0.7076181",
"0.70589894",
"0.70476675",
"0.69455695",
"0.6916871",
"0.6894391",
"0.6884534",
"0.68592453",
"0.6837294",
"0.68279415",
"0.6789943",
"0.67571753",
"0.6735102",
"0.6700794",
"0.6663033",
"0.66465235",
"0.66397697",
"0.66209316"
] | 0.76067364 | 0 |
This test ensures that an invalid refresh token cannot be converted into a valid access token. | def test_invalid_refresh_token(self):
content_type = 'application/x-www-form-urlencoded'
# Generate an auth and a refresh token.
resp_1 = self.app.post('/v1/openid/token',
params={
'refresh_token': 'invalid_refresh_token',
'grant_type': 'refresh_token'
},
content_type=content_type,
expect_errors=True)
# Assert that this is a correct response
self.assertEqual(401, resp_1.status_code)
self.assertIsNotNone(resp_1.json)
self.assertEqual('invalid_grant', resp_1.json['error']) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_legacy_client_invalid_refresh_token_expired_access_token(self):\n self.legacy_client._client._expires_at = 1\n self.legacy_client.token['refresh_token'] = 'invalidrefreshtoken'\n response = self.legacy_client.request('GET', urljoin(BASE_URL, MOCK_ENDPOINT))\n self.assertEquals(response.content, RESPONSE)",
"def test_legacy_client_invalid_refresh_token(self):\n self.legacy_client._client.access_token = 'invalidaccesstoken'\n self.legacy_client.token['refresh_token'] = 'invalidrefreshtoken'\n response = self.legacy_client.request('GET', urljoin(BASE_URL, MOCK_ENDPOINT))\n self.assertEquals(response.content, RESPONSE)",
"def test_mail_client_invalid_refresh_token_expired_access_token(self):\n self.mail_client._client._expires_at = 1\n self.mail_client.token['refresh_token'] = 'invalidrefreshtoken'\n with self.assertRaises(InvalidGrantError):\n self.mail_client.request('GET', urljoin(BASE_URL, MOCK_ENDPOINT))",
"def test_mail_client_invalid_refresh_token(self):\n self.mail_client._client.access_token = 'invalidaccesstoken'\n self.mail_client.token['refresh_token'] = 'invalidrefreshtoken'\n with self.assertRaises(InvalidGrantError):\n self.mail_client.request('GET', urljoin(BASE_URL, MOCK_ENDPOINT))",
"def test_valid_refresh_token(self):\n\n # Generate a valid access code\n with base.HybridSessionManager():\n authorization_code = auth_api.authorization_code_save({\n 'user_id': 2,\n 'state': 'test_state',\n 'code': 'test_valid_code'\n })\n\n content_type = 'application/x-www-form-urlencoded'\n # Generate an auth and a refresh token.\n resp_1 = self.app.post('/v1/openid/token',\n params={\n 'code': authorization_code.code,\n 'grant_type': 'authorization_code'\n },\n content_type=content_type,\n expect_errors=True)\n\n # Assert that this is a successful response\n self.assertEqual(200, resp_1.status_code)\n\n # Assert that the token came back in the response\n t1 = resp_1.json\n\n # Assert that both are in the database.\n with base.HybridSessionManager():\n access_token = \\\n token_api.access_token_get_by_token(t1['access_token'])\n self.assertIsNotNone(access_token)\n\n with base.HybridSessionManager():\n refresh_token = refresh_tokens.refresh_token_get_by_token(\n t1['refresh_token'])\n\n self.assertIsNotNone(refresh_token)\n\n content_type = 'application/x-www-form-urlencoded'\n # Issue a refresh token request.\n resp_2 = self.app.post('/v1/openid/token',\n params={\n 'refresh_token': t1['refresh_token'],\n 'grant_type': 'refresh_token'\n },\n content_type=content_type,\n expect_errors=True)\n\n # Assert that the response is good.\n self.assertEqual(200, resp_2.status_code)\n\n # Assert that the token came back in the response\n t2 = resp_2.json\n self.assertIsNotNone(t2['access_token'])\n self.assertIsNotNone(t2['expires_in'])\n self.assertIsNotNone(t2['id_token'])\n self.assertIsNotNone(t2['refresh_token'])\n self.assertIsNotNone(t2['token_type'])\n self.assertEqual('Bearer', t2['token_type'])\n\n # Assert that the access token is in the database\n with base.HybridSessionManager():\n new_access_token = \\\n token_api.access_token_get_by_token(t2['access_token'])\n self.assertIsNotNone(new_access_token)\n\n # Assert that system configured values is owned by the correct user.\n self.assertEqual(2, new_access_token.user_id)\n self.assertEqual(t2['id_token'], new_access_token.user_id)\n self.assertEqual(t2['expires_in'], CONF.oauth.access_token_ttl)\n self.assertEqual(t2['expires_in'], new_access_token.expires_in)\n self.assertEqual(t2['access_token'],\n new_access_token.access_token)\n\n # Assert that the refresh token is in the database\n\n with base.HybridSessionManager():\n new_refresh_token = refresh_tokens.refresh_token_get_by_token(\n t2['refresh_token'])\n\n self.assertIsNotNone(new_refresh_token)\n\n # Assert that system configured values is owned by the correct user.\n self.assertEqual(2, new_refresh_token.user_id)\n self.assertEqual(CONF.oauth.refresh_token_ttl,\n new_refresh_token.expires_in)\n self.assertEqual(t2['refresh_token'],\n new_refresh_token.refresh_token)\n\n # Assert that the old access tokens are no longer in the database and\n # have been cleaned up.\n\n with base.HybridSessionManager():\n no_access_token = \\\n token_api.access_token_get_by_token(t1['access_token'])\n with base.HybridSessionManager():\n no_refresh_token = \\\n refresh_tokens.refresh_token_get_by_token(t1['refresh_token'])\n\n self.assertIsNone(no_refresh_token)\n self.assertIsNone(no_access_token)",
"def testAuthorizationWrongClientSecret(self):\n request = self.generateValidTokenRequest(arguments={\n 'grant_type': 'refresh_token',\n 'client_id': self._VALID_CLIENT.id,\n 'client_secret': 'invalidSecret',\n 'refresh_token': self._VALID_REFRESH_TOKEN\n })\n result = self._TOKEN_RESOURCE.render_POST(request)\n self.assertFailedTokenRequest(\n request, result, InvalidClientAuthenticationError(),\n msg='Expected the token resource to reject a request with an invalid client secret.')",
"def test_legacy_client_invalid_access_token(self):\n self.legacy_client._client.access_token = 'invalidaccesstoken'\n response = self.legacy_client.request('GET', urljoin(BASE_URL, MOCK_ENDPOINT))\n self.assertEquals(response.content, RESPONSE)",
"def test_evicts_invalid_refresh_token():\n\n tenant_id = \"tenant-id\"\n client_id = \"client-id\"\n invalid_token = \"invalid-refresh-token\"\n\n cache = TokenCache()\n cache.add({\"response\": build_aad_response(uid=\"id1\", utid=\"tid1\", access_token=\"*\", refresh_token=invalid_token)})\n cache.add({\"response\": build_aad_response(uid=\"id2\", utid=\"tid2\", access_token=\"*\", refresh_token=\"...\")})\n assert len(cache.find(TokenCache.CredentialType.REFRESH_TOKEN)) == 2\n assert len(cache.find(TokenCache.CredentialType.REFRESH_TOKEN, query={\"secret\": invalid_token})) == 1\n\n def send(request, **_):\n assert request.data[\"refresh_token\"] == invalid_token\n return mock_response(json_payload={\"error\": \"invalid_grant\"}, status_code=400)\n\n transport = Mock(send=Mock(wraps=send))\n\n client = AadClient(tenant_id, client_id, transport=transport, cache=cache)\n with pytest.raises(ClientAuthenticationError):\n client.obtain_token_by_refresh_token(scopes=(\"scope\",), refresh_token=invalid_token)\n\n assert transport.send.call_count == 1\n assert len(cache.find(TokenCache.CredentialType.REFRESH_TOKEN)) == 1\n assert len(cache.find(TokenCache.CredentialType.REFRESH_TOKEN, query={\"secret\": invalid_token})) == 0",
"def test_jwt_refresh_with_expired_token(self):\n\n # We make sure that the refresh token is not in the window\n # allowed by the expiration delta. This is much easier using\n # freezegun.\n orig_iat = datetime.utcfromtimestamp(self.payload['orig_iat']) -\\\n settings.JWT_REFRESH_EXPIRATION_DELTA -\\\n timedelta(days=1)\n\n self.payload['orig_iat'] = timegm(orig_iat.utctimetuple())\n\n data = {\n 'token': utils.jwt_encode_handler(self.payload)\n }\n\n response = self.client.post(\n '/refresh-token/',\n json.dumps(data),\n content_type='application/json'\n )\n\n self.assertEqual(response.status_code, 400)",
"def test_raises_token_expired_when_applicable(self):\n\n badgr = self.get_badgr_setup()\n with vcr.use_cassette('tests/vcr_cassettes/no_valid_auth_token.yaml'):\n with self.assertRaises(exceptions.TokenAndRefreshExpiredError):\n badgr.get_from_server(self._sample_url)",
"def test_invalid_access_token(self):\n\n content_type = 'application/x-www-form-urlencoded'\n # POST with content: application/x-www-form-urlencoded\n response = self.app.post('/v1/openid/token',\n params={\n 'code': 'invalid_access_token',\n 'grant_type': 'invalid_grant_type'\n },\n content_type=content_type,\n expect_errors=True)\n\n # Assert that this is a successful response\n self.assertEqual(400, response.status_code)\n self.assertIsNotNone(response.json)\n self.assertEqual('unsupported_grant_type', response.json['error'])\n self.assertEqual(e_msg.INVALID_TOKEN_GRANT_TYPE,\n response.json['error_description'])",
"def test_refreshes_token_when_expired(self):\n\n badgr = self.get_badgr_setup()\n\n # _token_data isn't meant to be exposed; pylint: disable=W0212\n original_token = badgr._token_data['access_token']\n with vcr.use_cassette('tests/vcr_cassettes/expired_auth_token.yaml'):\n badgr.get_from_server(self._sample_url)\n self.assertNotEqual(original_token,\n badgr._token_data['access_token'])",
"async def test_expired_token_refresh_internal_error(\n hass: HomeAssistant,\n component_setup: ComponentSetup,\n aioclient_mock: AiohttpClientMocker,\n) -> None:\n\n aioclient_mock.post(\n \"https://oauth2.googleapis.com/token\",\n status=http.HTTPStatus.INTERNAL_SERVER_ERROR,\n )\n\n await component_setup()\n\n entries = hass.config_entries.async_entries(DOMAIN)\n assert len(entries) == 1\n assert entries[0].state is ConfigEntryState.SETUP_RETRY",
"def test_valid_access_token_time(self):\n\n # Store the old TZ info, if it exists.\n old_tz = None\n if 'TZ' in os.environ:\n old_tz = os.environ['TZ']\n\n # Convert now into every possible timezone out there :)\n for name in self.tested_timezones:\n\n # Override the 'default timezone' for the current runtime.\n os.environ['TZ'] = name\n\n # Create a token.\n with base.HybridSessionManager():\n authorization_code = auth_api.authorization_code_save({\n 'user_id': 2,\n 'state': 'test_state',\n 'code': 'test_valid_code',\n 'expires_in': 300\n })\n\n content_type = 'application/x-www-form-urlencoded'\n response = self.app.post('/v1/openid/token',\n params={\n 'code': authorization_code.code,\n 'grant_type': 'authorization_code'\n },\n content_type=content_type,\n expect_errors=True)\n\n # Assert that this is a valid call.\n self.assertEqual(200, response.status_code)\n\n # Reset the timezone.\n if old_tz:\n os.environ['TZ'] = old_tz\n else:\n del os.environ['TZ']",
"def test_expired_access_token_time(self):\n\n expired = datetime.datetime.now(pytz.utc) - datetime.timedelta(\n minutes=6)\n\n # Store the old TZ info, if it exists.\n old_tz = None\n if 'TZ' in os.environ:\n old_tz = os.environ['TZ']\n\n # Convert now into every possible timezone out there :)\n for name in self.tested_timezones:\n\n # Override the 'default timezone' for the current runtime.\n os.environ['TZ'] = name\n\n # Create a token.\n with base.HybridSessionManager():\n authorization_code = auth_api.authorization_code_save({\n 'user_id': 2,\n 'state': 'test_state',\n 'code': 'test_valid_code',\n 'expires_in': 300,\n 'created_at': expired\n })\n\n content_type = 'application/x-www-form-urlencoded'\n # POST with content: application/x-www-form-urlencoded\n response = self.app.post('/v1/openid/token',\n params={\n 'code': authorization_code.code,\n 'grant_type': 'authorization_code'\n },\n content_type=content_type,\n expect_errors=True)\n\n # Assert that this is a valid call.\n self.assertEqual(401, response.status_code)\n\n # Reset the timezone.\n if old_tz:\n os.environ['TZ'] = old_tz\n else:\n del os.environ['TZ']",
"def testAuthorizationInvalidClientId(self):\n request = self.generateValidTokenRequest(arguments={\n 'grant_type': 'refresh_token',\n 'client_id': 'invalidClientId',\n 'client_secret': self._VALID_CLIENT.secret,\n 'refresh_token': self._VALID_REFRESH_TOKEN\n })\n result = self._TOKEN_RESOURCE.render_POST(request)\n self.assertFailedTokenRequest(\n request, result, InvalidClientIdError(),\n msg='Expected the token resource to reject a request with an invalid client id.')",
"def test_replace_o_auth_access_token(self):\n pass",
"def testInvalidContentType(self):\n request = MockRequest('POST', 'token', arguments={\n 'grant_type': 'refresh_token',\n 'refresh_token': self._VALID_REFRESH_TOKEN\n })\n request.setRequestHeader('Content-Type', 'application/not-x-www-form-urlencoded')\n result = self._TOKEN_RESOURCE.render_POST(request)\n self.assertFailedTokenRequest(\n request, result,\n MalformedRequestError('The Content-Type must be \"application/x-www-form-urlencoded\"'),\n msg='Expected the token resource to reject a request with an invalid content type.')",
"def test_authtoken_refresh(self):\n hagrid = models.User(username='hagrid', fullname='Rubeus Hagrid')\n auth_token = models.AuthToken(user=hagrid, algorithm='hmac-sha-1')\n existing_token = auth_token.token\n existing_secret = auth_token.secret\n auth_token.refresh()\n self.assertNotEqual(existing_token, auth_token.token)\n self.assertNotEqual(existing_secret, auth_token.secret)",
"def test_legacy_client_expired_access_token(self):\n self.legacy_client._client._expires_at = 1\n response = self.legacy_client.request('GET', urljoin(BASE_URL, MOCK_ENDPOINT))\n self.assertEquals(response.content, RESPONSE)",
"def test_patch_o_auth_access_token(self):\n pass",
"def testAuthorizationWrongClientSecretInHeader(self):\n client = getTestPasswordClient(self._VALID_CLIENT.id)\n client.secret = 'invalidSecret'\n request = self.generateValidTokenRequest(arguments={\n 'grant_type': 'refresh_token',\n 'refresh_token': self._VALID_REFRESH_TOKEN\n })\n self._addAuthenticationToRequestHeader(request, client)\n result = self._TOKEN_RESOURCE.render_POST(request)\n self.assertFailedTokenRequest(\n request, result, InvalidClientAuthenticationError(),\n msg='Expected the token resource to reject a request with an invalid client secret.')",
"def test_reset_passwd_bad_token(self, test_client):\n response = test_client.post('/api/auth/reset', json=dict(\n reset_password_token=str(\n create_access_token(identity=UserModel(uuid=uuid.uuid4()))),\n password=\"Azerty!123\"))\n res = json.loads(response.data)\n\n assert response.status_code == 401\n assert res['status'] == False",
"def testAuthorizationWithoutClientAuth(self):\n request = self.generateValidTokenRequest(arguments={\n 'grant_type': 'refresh_token',\n 'refresh_token': self._VALID_REFRESH_TOKEN\n })\n result = self._TOKEN_RESOURCE.render_POST(request)\n self.assertFailedTokenRequest(\n request, result, NoClientAuthenticationError(),\n msg='Expected the token resource to reject a request without any authentication.')\n request = self.generateValidTokenRequest(arguments={\n 'grant_type': 'refresh_token',\n 'refresh_token': self._VALID_REFRESH_TOKEN,\n 'client_id': self._VALID_CLIENT.id,\n })\n result = self._TOKEN_RESOURCE.render_POST(request)\n self.assertFailedTokenRequest(\n request, result, NoClientAuthenticationError(),\n msg='Expected the token resource to reject a request without client authentication.')",
"async def test_expired_token_requires_reauth(\n hass: HomeAssistant,\n component_setup: ComponentSetup,\n aioclient_mock: AiohttpClientMocker,\n) -> None:\n\n aioclient_mock.post(\n \"https://oauth2.googleapis.com/token\",\n status=http.HTTPStatus.BAD_REQUEST,\n )\n\n await component_setup()\n\n entries = hass.config_entries.async_entries(DOMAIN)\n assert len(entries) == 1\n assert entries[0].state is ConfigEntryState.SETUP_ERROR\n\n flows = hass.config_entries.flow.async_progress()\n assert len(flows) == 1\n assert flows[0][\"step_id\"] == \"reauth_confirm\"",
"def testAuthorizationMalformedClientSecret(self):\n request = self.generateValidTokenRequest(arguments={\n 'grant_type': 'refresh_token',\n 'client_id': self._VALID_CLIENT.id,\n 'client_secret': b'malformedSecret\\xFF\\xFF',\n 'refresh_token': self._VALID_REFRESH_TOKEN\n })\n result = self._TOKEN_RESOURCE.render_POST(request)\n self.assertFailedTokenRequest(\n request, result, MalformedParameterError('client_secret'),\n msg='Expected the token resource to reject a request with a malformed client secret.')",
"def test_authtoken_is_valid(self):\n auth_client = self.fixtures.auth_client\n # scenario 1: when validity is unlimited (0)\n tomriddle = models.User(username='voldemort', fullname='Tom Riddle')\n scope = ['id', 'email']\n tomriddle_token = models.AuthToken(\n auth_client=auth_client, user=tomriddle, scope=scope, validity=0\n )\n self.assertTrue(tomriddle_token.is_valid())\n\n # scenario 2: when validity has not been given\n draco = models.User(username='draco', fullname='Draco Malfoy')\n draco_token = models.AuthToken(auth_client=auth_client, user=draco, scope=scope)\n with self.assertRaises(TypeError):\n draco_token.is_valid()\n\n # scenario 3: when validity is limited\n harry = models.User(username='harry', fullname='Harry Potter')\n harry_token = models.AuthToken(\n auth_client=auth_client,\n user=harry,\n scope=scope,\n validity=3600,\n created_at=utcnow(),\n )\n self.assertTrue(harry_token.is_valid())\n\n # scenario 4: when validity is limited *and* the token has expired\n cedric = models.User(username='cedric', fullname='Cedric Diggory')\n cedric_token = models.AuthToken(\n auth_client=auth_client,\n user=cedric,\n scope=scope,\n validity=1,\n created_at=utcnow() - timedelta(1),\n )\n self.assertFalse(cedric_token.is_valid())",
"def test_authenticate_refresh(app, client, session, models):\n user = models[\"user\"][0]\n # Authenticate to receive a refresh token\n response = client.post(\n \"/authenticate/local\",\n data={\"email\": user.email, \"password\": \"hunter2\"},\n )\n refresh_token = json.loads(response.data)[\"refresh_token\"]\n\n # Check that token values are as expected\n assert len(refresh_token[\"val\"]) == 64\n assert datetime.fromtimestamp(refresh_token[\"exp\"]) > datetime.now()\n assert datetime.fromtimestamp(refresh_token[\"exp\"]) < (\n datetime.now() + app.config[\"REFRESH_TOKEN_VALIDITY\"]\n )\n\n # Check that the returned token is now stored in the database\n assert refresh_token[\"val\"] == user.refresh_tokens[0].token\n\n # Expect refreshing token to succeed\n response = client.post(\n \"/refresh\", data={\"refresh_token\": refresh_token[\"val\"]}\n )\n assert response.status_code == 200\n raw_jwt_token = json.loads(response.data)[\"jwt\"]\n\n # Expect that the new claims are equal to the user claims, except for the\n # expiry which will have refreshed\n refresh_claims = jwt.decode(\n raw_jwt_token, app.config[\"RSA_PUBLIC_KEY\"], app.config[\"ALGORITHM\"],\n )\n del refresh_claims[\"exp\"]\n assert user.claims == refresh_claims\n\n # Expect refreshing an expired token to fail\n token = user.refresh_tokens[0]\n token.expiry = datetime.now() - timedelta(seconds=1)\n response = client.post(\"/refresh\", data={\"refresh_token\": token.token})\n assert response.status_code == 401",
"def test_mail_client_invalid_access_token(self):\n self.mail_client._client.access_token = 'invalidaccesstoken'\n response = self.mail_client.request('GET', urljoin(BASE_URL, MOCK_ENDPOINT))\n self.assertEquals(response.content, RESPONSE)",
"def validate_refresh_token(self, refresh_token, client, request, *args, **kwargs):\n log.debug(\"Validating refresh token\")\n token = self._tokengetter(refresh_token=refresh_token)\n\n if token and token.client_id == client.client_id:\n # Make sure the request object contains user and client_id\n request.client_id = token.client_id\n request.user = token.user\n return True\n return False"
] | [
"0.77855164",
"0.7776032",
"0.76719224",
"0.76533115",
"0.73654115",
"0.7338942",
"0.7290583",
"0.7267676",
"0.7238647",
"0.71473265",
"0.7119978",
"0.7110751",
"0.70527965",
"0.7029916",
"0.69989884",
"0.69802237",
"0.69181126",
"0.6860301",
"0.68478835",
"0.6846695",
"0.6826242",
"0.68199736",
"0.67960715",
"0.6790984",
"0.6771769",
"0.6769646",
"0.6709578",
"0.6679941",
"0.6669763",
"0.6668761"
] | 0.8209073 | 0 |
Test retrieving all players | def test_retrieve_players(self):
Player.objects.create(name='Mayita', victories=0,
defeats=0)
Player.objects.create(name='Moiso', victories=0,
defeats=0)
res = self.client.get(PLAYERS_URL)
players = Player.objects.all().order_by('-name')
serializer = PlayerSerializer(players, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_gridironfootballplayers_get(self):\n pass",
"def test_get_player(self):\n pass",
"def test_player_index(self):\n player = Player(first_name='George', last_name='Smith')\n player.save()\n response = self.client.get(reverse('players'))\n self.assertQuerysetEqual(response.context['players'], [])",
"def get_players(self, all=False):\n if all:\n return self.all_players\n else:\n return self.players",
"def test_lacrosseplayers_get(self):\n query_string = [('label', 'label_example'),\n ('page', 1),\n ('per_page', 100)]\n headers = { \n 'Accept': 'application/json',\n }\n response = self.client.open(\n '/v0.0.1/lacrosseplayers',\n method='GET',\n headers=headers,\n query_string=query_string)\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))",
"async def get_players(self):\r\n if os.environ.get(\"WoW_Token\") is None:\r\n return\r\n else:\r\n async with aiohttp.ClientSession().get('https://us.api.battle.net/wow/leaderboard/3v3?locale=en_US&apikey=' + os.environ.get(\"WoW_Token\")) as res:\r\n if res.status == 200:\r\n data = await res.json()\r\n output = {}\r\n for player in range(0, 965):\r\n output[int(player)] = data['rows'][player]\r\n with open('Pvp_Players.json', 'w') as pvp_players:\r\n json.dump(output, pvp_players)\r\n return output",
"def test_plays_get(self):\n pass",
"def get_all_game_players(self):\n return GamePlayer.objects.filter(game=self)",
"def players():\n try:\n return template('players.html', players=SERVER.players.values())\n except RoboBattleshipException as e:\n return JsonResponse.error(e)\n except:\n LOG.exception(\"Failed to show a list of all registered players on the \"\n \"server\")\n return JsonResponse.error(101)",
"def test_get_player_upcoming_chests(self):\n pass",
"def playerStandings():\n\n getPlayers = \"SELECT id, name, wins, matches FROM playerstats ORDER BY wins DESC\"\n players = executeQuery({'dbname': 'tournament', 'query' : getPlayers, 'type' : 'find'})\n return players",
"def test_gridironfootballplayers_id_get(self):\n pass",
"def get_all_players(self):\n\n self._logger.debug(\"Getting player list\")\n\n try:\n self.check_if_db_connected()\n cursor = self._db_conn.cursor()\n cursor.execute(\"SELECT first_name, last_name, nickname, time FROM player \\\n ORDER BY time DESC\")\n players = cursor.fetchall()\n\n except MySQLdb.OperationalError:\n self._logger.error(\"MySQL operational error occured\")\n traceback.print_exc()\n raise exceptions.DBConnectionError(\"Cannot connect to MySQL server\")\n\n except MySQLdb.ProgrammingError:\n self._logger.error(\"MySQL programming error\")\n traceback.print_exc()\n raise exceptions.DBSyntaxError(\"MySQL syntax error\")\n\n else:\n return players",
"def players_list(self):\n self.db = TinyDB('Models/db.json')\n self.query = Query()\n player_table = self.db.table('player_table')\n return player_table",
"def player_list():\n page = request.args.get(\"page\", \"1\")\n count = request.args.get(\"count\", \"12\")\n team_id = request.args.get(\"team_id\")\n\n if not team_id:\n raise BadRequest(\"Nama team tidak boleh kosong\")\n\n # type conversion\n page = int(page)\n count = int(count)\n team_id = int(team_id)\n\n player = player_ctrl.get_list(page=page, count=count, team_id=team_id)\n\n response = {\n \"status\": 200 if player.items != [] else 204,\n \"has_next\": player.has_next,\n \"has_prev\": player.has_prev,\n \"total\": player.total,\n \"result\": _entity_player_list(player.items)\n }\n\n return jsonify(response)",
"def players(self, game: str) -> Response:\n\n endpoint = '/api/players'\n query = f'?game={game}'\n return self.fetch(endpoint, query)",
"def players(self):\n return Player.objects.filter(team=self)",
"def test_get_all_for_team(self):\n team = Team.create(name='foo', captain_id='User_cap',\n program_id=self.program.uid)\n team.put()\n user = User.create(name='foo', email='[email protected]',\n owned_teams=[team.uid])\n user.put()\n response = self.testapp.get(\n '/api/teams/{}/users'.format(team.uid),\n headers=self.login_headers(user),\n )\n response_list = json.loads(response.body)\n self.assertEqual(len(response_list), 1)",
"def fetch_players_stats():\n players_scraper = PlayerStatsScraper(API_URL, API_HEADERS)\n result = players_scraper.save_objects()\n return result",
"def test_get_list_teams(self):\n args = {\n 'name': 'test team',\n 'capacity': '11',\n 'number_players': '6',\n 'pitch_postcode': 'E1 6LT',\n 'time': '2019-01-01 13:00'\n }\n team = Team(args)\n db.session.add(team)\n db.session.commit()\n response = self.client.get('/teams')\n self.assertEqual(response.status_code, 200)\n self.assertIn(b'test team', response.data)",
"def show_players_specific_tournament(self) -> None:\n id_choice = check.request_id(TOURNAMENTS)\n tournament_data = TOURNAMENTS.get(doc_id=id_choice)\n if tournament_data.get(\"players\") == {}:\n print(\"\\n This tournaments has no players yet\")\n else:\n players_list = tournament_data.get(\"players\")\n deserialized_player_list = []\n for player_data in players_list:\n deserialized_player = Player(**json.loads(player_data))\n deserialized_player_list.append(deserialized_player)\n utils.clear_terminal()\n print(\n \"Do you want the list of players by alphabetical order or by ranking ? \\n\"\n \"1 - Ranking players list \\n\"\n \"2 - Alphabetical players list\"\n )\n choice = check.request_selection_with_number(\"alphabetical\", \"ranking\", \"None\")\n if choice == \"alphabetical\":\n utils.clear_terminal()\n deserialized_player_list = sorted(deserialized_player_list, key=lambda player: player.first_name)\n for deserialized_player in deserialized_player_list:\n print(deserialized_player)\n elif choice == \"ranking\":\n utils.clear_terminal()\n deserialized_player_list = sorted(deserialized_player_list, key=lambda player: player.ranking)\n for deserialized_player in deserialized_player_list:\n print(deserialized_player)",
"def players(db):\n players = [PlayerFactory(), PlayerFactory()]\n db.session.commit()\n return players",
"def test_response_for_getting_all_users(self):\n response = self.client.get(\"/team/all/\", format='json')\n self.assertEqual(response.status_code, 200)",
"def get_players():\n nfl_players = redis_cache('nfl_players_key', NFL_Player_2015.query.all)\n return nfl_players",
"def getPlayers(self):\n return iter(self.players)",
"def show_players(self) -> None:\n players_list = []\n for player in PLAYERS:\n data_player = ((\n str(player.get(\"first_name\")) + \" \" +\n str(player.get(\"last_name\")) + \" | \" +\n str(player.get(\"birthday\")) + \" | \" +\n str(player.get(\"genre\")) + \" | \" +\n str(player.get(\"ranking\"))\n ))\n players_list.append(data_player)\n utils.clear_terminal()\n print(\n \"Do you want the list of players by alphabetical order or by ranking ? \\n\"\n \"1 - Ranking players list \\n\"\n \"2 - Alphabetical players list\"\n )\n choice = check.request_selection_with_number(\"ranking\", \"alphabetical\", \"None\")\n if choice == \"ranking\":\n player_id = 0\n players_list = sorted(players_list, key=lambda player: players_list[4])\n utils.clear_terminal()\n print(\"==========================================\")\n print(\"List of all Players in ranking order : \")\n print(\"==========================================\")\n for player in players_list:\n player_id += 1\n print(str(player_id) + \" : \" + player)\n elif choice == \"alphabetical\":\n player_id = 0\n players_list.sort()\n utils.clear_terminal()\n print(\"============================================\")\n print(\"List of all Players in alphabetical order : \")\n print(\"============================================\")\n for player in players_list:\n player_id += 1\n print(str(player_id) + \" : \" + player)",
"def get_player_list():\r\n return list(\r\n pymongo.MongoClient('mongodb://localhost:27017/')['wows']['na_player_list'].find( # !!!!!!!!!!!!!!!!!!!!!!!!!\r\n {'scraped': False}, {'_id': 0, 'player_id': 1, 'player_name': 1, 'clan': 1}\r\n )\r\n )",
"def fixture_player_stats(self):\n stats_list = []\n fixture_tuples = []\n fixture_player_ids = self.load_fixture_player_stats()\n i = 0\n for fixture in fixture_player_ids:\n for fixture_id, value in fixture.items():\n if value:\n for player_id in value:\n fixture_tuples.append((fixture_id, player_id))\n print(\"Getting player info for all fixtures..\")\n with Pool(self.pool) as p:\n fixture_stats = list(tqdm(p.imap(self.fixture_player_stats_singel_wrapper, fixture_tuples, chunksize=1), total=len(fixture_tuples)))\n for fixture in fixture_stats:\n if fixture:\n stats_list.append(fixture)\n else:\n i += 1\n print('Completed')\n if i >0:\n print(f'{i} games retreived had no stats')\n self.save_completed('player_fixture', stats_list, StorageConfig.STATS_DIR)",
"def get_contracted_players(self, team):\n # setting up empty list of players\n players = list()\n\n # getting html document with team's contracted players\n doc = self.get_html_document(team, 'contracts')\n\n # returning empty list if no system page could be found\n if doc is None:\n return players\n\n # collecting player names and links to capfriendly pages for different\n # player groups\n cf_links = doc.xpath(\n \"//table[@id='team']/tr[@class='column_head c']/td/parent::tr/following-sibling::tr/td[1]/a/@href\")\n cf_names = doc.xpath(\n \"//table[@id='team']/tr[@class='column_head c']/td/parent::tr/following-sibling::tr/td[1]/a/text()\")\n\n for lnk, name in zip(cf_links, cf_names):\n # retrieving capfriendly id from player page link\n cf_id = lnk.split(\"/\")[-1]\n # trying to find player in database\n plr = Player.find_by_capfriendly_id(cf_id)\n # trying to find player using suggestions\n if plr is None:\n last_name, first_name = name.split(\", \")\n suggested_players = self.get_suggested_players(\n last_name, first_name)\n for suggested_player in suggested_players:\n (\n sugg_plr_id, sugg_pos,\n sugg_last_name, sugg_first_name, _\n ) = (\n suggested_player\n )\n if (last_name, first_name) == (\n sugg_last_name, sugg_first_name):\n plr = Player.find_by_id(sugg_plr_id)\n if plr is None:\n plr = self.create_player(\n sugg_plr_id, last_name, first_name, sugg_pos)\n\n if plr is None:\n print(\"Unable to find player with name %s\" % name)\n else:\n players.append(plr)\n\n return players",
"def test_add_players(self):\n campaign = self.campaign\n\n campaign.players.add(self.player.id)\n campaign.players.add(self.gm.id)\n\n self.assertQuerysetEqual(campaign.players.all().order_by(\"username\"), [self.gm, self.player], transform=lambda x: x)"
] | [
"0.7851354",
"0.7392498",
"0.7249521",
"0.7195039",
"0.7023699",
"0.69242185",
"0.6841736",
"0.68325335",
"0.6792121",
"0.6791838",
"0.67904574",
"0.6744118",
"0.67069584",
"0.6672156",
"0.6652354",
"0.6647714",
"0.6608296",
"0.6586247",
"0.6585509",
"0.6544839",
"0.6533784",
"0.6521989",
"0.65092623",
"0.64960706",
"0.6495668",
"0.6458064",
"0.6441166",
"0.6431241",
"0.64286643",
"0.6386324"
] | 0.80052656 | 0 |
Test creating a new player | def test_create_player_successful(self):
payload = {'name': 'Mayita', 'victories': 0, 'defeats': 0}
self.client.post(PLAYERS_URL, payload)
print('PLAYERS_URL: ',PLAYERS_URL)
exists = Player.objects.filter(
name=payload['name']
).exists()
self.assertTrue(exists) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_new_player(new_player, new_room):\n\n try:\n uuid.UUID(str(new_player.id), version=4)\n except ValueError:\n raise ValueError('new_player id is not valid uuid4')\n assert new_player.order_of_turn == 1\n assert new_player.score == 0\n assert new_player.token_presence is False\n assert new_player.username == 'Victor'\n assert new_player.ready is False\n assert new_player.playing is False\n assert new_player.surrender is False\n assert new_player.dice_has_rolled is False\n assert json.loads(new_player.last_dice_values) == [0, 0]\n assert new_player.room_id == new_room.id",
"def test_create_player(self):\n self.assertIsInstance(self.player, ship.Ship)\n self.assertEqual(self.player.position, constants.PLAYER_START_PLACE)\n self.assertEqual(self.player.width, constants.PLAYER_WIDTH)\n self.assertEqual(self.player.height, constants.PLAYER_HEIGHT)\n self.assertEqual(self.player.img, constants.PLAYER_IMG)\n self.assertEqual(self.player.health, constants.PLAYER_HEALTH)",
"def test_get_player(self):\n pass",
"def test_player_created(self):\n res = self.client().post('api/v1/players/new', headers={'Content-Type': 'application/json'}, data=json.dumps(self.player))\n json_data = json.loads(res.data)\n self.assertTrue(json_data.get('jwt_token'))\n self.assertEqual(res.status_code, 201)",
"def newPlayer():\r\n pass",
"def test_create_player_invalid(self):\n payload = {'name': ''}\n res = self.client.post(PLAYERS_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)",
"def test_create_team(self):\n pass",
"def test_add_player(self):\n user = User.objects.create_user('Luca', '[email protected]', 'bongo_cat')\n profile = models.Profile.objects.get(user=user)\n game = models.Game.objects.all()[0]\n number_of_piles = len(settings.HYDROCARBON_STOCKS_PER_PLAYER)\n for i_pile in range(number_of_piles):\n self.assertEqual(game.hydrocarbon_piles.get(index=i_pile).stock_amount, 0)\n game.add_player(profile=profile)\n for i_pile in range(number_of_piles):\n self.assertEqual(game.hydrocarbon_piles.get(index=i_pile).stock_amount,\n settings.HYDROCARBON_STOCKS_PER_PLAYER[i_pile][0])",
"def test_new(self):\n obj = Game.new(self._creator, self._ds)\n self.assertIsInstance(obj, Game, \"Game instance not initialized.\")\n self.assertHasAttribute(obj, 'uid', \"Game has no unique ID.\")\n self.assertHasAttributes(obj, [\n 'players', 'spectators', 'state', 'points', 'options', 'table'])\n self.assertIsCREATED(obj)",
"def create_player(self, request):\n if request.player_name:\n if Player.query(Player.name == request.player_name).get():\n raise endpoints.ConflictException(\n 'A User with that name already exists!')\n else:\n raise endpoints.BadRequestException('verify the name that you are sending in the request')\n if request.email:\n if gameutils.get_regex(request.email) == None:\n print(' ERROR - invalid email, please try again')\n raise endpoints.ConflictException(\n 'invalid email, please try again!')\n else:\n raise endpoints.BadRequestException('verify the email that you are sending in the request')\n\n player = Player(name=request.player_name, email=request.email)\n player.put()\n\n return StringMessage(message='Player created!'.format(request.player_name))",
"def create_player(id_player: str):\n id_player = str(id_player)\n last_name = input(\"Last name of the player : \")\n first_name = input(\"First name of the player : \")\n birthday = input(\"Birthday of the player : \")\n sex = input(\"Sex of the player : \")\n elo = int(input(\"Elo of the player: \"))\n\n if not Player.get(id_player):\n Player(id_player, last_name, first_name, birthday, sex, elo)\n else:\n raise Exception(f\"The ID {id_player} already exists : {Player.get(id_player)}\")",
"def test_edit_player_profile (self):\n self.view_path = 'accounts_edit_player_profile'\n self.template_name = 'accounts/edit_player_profile.html'\n self._test_existance_and_correct_template (login_info={'username': self.T_PLAYER['username'],\n 'password': self.T_PLAYER['password']})\n self._test_only_player_has_access ( )\n #\n # test displayed data is correct\n #\n resp = self.client.get (reverse (self.view_path))\n form = resp.context[-1]['form']\n self.assertContains (resp, self.player.user.first_name, 2)\n self.assertContains (resp, self.player.user.last_name, 2)\n self.assertEquals (form.initial['level'], self.player.level)\n self.assertEquals (form.initial['male'], self.player.male)\n self.assertEquals (form.initial['right_handed'], self.player.right_handed)\n #\n # test data is correctly saved\n #\n self.T_PLAYER['first_name'] = random_ascii_string (form.fields['first_name'].max_length)\n self.T_PLAYER['last_name'] = random_ascii_string (form.fields['last_name'].max_length)\n self.T_PLAYER['level'] = random.choice (PlayerProfile.LEVELS)[0]\n self.T_PLAYER['male'] = random.randint (1, 2) % 2 == 0\n self.T_PLAYER['right_handed'] = random.randint (1, 9) % 3 == 0\n \n resp = self._test_model_instance_save (self.player.user, self.T_PLAYER, \n ('first_name', 'last_name'))\n self._test_model_instance_save (self.player, self.T_PLAYER,\n ('level', 'male', 'right_handed'))\n self.assertContains (resp, self.T_PLAYER['first_name'], 2)\n self.assertContains (resp, self.T_PLAYER['last_name'], 2)",
"def test_teams_create(self):\n pass",
"def post(self):\n args = player_parser.parse_args()\n print(args)\n unique_player = DBPlayer.query.filter_by(nickname=args['nickname']).first()\n if unique_player:\n return get_response(409, 'player already existed!')\n try:\n new_player = DBPlayer(**args)\n db.session.add(new_player)\n db.session.commit()\n except Exception as e:\n db.session.rollback()\n return get_response(400, \"{e}\".format(e=str(e)))\n return get_response(201, 'done!')",
"def create_player(self,player_name, attr = None, team_file = None):\n player_first, player_last = player_name.split(\" \")\n player_file = player_name.replace(\" \", \"\") + '.json'\n if(os.path.exists(self.player_path + player_file)):\n return(False)\n else:\n with open(self.player_path + player_file, 'x') as new_file:\n with open(self.player_template_path, 'r') as template:\n data = json.load(template)\n data['player_name'] = player_first + ' ' + player_last\n json.dump(data, new_file)\n template.close()\n new_file.close()\n\n\n if attr: # If the user inputed new data, add the data, else use template\n try:\n self.update_player_attribute(player_file, attr)\n except:\n os.remove(player_file)\n\n if team_file: #if the user selected a team, add the player to the team\n self.add_team_player(team_file, player_file)\n\n return(True)",
"def create_player (self, username = None):\n # Get unique username if needed\n if (username == None):\n username = \"default_username\" + str (time.time ())\n self.username = username\n r = requests.post (self.url_endpoint, data = {\"new_player\": self.username})\n if (r.status_code != 201):\n print (\"Failed to create user:\\n\", r.text)\n return r\n play_data = json.loads (r.text)\n self.secret = play_data['player_secret']\n with open (self.filename, \"w\") as f:\n f.write (f\"username {self.username}\\nsecret {self.secret}\")",
"def create_player():\n\n\t#TODO : Ajout d'une BDD des différents joueurs avec des scores et vérifier la présence des joueurs choisis dans cette BDD pour charger les scores\n\n\tactivator = ''\n\tinhibitor = ''\n\n\tprint(\"\\nEntrez le pseudo du joueur\",colors.GREEN + \"'Activator' : \" + colors.STOP, end = \"\")\n\tactivator = input()\n\n\tprint(\"\\nEntrez le pseudo du joueur\", colors.RED + \"'Inhibitor' : \"+colors.STOP, end = \"\")\n\tinhibitor = input()\n\n\t# Default usernames if not defined by users\n\tif len(activator) == 0:\n\t\tactivator = 'Activator'\n\n\tif len(inhibitor) == 0:\n\t\tinhibitor = 'Inhibitor'\n\n\t# Attribute to each player the status he chose\n\tData.current_player['Activator'] = activator\n\tData.current_player['Inhibitor'] = inhibitor\n\n\treturn activator, inhibitor",
"def setUp(self):\n self.player = Player()",
"def create_existing_player():\n logic_test = True\n data = \"\"\n while logic_test:\n try:\n player_choice = view.select_player_view(select_players())\n data = select_players()[player_choice]\n logic_test = False\n except IndexError as error:\n view.show(error)\n continue\n return data",
"def test_new(self):\n result = self.client.get('/new-game')\n self.assertEqual(result.status, '200 OK')\n self.assertIn(b'New Game', result.data)",
"def setUp(self):\n\n url = \"/register\"\n data = {\n \"username\": \"RyanBeidenTest\",\n \"password\": \"test123!\",\n \"email\": \"[email protected]\",\n \"first_name\": \"Ryan\",\n \"last_name\": \"Beiden\",\n }\n\n response = self.client.post(url, data, format='json')\n json_response = json.loads(response.content)\n self.token = json_response['token']\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\n # Create a Player Instance\n player = Player()\n player.user = User.objects.get(id=json_response['user_id'])\n player.save()\n\n # Create a Game Instance\n game = Game()\n game.title = \"Monopoly\"\n game.description = \"A super good board game.\"\n game.designer = \"Joe Smith\"\n game.year_released = \"1996-01-01\"\n game.est_time_to_play = 25\n game.num_of_players = 12\n game.age_rec = 12\n game.image_url = \"\"\n game.player = player\n game.save()",
"def test_player_creator():\n filename = os.path.abspath(\"data/data.csv\")\n creator = PlayerCreator(filename)\n creator.parse_csv()\n assert isinstance(creator.csv_data, pd.DataFrame)\n tuples = creator.create_object_tuples()\n assert isinstance(tuples, list)\n creator.create_players(tuples[:100], 10)",
"def setUp (self):\n self._create_club ( )\n self._create_player ( )",
"def test_create_a_pet(self):\n pet = Pet(0, \"fido\", \"dog\", False)\n self.assertNotEqual(pet, None)\n self.assertEqual(pet.id, 0)\n self.assertEqual(pet.name, \"fido\")\n self.assertEqual(pet.category, \"dog\")\n self.assertEqual(pet.available, False)",
"def test_add_players(self):\n campaign = self.campaign\n\n campaign.players.add(self.player.id)\n campaign.players.add(self.gm.id)\n\n self.assertQuerysetEqual(campaign.players.all().order_by(\"username\"), [self.gm, self.player], transform=lambda x: x)",
"def test_create(self):\n pass",
"def test_add_team_member(self):\n pass",
"def test_createteam(self):\n p1, p2, p3 = self.create3persons()\n t = model.Team(name='Tigers', persons=[p1, p2, p3])\n id = t.store()\n t2 = model.Team(id=id)\n self.assertEqual(t.name, t2.name)\n self.assertEqual(t.persons, t2.persons)",
"def createPlayer(self):\n sw, ne = self.playerCreationRectangle\n x = self.random.randrange(sw.x, ne.x)\n y = 1.0\n z = self.random.randrange(sw.y, ne.y)\n player = Player(Vector(x, y, z), 2, self.seconds)\n for observer in self.observers:\n observer.playerCreated(player)\n self.players.append(player)\n return player",
"def create_player(player: Player) -> None:\n with engine.connect() as conn:\n\n conn.execute(\n player_table.insert().values(\n steamid=player.steamid,\n level=player.level,\n xp=player.xp,\n credits=player.credits,\n )\n )\n\n skills = list(player.skills)\n result = conn.execute(\n skill_table.insert().values([\n {\n 'key': skill.key,\n 'level': skill.level,\n 'steamid': player.steamid,\n }\n for skill in skills\n ])\n )\n\n for id, skill in zip(result.inserted_primary_key, skills):\n skill._db_id = id"
] | [
"0.78461397",
"0.78237635",
"0.7688674",
"0.76077133",
"0.76061386",
"0.75966895",
"0.7172569",
"0.711341",
"0.6966639",
"0.6960186",
"0.6928948",
"0.6839228",
"0.67893827",
"0.6780713",
"0.67705965",
"0.67641246",
"0.6749246",
"0.6720765",
"0.66819185",
"0.6681674",
"0.6677593",
"0.66576844",
"0.65566605",
"0.6511953",
"0.649556",
"0.64937204",
"0.6453895",
"0.6410434",
"0.6406178",
"0.64060557"
] | 0.8110652 | 0 |
Test creating a new player with invalid payload | def test_create_player_invalid(self):
payload = {'name': ''}
res = self.client.post(PLAYERS_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_player_created(self):\n res = self.client().post('api/v1/players/new', headers={'Content-Type': 'application/json'}, data=json.dumps(self.player))\n json_data = json.loads(res.data)\n self.assertTrue(json_data.get('jwt_token'))\n self.assertEqual(res.status_code, 201)",
"def test_create_player_successful(self):\n payload = {'name': 'Mayita', 'victories': 0, 'defeats': 0}\n self.client.post(PLAYERS_URL, payload)\n\n print('PLAYERS_URL: ',PLAYERS_URL)\n exists = Player.objects.filter(\n name=payload['name']\n ).exists()\n self.assertTrue(exists)",
"def test_new_player(new_player, new_room):\n\n try:\n uuid.UUID(str(new_player.id), version=4)\n except ValueError:\n raise ValueError('new_player id is not valid uuid4')\n assert new_player.order_of_turn == 1\n assert new_player.score == 0\n assert new_player.token_presence is False\n assert new_player.username == 'Victor'\n assert new_player.ready is False\n assert new_player.playing is False\n assert new_player.surrender is False\n assert new_player.dice_has_rolled is False\n assert json.loads(new_player.last_dice_values) == [0, 0]\n assert new_player.room_id == new_room.id",
"def test_malformed_player(self):\n board = Board()\n player1 = MalformedDataPlayer()\n player_guard1 = PlayerGuard(player1, timeout=3)\n\n p1id = uuid.uuid4()\n\n player_guard1.set_id(uuid.uuid4())\n\n self.assertRaises(PlayerMalformedData, player_guard1.place_worker, board)\n self.assertRaises(PlayerMalformedData, player_guard1.play_turn, board)",
"def test_name_must_be_present(self):\n response = self.client.post(url_for('teams'),\n data={\n 'capacity': 10,\n 'number_players': 6,\n 'pitch_postcode': 'E1 6LT',\n 'time': '2019-01-01 13:00'\n })\n self.assertEqual(response.status_code, 400)",
"def test_player_number_cannot_be_empty(self):\n with self.assertRaises(Exception) as context:\n self.client.post(\n url_for('teams'),\n data={\n 'name': 'team',\n 'capacity': '5',\n 'number_players': 'hello',\n 'pitch_postcode': 'E1 6LT',\n 'time': '2019-01-01 13:00'\n }\n )\n self.assertTrue('Number players must be a number' in context.exception)\n self.assertEqual(db.session.query(Team).count(), 0)",
"def test_create_player(self):\n self.assertIsInstance(self.player, ship.Ship)\n self.assertEqual(self.player.position, constants.PLAYER_START_PLACE)\n self.assertEqual(self.player.width, constants.PLAYER_WIDTH)\n self.assertEqual(self.player.height, constants.PLAYER_HEIGHT)\n self.assertEqual(self.player.img, constants.PLAYER_IMG)\n self.assertEqual(self.player.health, constants.PLAYER_HEALTH)",
"def test_invalid_game_setup(self):\n with self.assertRaises(ValueError):\n self._game.add_player(self._creator, 1)\n with self.assertRaises(ValueError):\n self._game.add_player(self._users[1], 0)\n for x in xrange(1, 4):\n self._game.add_player(self._users[x], x)\n with self.assertRaises(ValueError):\n self._game.add_player(self._users[4], 1)",
"def test_uuid_none(self):\n with self.assertRaises(ValueError):\n Game.objects.create(\n title='Lego Batman',\n )",
"def test_422_create_movie(self):\n # this test should fail becuase the record to insert is invalid\n res = self.client().post('/movies', headers={\n 'Authorization': \"Bearer {}\".format(self.executive_producer_token)\n }, json=self.INVALID_NEW_MOVIE)\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 422)\n self.assertFalse(data['success'])\n self.assertIn('message', data)",
"def test_create_invalid_submission(self):\n with self.client:\n # invalid submission registration\n sub_response = register_illegal_submission(self, self.token)\n response_data = json.loads(sub_response.data.decode())\n self.assertTrue(response_data['errors']!=None)",
"def test_create_empty_payload(self):\n response = self.client.post('/exercises/', data={})\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)",
"def test_create_invalid(self):\n url = '/api/users/'\n data = {}\n username = str(uuid1())[:8]\n # Response should be status 400 where essential parameters are missing.\n response = self.client.post(url, json.dumps(data), content_type='application/json')\n self.assertEqual(response.status_code, 400)\n data['EmailAddress'] = '{}@dbca.wa.gov.au'.format(username)\n response = self.client.post(url, json.dumps(data), content_type='application/json')\n self.assertEqual(response.status_code, 400)\n data['DisplayName'] = 'Doe, John'\n response = self.client.post(url, json.dumps(data), content_type='application/json')\n self.assertEqual(response.status_code, 400)\n data['SamAccountName'] = username\n response = self.client.post(url, json.dumps(data), content_type='application/json')\n self.assertEqual(response.status_code, 201) # Now valid.",
"def create_player(id_player: str):\n id_player = str(id_player)\n last_name = input(\"Last name of the player : \")\n first_name = input(\"First name of the player : \")\n birthday = input(\"Birthday of the player : \")\n sex = input(\"Sex of the player : \")\n elo = int(input(\"Elo of the player: \"))\n\n if not Player.get(id_player):\n Player(id_player, last_name, first_name, birthday, sex, elo)\n else:\n raise Exception(f\"The ID {id_player} already exists : {Player.get(id_player)}\")",
"def test_get_player(self):\n pass",
"def create_player(self, request):\n if request.player_name:\n if Player.query(Player.name == request.player_name).get():\n raise endpoints.ConflictException(\n 'A User with that name already exists!')\n else:\n raise endpoints.BadRequestException('verify the name that you are sending in the request')\n if request.email:\n if gameutils.get_regex(request.email) == None:\n print(' ERROR - invalid email, please try again')\n raise endpoints.ConflictException(\n 'invalid email, please try again!')\n else:\n raise endpoints.BadRequestException('verify the email that you are sending in the request')\n\n player = Player(name=request.player_name, email=request.email)\n player.put()\n\n return StringMessage(message='Player created!'.format(request.player_name))",
"def test_422_invalid_play_quiz(self): \n data = {'previous_questions': '2', 'quiz_category': {}}\n res = self.client().post('/play', \n data=json.dumps(data),\n content_type='application/json')\n self.assertEqual(res.status_code, 422)\n json_res = json.loads(res.get_data(as_text=False))",
"def test_create_team(self):\n pass",
"def test_422_create_actor(self):\n # failing test due to inserting invalid data --testing 422\n res = self.client().post('/actors', headers={\n 'Authorization': \"Bearer {}\".format(self.casting_director_token)\n }, json=self.INVALID_NEW_ACTOR)\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 422)\n self.assertFalse(data['success'])\n self.assertIn('message', data)",
"def test_create_valid_entry(self):\n url = reverse('airlines:aircraft-list')\n response = self.client.post(url, self.valid_payload, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(response.data['user_defined_id'], 6)",
"def test_not_created_with_invalid(self):\n payload = {'name': ''}\n res = self.client.post(TAGS_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)",
"def test_create_card_missing_variety(self): # pylint: disable=invalid-name\n data = {\n 'first_name': 'Ty',\n 'last_name': 'Cobb',\n }\n resp = self.app.post('cards', json=data)\n\n assert resp.status_code == 200\n\n assert data['first_name'] == resp.json['first_name']\n assert data['last_name'] == resp.json['last_name']\n assert resp.json['variety'] is None",
"def test_create_tag_invalid(self):\n payload = {'name':''}\n res = self.client.post(TAG_URL,payload)\n self.assertEqual(res.status_code,status.HTTP_400_BAD_REQUEST)",
"def test_create_with_bad_backend(self):\n # Count the number of records before the save\n post_data = {\n 'source_type': 'test',\n 'source_id': '4bCOAuhvjsxbVBM5MM8oik',\n }\n resp = self.api_client.post('/api/metadata/tracks/', data=post_data)\n data = json.loads(resp.content)\n\n # Ensure the request filed with a 404, and an error message is returned\n self.assertEqual(resp.status_code, 404)\n self.assertEqual(data['detail'], u'The record could not be found.')",
"def test_create_card_missing_name(self):\n data = {\n 'first_name': 'Ty',\n }\n resp = self.app.post('cards', json=data)\n\n assert resp.status_code == 500",
"def post(self):\n args = player_parser.parse_args()\n print(args)\n unique_player = DBPlayer.query.filter_by(nickname=args['nickname']).first()\n if unique_player:\n return get_response(409, 'player already existed!')\n try:\n new_player = DBPlayer(**args)\n db.session.add(new_player)\n db.session.commit()\n except Exception as e:\n db.session.rollback()\n return get_response(400, \"{e}\".format(e=str(e)))\n return get_response(201, 'done!')",
"def test_new_game(self):\n #create the api \n api_call = '/_ah/spi/GameApi.new_game'\n app = endpoints.api_server([GameApi], restricted=False)\n testapp = webtest.TestApp(app)\n \n #create two players\n first_user, second_user = self._get_two_players() \n \n #the expected request object as a dictionary, to be serialised to JSON by webtest\n request = {\"first_user\":first_user.name, \"second_user\":second_user.name} \n resp = testapp.post_json(api_call, request)\n \n #check correct default values have been created\n self.assertEqual(resp.json['next_move'], first_user.name)\n self.assertEqual(resp.json['game_over'], False)\n self.assertEqual(resp.json['unmatched_pairs'], \"8\")\n self.assertEqual(resp.json['first_user_score'], \"0\")\n self.assertEqual(resp.json['second_user_score'], \"0\")\n self.assertEqual(resp.json['history'], \"[]\")\n \n #test user not found\n request = {\"first_user\":\"\", \"second_user\":\"\"} \n self.assertRaises(Exception, testapp.post_json, api_call, request)\n \n #test calling new game with the same user twice\n request = {\"first_user\":first_user.name, \"second_user\":first_user.name} \n self.assertRaises(Exception, testapp.post_json, api_call, request)",
"def testing_create_game():\n black_user = request.form['black_email']\n white_user = request.form['white_email']\n stones = json.loads(request.form['stones'])\n create_game_internal(black_user, white_user, stones)\n return ''",
"def test_post_actor_422(self):\r\n res = self.client().post('/actors/create', json=partial_actor, headers=executive_producer)\r\n data = json.loads(res.data)\r\n\r\n self.assertEqual(res.status_code, 422)\r\n self.assertFalse(data[\"success\"])\r\n self.assertEqual(data[\"message\"], \"unprocessable entity\")",
"def test_invalid_new_game(self):\n self._game.remove_player_by_user_id(self._users[1].uid)\n with self.assertRaises(StateError):\n self._game.new_game()\n self._game.add_player(self._users[1], 1)\n self._game.new_game()\n with self.assertRaises(StateError):\n self._game.new_game()"
] | [
"0.7495499",
"0.74699783",
"0.738127",
"0.718221",
"0.69524",
"0.67708266",
"0.6658256",
"0.66042596",
"0.6594715",
"0.6582349",
"0.6539508",
"0.6506546",
"0.6504157",
"0.6490392",
"0.6451788",
"0.64410824",
"0.6428347",
"0.64130753",
"0.6399742",
"0.6389613",
"0.63864726",
"0.6382882",
"0.63682055",
"0.63494843",
"0.63309294",
"0.6330691",
"0.63304776",
"0.6299156",
"0.62938094",
"0.6285298"
] | 0.8654237 | 0 |
Return the basic info of the current tree. return | def info(self):
return nx.info(self.tree)
# def children(self):
""" Return the children of the current node.
"""
# return self.left, self.right | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def show_info(self):\r\n if not self.parent and self.key:\r\n print(\"######### ROOT #########\")\r\n print(\"------------------------\")\r\n print(\"key: %s\" % self.key)\r\n print(\"value: %s\" % self.value)\r\n print(\"color: %s\" % self.get_color())\r\n\r\n try:\r\n print(\"left_child: %s\" % self.left_child.key)\r\n print(\"right_child: %s\" % self.right_child.key)\r\n print(\"parent: %s\" % self.parent.key if self.parent else \"parent: None\")\r\n print(\"size_tree: %s\" % self.size_tree)\r\n except:\r\n pass\r\n print(\"------------------------\")",
"def tree(self):\r\n return self._tree",
"def get_info(self):\n return \"TODO !\"",
"def info(self):",
"def info(self):",
"def return_info(self):\n\t\treturn self.info",
"def getInfo():",
"def getInfo(self):\n return self.info",
"def get_info(self):\n return None",
"def info(self):\n return self._info",
"def get_info(self):\n pass",
"def get_info(self):\n pass",
"def tree(self):\n return self._tree",
"def tree(self):\n return self._tree",
"def tree(self):\n return self._tree",
"def tree(self):\n return self._tree",
"def return_tree(self):\n\n return self.tree, self.ParentMap",
"def show_all_information(self):\n return self.__dict__\n # print(self.first_name)\n # print(self.last_name)\n # print(self.age)\n # print(self.name)\n # print(self.gender)\n # print(self.number_of_children)",
"def print_tree(self):\n return \"\"",
"def info(self):\r\n return self._get('info', {})",
"def info(self) -> dict:",
"def info(self):\n print \"root path = {path}\".format(path=self.path)\n print \"target path = {target}\".format(target=self.target)\n print \"files = {dic}\".format(dic=self.files)",
"def __repr__(self):\n return self.displayTree(0)",
"def get_tree(self):\n return self.tree or None",
"def __manage_tree(self):\n for pre, fill, node in RenderTree(self.tree):\n if node.name is 'count':\n logger.info(\n \"Tree info %s%s: %s %s p/s attack: %s\",\n pre, node.name, node.value, node.pps, node.attack)\n else:\n logger.info(\"Pre - [%s], Fill - [%s], Node - [%s]\",\n pre, fill, node.name)",
"def getInfo(self):\n return self._info",
"def print_info(self):\n\n print \"parent:\\t {0}\".format(self.parent)\n print \"value:\\t {0}\".format(self.value)\n \n #children\n print \"posXposYposZ: \\t {0}\".format(self.posXposYposZ)\n print \"posXposYnegz: \\t {0}\".format(self.posXposYnegZ)\n print \"posXnegYposZ: \\t {0}\".format(self.posXnegYposZ)\n print \"posXnegYnegZ: \\t {0}\".format(self.posXnegYnegZ)\n print \"negXposYposZ: \\t {0}\".format(self.negXposYposZ)\n print \"negXposYnegZ: \\t {0}\".format(self.negXposYnegZ)\n print \"negXnegYposZ: \\t {0}\".format(self.negXnegYposZ)\n print \"negXnegYnegZ: \\t {0}\".format(self.negXnegYnegZ) \n\n #position in space\n print \"Xupperlimit: \\t {0}\".format(self.Xupperlimit)\n print \"Yupperlimit: \\t {0}\".format(self.Yupperlimit)\n print \"Zupperlimit: \\t {0}\".format(self.Zupperlimit)\n \n print \"Xlowerlimit: \\t {0}\".format(self.Xlowerlimit)\n print \"Ylowerlimit: \\t {0}\".format(self.Ylowerlimit)\n print \"Zlowerlimit: \\t {0}\".format(self.Zlowerlimit)\n\n print \"Xcenter: \\t {0}\".format(self.Xcenter)\n print \"Ycenter: \\t {0}\".format(self.Ycenter)\n print \"Zcenter: \\t {0}\".format(self.Zcenter)",
"def info(self):\n return self._info",
"def detail(self):\n info = self.info()\n return info",
"def getStatusTree(self):\n return self.statusTree"
] | [
"0.71606535",
"0.6920607",
"0.6735312",
"0.6625062",
"0.6625062",
"0.6621492",
"0.6614933",
"0.6579641",
"0.65775186",
"0.6573978",
"0.65506",
"0.65506",
"0.64760476",
"0.64760476",
"0.64760476",
"0.64760476",
"0.6467341",
"0.64480984",
"0.64376783",
"0.6396005",
"0.63847786",
"0.63826156",
"0.63779837",
"0.6376205",
"0.6347239",
"0.6309549",
"0.6305518",
"0.6275202",
"0.62735415",
"0.62607414"
] | 0.7387961 | 0 |
Return a list of operators based on deepfirst search algorithm. the order is from left to right parameter | def dfs_operators(self, node=None):
if not node:
node = self.get_nodes_from_position('root')[0]
bfs_all_nodes = list(nx.dfs_edges(self.tree, node))
operators = [i for i, j in bfs_all_nodes]
# operators = [i for i, j in bfs_all_nodes if self.tree.out_degree(j) == 0]
# new a list to reduce the duplicate operators.
operators_output = []
for i in operators:
if i not in operators_output:
operators_output.append(i)
return operators_output | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _search_brother_ops(self, graph, op_node):\n visited = [op_node.idx()]\n stack = []\n brothers = []\n for op in graph.next_ops(op_node):\n if (op.type() != 'conv2d') and (op.type() != 'fc') and (\n not op._is_bwd_op()):\n stack.append(op)\n visited.append(op.idx())\n while len(stack) > 0:\n top_op = stack.pop()\n for parent in graph.pre_ops(top_op):\n if parent.idx() not in visited and (not parent._is_bwd_op()):\n if ((parent.type == 'conv2d') or (parent.type == 'fc')):\n brothers.append(parent)\n else:\n stack.append(parent)\n visited.append(parent.idx())\n\n for child in graph.next_ops(top_op):\n if (child.type != 'conv2d') and (child.type != 'fc') and (\n child.idx() not in visited) and (\n not child._is_bwd_op()):\n stack.append(child)\n visited.append(child.idx())\n return brothers",
"def find_op(sv, piece, op_group, alphabetic):\r\n op_positions=[]\r\n for o in op_group:\r\n op=Space+o+Space if o in alphabetic else o # separate alphabetic with spaces\r\n here=-1\r\n while op in piece[here+1:]: # while (here:=piece.find(op, here+1))>-1: Python 3.8\r\n here=piece.find(op, here+1)\r\n op_positions.append((here, o, op))\r\n if len(op_positions)>1: op_positions.sort(key=lambda x: -x[0]) # sort list in descending order of position\r\n \r\n op_list=whole_operators(piece, op_positions) # do not split <= >= or != \r\n return op_list",
"def operartors(self) -> List[Operator]:\n return list(self.__ops.keys())",
"def opsplit(expstr):\n\n #ops are the one char operators (sorted on precidence)\n ops = expr.getOps()\n #Remove outer parentesis if we have them\n if expstr[0] == '(' and expstr[-1] == ')' and balanced(expstr[1:-1]):\n expstr = expstr[1:-1]\n #Add a '0' to the beginning of the string if we start with an operator\n if expstr[0] in ops:\n expstr = '0'+expstr\n for op in ops:\n pc = 0\n cc = len(expstr)-1\n revexpstr = list(expstr)\n revexpstr.reverse()\n #Search for the operator backwards (to preserve operator presidence)\n for c in revexpstr:\n if c == '(':\n pc += 1\n elif c == ')':\n pc -= 1\n if c == op and pc == 0:\n #Build the tree recursively\n return [op,opsplit(expstr[:cc]),opsplit(expstr[cc+1:])]\n cc -=1\n #if we find something that looks like a function, parse it separately \n if funcpattern(expstr):\n fnamestr = funcname(expstr)\n fargs = funcargs(expstr)\n farglist = [opsplit(arg) for arg in fargs]\n return [fnamestr]+farglist\n return expstr",
"def extract_operators(e, independent=False):\n ops = []\n\n if isinstance(e, Operator):\n ops.append(e)\n\n elif isinstance(e, Add):\n for arg in e.args:\n ops += extract_operators(arg, independent=independent)\n\n elif isinstance(e, Mul):\n for arg in e.args:\n ops += extract_operators(arg, independent=independent)\n else:\n if debug:\n print(\"Unrecongized type: %s: %s\" % (type(e), str(e)))\n\n return list(set(ops))",
"def list_operators():\n for operator_symbol in operations:\n print(operator_symbol)",
"def get_ops_list(model_data):\n model = schema_fb.Model.GetRootAsModel(model_data, 0)\n op_set = set()\n\n for subgraph_idx in range(model.SubgraphsLength()):\n subgraph = model.Subgraphs(subgraph_idx)\n for op_idx in range(subgraph.OperatorsLength()):\n op = subgraph.Operators(op_idx)\n opcode = model.OperatorCodes(op.OpcodeIndex())\n builtin_code = schema_util.get_builtin_code_from_operator_code(opcode)\n if builtin_code == schema_fb.BuiltinOperator.CUSTOM:\n opname = opcode.CustomCode().decode(\"utf-8\")\n op_set.add(opname)\n else:\n op_set.add(visualize.BuiltinCodeToName(builtin_code))\n return op_set",
"def extract_all_operators(e_orig):\n if debug:\n print(\"extract_all_operators: \", e_orig)\n\n if isinstance(e_orig, Operator):\n return [e_orig]\n\n e = drop_c_number_terms(normal_ordered_form(e_orig.expand(),\n independent=True))\n\n if isinstance(e, Pow) and isinstance(e.base, Operator):\n return [e]\n\n ops = []\n\n if isinstance(e, Add):\n for arg in e.args:\n ops += extract_all_operators(arg)\n\n if isinstance(e, Mul):\n op_f = [f for f in e.args if (isinstance(f, Operator) or\n (isinstance(f, Pow) and\n isinstance(f.base, Operator)))]\n ops.append(Mul(*op_f))\n ops += op_f\n\n unique_ops = list(set(ops))\n\n sorted_unique_ops = sorted(unique_ops, key=operator_order)\n\n return sorted_unique_ops",
"def sort_operators(current_operator, operator_stack, output_list): # HELPER\n #print(\"SORT \", current_operator)\n # order of operations represented as a list\n order = ['*','<<','**','/','>>','+','-']\n i = order.index(current_operator)\n\n for elem in range(operator_stack.size()):\n if operator_stack.peek() != '(':\n # numeral representation of precedence of TOP of stack\n j = order.index(operator_stack.peek())\n # if top of stack is of 'higher' or equal precendence, add to output list\n if j >= i:\n output_list.append(operator_stack.pop())\n # Finally, push current operator to stack\n operator_stack.push(current_operator)",
"def operators(self):\n return self.domain.operators.keys()",
"def _forward_search_related_op(self, graph, param):\n assert isinstance(param, VarWrapper)\n visited = {}\n for op in graph.ops():\n visited[op.idx()] = False\n stack = []\n for op in graph.ops():\n if (not op.is_bwd_op()) and (param in op.all_inputs()):\n stack.append(op)\n visit_path = []\n while len(stack) > 0:\n top_op = stack[len(stack) - 1]\n if visited[top_op.idx()] == False:\n visit_path.append(top_op)\n visited[top_op.idx()] = True\n next_ops = None\n if top_op.type() == \"conv2d\" and param not in top_op.all_inputs():\n next_ops = None\n elif top_op.type() == \"mul\":\n next_ops = None\n else:\n next_ops = self._get_next_unvisited_op(graph, visited, top_op)\n if next_ops == None:\n stack.pop()\n else:\n stack += next_ops\n return visit_path",
"def _dfs(op, visited=None):\n visited = visited or set()\n ret = []\n for child in op.inputs:\n if child.op in visited:\n return ret\n visited.add(child.op)\n if child.op.type not in op_regularizer_manager.NON_PASS_THROUGH_OPS:\n ret.extend(_dfs(child.op, visited))\n if child.op.type in ('Conv2D',): # TODO: support depthwise conv.\n ret.append(child.op)\n return ret",
"def operators(self):\n return self._operators",
"def find_ops(optype):\n gd = tf.get_default_graph()\n return [var for var in gd.get_operations() if var.type == optype]",
"def whole_operators(piece, op_positions):\r\n op_list=[]\r\n if op_positions: # remove overlapping elements (e.g. \"<=\", \"!=\" vs. \"=\", \"<\")\r\n last=len(piece) # initialization\r\n lastop=\"\" # previous op\r\n for here, o, op in op_positions: # scan list\r\n if last-here>1: # no overlap: store and continue\r\n op_list.append((o,op)) \r\n lastop, last = o, here\r\n elif o.endswith(lastop): # consecutive overlapping ops (2 chars max) \r\n if op_list: op_list.pop() # remove shorter op\r\n op_list.append((o,op)) # keep operator, without and with space \r\n lastop, last = o, here # store larger op as previous\r\n return op_list",
"def eq(self, ops):\n left, right, deriv = self.eqs_and_deriv(ops)\n eqs = []\n if self.side in [Side.LEFT, Side.BOTH]:\n eqs.append(Eq.parse_eq(left, ops))\n # Add the right-side equation with its derivation \n if self.side in [Side.RIGHT, Side.BOTH]:\n eq = Eq.parse_eq(right, ops)\n eq.derived_def = deriv\n eq.derived = self.derived\n eqs.append(eq)\n return eqs",
"def search(self, search):\n # walk through the query tree, returning each child before its parents,\n # building up a list of \"primitive\" queries to be performed, and a\n # sequence of operations to be done on those.\n def walk(query):\n \"\"\"Walk a query tree.\n\n \"\"\"\n stack = [[query, 0]]\n while len(stack) != 0:\n query, index = stack[-1]\n if isinstance(query, queries.QueryCombination):\n if index < len(query.subqs):\n stack[-1][1] = index + 1\n stack.append([query.subqs[index], None])\n continue\n yield len(stack) - 1, query\n del stack[-1]\n\n pieces = {} # pieces of the query being built up, keyed by depth\n cmds = []\n for depth, query in walk(search.query):\n if query.op == queries.Query.TERMS:\n keys = [(self.dbprefix + \"docs:\" + term, sha(term))\n for term in query.terms]\n subpieces = pieces.setdefault(depth, [])\n if len(keys) == 0:\n raise UnimplementedError(\"empty queries not yet implemented\")\n elif len(keys) == 1:\n termkey, cachekey = keys[0]\n cmds.append(('score', cachekey, [termkey]))\n subpieces.append(cachekey)\n else:\n cachekeys = []\n for termkey, cachekey in keys:\n cmds.append(('score', cachekey, [termkey]))\n cachekeys.append(cachekey)\n cachekey = sha(''.join(cachekeys))\n cmds.append((query.default_op, cachekey, cachekeys))\n subpieces.append(cachekey)\n elif query.op in (queries.Query.OR, queries.Query.AND,\n queries.Query.NOT):\n subpieces = pieces.setdefault(depth, [])\n cachekeys = pieces[depth + 1]\n cachekey = sha(str(query.op) + ':' + ''.join(cachekeys))\n cmds.append((query.op, cachekey, cachekeys))\n subpieces.append(cachekey)\n del pieces[depth + 1]\n else:\n raise UnimplementedError(\"Query operator %r not yet \"\n \"implemented\" % query.opname(query.op))\n\n def cachekey(key):\n return self.dbprefix + \"cache:\" + key\n\n cleanup_keys = []\n pipe = self.client.pipeline(transaction=True)\n destkey = None\n resnum = 0\n for cmd, dest, inputs in cmds:\n destkey = cachekey(dest)\n if cmd == 'score':\n # FIXME - actually, we want to calculate scores for the terms,\n # here.\n pipe.sinterstore(destkey, inputs)\n resnum += 1\n elif cmd == queries.Query.OR:\n pipe.sunionstore(destkey, [cachekey(input) for input in inputs])\n resnum += 1\n elif cmd == queries.Query.AND:\n pipe.sinterstore(destkey, [cachekey(input) for input in inputs])\n resnum += 1\n elif cmd == queries.Query.NOT:\n pipe.sdiffstore(destkey, [cachekey(input) for input in inputs])\n resnum += 1\n else:\n raise UnimplementedError(\"Unknown command: %r\" % cmd)\n cleanup_keys.append(destkey)\n if destkey is None:\n return ()\n pipe.smembers(destkey)\n for key in cleanup_keys:\n pipe.delete(key)\n result = pipe.execute()\n return result[resnum]",
"def orderOp(self, stream):\n ops = []\n if stream.op(OP_ORDER):\n for op in stream.ops['ops']:\n if op.startswith(OP_ORDER):\n ops.append( (self.leftStream(stream, op), op) )\n return ops",
"def depth_node_ordering(start_node, end_nodes):\n ordered_list = []\n ordered_set = set()\n working_list = [start_node]\n while working_list != []:\n node = working_list.pop(0)\n if not node in ordered_set:\n ordered_set.add(node)\n ordered_list.append(node)\n if not is_leaf_node(node) and not node in end_nodes:\n for node_op in node.get_inputs():\n working_list.append(node_op)\n return ordered_list",
"def _display_operators(operators):\n def sort_name(o): return o.name\n def filter_op(o): return any([o.email, o.address, o.website, o.twitter])\n\n return sorted(filter(filter_op, operators), key=sort_name)",
"def pre_order_traversal(self):\n\n elements = []\n\n ##visit base node\n elements.append(self.data)\n\n ##visit left tree\n if self.left:\n elements += self.left.pre_order_traversal()\n\n #visit right tree\n if self.right:\n elements += self.right.pre_order_traversal()\n\n return elements",
"def repair_operators(self) -> List[Tuple[str, _OperatorType]]:\n return list(self._r_ops.items())",
"def _tokensToSearchCondition(tokens, position=0):\n\n\tresult = []\n\n\t# states\n\tCONDITION_START = 0\n\tFIELD_NAME = 1\n\tCOMPARISON = 2\n\tVALUE = 3\n\n\tstate = CONDITION_START\n\n\tCOMPARISONS = {'==': op.EQ, '>': op.GT, '<': op.LT,\n\t\t'~': op.REGEXP, '<=': op.LTE, '>=': op.GTE}\n\tINVERSED_COMPARISONS = {'!=': op.EQ, '!~': op.REGEXP}\n\tOPERATORS = {'and': op.AND, 'or': op.OR, 'not': op.NOT}\n\n\ti = position\n\twhile i < len(tokens):\n\t\tif tokens[i].type == _Token.OPENING_PARENTHESIS:\n\t\t\tnew_position, subcondition = _tokensToSearchCondition(tokens, i + 1)\n\t\t\tresult.append(subcondition)\n\t\t\ti = new_position\n\t\telif tokens[i].type == _Token.CLOSING_PARENTHESIS:\n\t\t\treturn i + 1, result\n\t\telif state == CONDITION_START:\n\t\t# intermediate state; there can be either operator or field name here\n\t\t# if token looks like operator and the next token is not a comparison -\n\t\t# we consider this token to be an operator, otherwise it is a field name\n\t\t\tif tokens[i].type == _Token.SIMPLE and tokens[i].value.lower() in OPERATORS and \\\n\t\t\t\ti < len(tokens) - 1 and tokens[i + 1].value not in COMPARISONS:\n\t\t\t\t\tresult.append(OPERATORS[tokens[i].value.lower()])\n\t\t\t\t\ti += 1\n\t\t\telse:\n\t\t\t\tstate = FIELD_NAME\n\t\telif state == FIELD_NAME:\n\t\t# field name should be valid and token type should be simple\n\t\t# (no quoted value names)\n\t\t\tname_list = _getFieldNameList(tokens[i].value)\n\t\t\tif tokens[i].type != _Token.SIMPLE or name_list is None:\n\t\t\t\traise ParserError(\"Wrong field name\", tokens[i].start, tokens[i].end)\n\t\t\tresult.append(name_list)\n\t\t\ti += 1\n\t\t\tstate = COMPARISON\n\t\telif state == COMPARISON:\n\t\t\t# no quoted comparisons\n\t\t\tif tokens[i].type != _Token.SIMPLE or (tokens[i].value not in COMPARISONS and \\\n\t\t\t\t\ttokens[i].value not in INVERSED_COMPARISONS):\n\t\t\t\traise ParserError(\"Wrong comparison operator\", tokens[i].start, tokens[i].end)\n\n\t\t\tif tokens[i].value in COMPARISONS:\n\t\t\t\tresult.append(COMPARISONS[tokens[i].value])\n\t\t\telse:\n\t\t\t# when processing inversed operator, we replace it by non-inverted one\n\t\t\t# and invert op.NOT before the condition\n\t\t\t\tif len(result) > 1 and result[-2] == op.NOT:\n\t\t\t\t\tdel result[-2]\n\t\t\t\telse:\n\t\t\t\t\tresult.insert(-2, op.NOT)\n\n\t\t\t\tresult.append(INVERSED_COMPARISONS[tokens[i].value])\n\t\t\ti += 1\n\t\t\tstate = VALUE\n\t\telif state == VALUE:\n\t\t\tvalue = tokens[i].value\n\n\t\t\t# if the token is simple, value type should be deduced\n\t\t\t# if not, it is definitely a string\n\t\t\tif tokens[i].type == _Token.SIMPLE:\n\t\t\t\ttry:\n\t\t\t\t\tvalue = _deduceValueType(value)\n\t\t\t\texcept ValueError:\n\t\t\t\t\traise ParserError(\"Unknown value format\",\n\t\t\t\t\t\ttokens[i].start, tokens[i].end)\n\n\t\t\tresult.append(value)\n\t\t\ti += 1\n\t\t\tstate = CONDITION_START\n\n\treturn result, i",
"def operator_at_traversal_path(path, op):\n fmt_strs = [path[0]] + ['%s' for leaf in path[1:]]\n traversal = '->'.join(fmt_strs[:-1]) + '{op}%s'.format(op=op)\n return traversal",
"def findNextOpr(txt):\r\n if not isinstance(txt,str) or len(txt)<=0:\r\n return \"error: findNextOpr\"\r\n\r\n # --- YOU CODE STARTS HERE\r\n else: \r\n #Create List of Operators to compare\r\n opList = [\"+\", \"-\", \"/\",\"*\", \"^\"]\r\n #Iterate through the string, and compare with the list to see if each character is an operator\r\n for x, y in enumerate(txt): \r\n for z in opList:\r\n if y == z: \r\n return x\r\n return -1",
"def get_jump_operators(self, list_empty=False):\n aw = []\n for k, jumps in enumerate(self.get_jumps()):\n if jumps is None:\n continue\n\n if k == 0:\n for t in jumps:\n aw.append((t['w'], 1, t['d'] * ketbra(self.s, *t['I'])))\n else:\n for jump in jumps.reshape((int(len(jumps) / (k + 1)), k + 1)):\n op = sum(t['d'] * ketbra(self.s, *t['I']) for t in jump)\n aw.append((jump[0]['w'], k+1, op))\n\n\n return [a for a in aw if list_empty or not np.allclose(a[2], 0)]",
"def _op_search(self, op, reg_list, param_list=None): # pylint: disable-msg=invalid-name\n\n arity = 0\n if param_list:\n arity = len(param_list)\n\n gate_definition = self._unrollable(self._op_sig(op, arity))\n\n if gate_definition:\n self._unroll(gate_definition, reg_list, param_list)",
"def find_node_by_op_type(self, op_type: str) -> List[Operator]:\n return list(self.__op_type_list[op_type])",
"def in_order_traversal(self):\n elements = []\n\n #visit left tree\n if self.left:\n elements += self.left.in_order_traversal()\n\n #visit base node\n elements.append(self.data)\n\n #visit right tree\n if self.right:\n elements += self.right.in_order_traversal()\n\n return elements",
"def lookup_ops(self):\n return self._lookup_ops"
] | [
"0.6276911",
"0.5942017",
"0.58628064",
"0.57594097",
"0.56592035",
"0.56488216",
"0.5646335",
"0.5536815",
"0.55339974",
"0.5520272",
"0.5457814",
"0.54213226",
"0.5388805",
"0.53401655",
"0.5337534",
"0.53144145",
"0.52870244",
"0.52579343",
"0.52437997",
"0.52392375",
"0.5236051",
"0.52258223",
"0.5146287",
"0.51402414",
"0.5132968",
"0.51311314",
"0.5125132",
"0.5111673",
"0.51043725",
"0.51025134"
] | 0.63426775 | 0 |
Return a list of nodes of the position. parameter | def get_nodes_from_position(self, position=None):
return [nodes for nodes, positions in self.tree.nodes(data=True) if positions["position"] == position] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def nodes(self): \n return [n for n in self.iternodes()]",
"def get_nodes(self):\n pass",
"def get_node_list(self):\n return []",
"def getNodes(self, pos=None):\n\t\tif pos==None: pos=self.pos\n\t\tW=self.plantAreaW\n\t\tL=self.plantAreaL\n\t\tcart=self.m.getCartesian\n\t\tpC=self.m.getCylindrical(pos)\n\t\tl=self.plantHeads[0].length\n\t\tdirection=self.m.direction-pi/2.+pC[1]\n\t\ta=cart([W/2., l/2.],origin=pos, direction=direction, local=False, fromLocalCart=True)\n\t\tb=cart([-W/2., l/2.],origin=pos, direction=direction, local=False, fromLocalCart=True)\n\t\tc=cart([-W/2., -(L-l/2.)],origin=pos, direction=direction, local=False, fromLocalCart=True)\n\t\td=cart([W/2., -(L-l/2.)],origin=pos, direction=direction, local=False, fromLocalCart=True)\n\t\treturn [a,b,c,d]",
"def get_nodes(self):\n\n nodes = []\n\n if not self.node:\n return nodes\n \n nodes.extend(self.node.left.get_nodes())\n nodes.append(self.node.vp)\n nodes.extend(self.node.right.get_nodes())\n\n return nodes",
"def get_node_list(self):\n return self.node_list",
"def get_node_list(self):\n return [[node] for node in self.graph.nodes]",
"def get_nodes(self, indexes=None):\r\n nodes = []\r\n if indexes is None:\r\n nodes = [(0,self.loc.coord[0]), (1,self.loc.coord[1])]\r\n else:\r\n if not isinstance(indexes, list):\r\n indexes = [indexes] # Make a list of one\r\n for index in indexes:\r\n nodes.append((index,self.loc.coord[index]))\r\n return nodes",
"def nodes(self):\n return self.__nodes",
"def nodes (self):\n return self.__nodes",
"def nodes(self):\r\n return (node.content for node in self.traverse())",
"def get_nodes(self):\n self.map_graph_id()\n self.nodes_list = [\n self.NX_GRAPHS[self.graph_id].nodes[idx]['label'] \n for idx in range(len(self.NX_GRAPHS[self.graph_id].nodes))]",
"def get_nodes(self) -> List[Node]:\n\t\treturn sorted(self.nodes, key=lambda x: x.name.lower())",
"def get_nodes(self):\n return list(map(lambda x: x[0], self.__nodes))",
"def nodes(self):\n return self._nodes",
"def nodes(self):\n return self._nodes",
"def nodes(self):\n return self._nodes",
"def get_ordered_nodes(self):\n nodes = []\n self.build_nodes_list(self.root, nodes)\n return nodes",
"def getListOfNodes(self):\n return _libsbml.ASTNode_getListOfNodes(self)",
"def list_nodes(self, type_):\n raise NotImplementedError()",
"def nodes(topology):\n return topology.nodes()",
"def _get_nodes(self, selector):\r\n arr = []\r\n def traverse(cont):\r\n children = cont.get_children()\r\n for n in xrange(len(children)):\r\n child = children[n]\r\n if child.node_type == selector:\r\n arr.append(child)\r\n elif child.node_type != 'Shape':\r\n traverse(child)\r\n traverse(self)\r\n return arr",
"def get_nodes(self):\n\n return list(self.graph.nodes)",
"def compute_node_positions(self):\n pass",
"def nodes(self):\n return list(self._nodes_dict.values())",
"def nodes(self):\n return list(self.node_dict.keys())",
"def nodes(self):\n return self._node_reg",
"def nodes(self):\n return self.graph.nodes",
"def get_nodes(self):\n return [node for node in self._nodes.itervalues()]",
"def get_nodes(self):\n return [node for node in self._nodes.itervalues()]"
] | [
"0.77235353",
"0.73504615",
"0.73303485",
"0.7233757",
"0.71931404",
"0.713033",
"0.7059593",
"0.70508826",
"0.7001874",
"0.6970905",
"0.69392806",
"0.6934877",
"0.69146484",
"0.6897836",
"0.68802404",
"0.68802404",
"0.68802404",
"0.6878907",
"0.6866242",
"0.6832237",
"0.6695788",
"0.6686299",
"0.6682454",
"0.6663546",
"0.66400146",
"0.6627495",
"0.6621626",
"0.6612324",
"0.6604438",
"0.6604438"
] | 0.7450987 | 1 |
Return a float value of inclusion probability. pi_i = n / N, where n = |sequence| = |s| N = |population| = |M| | def inclusion_probability(M, s):
# initialize a dictionary to store events with inclusion probabilites
s_with_inclusion_probabilites = {}
# calculating the events of intersection and difference.
V = [i for i in M.tree.nodes() if not (i.__contains__('parallel') or i.__contains__('series'))]
s_in_M = list(set(s).intersection(V))
# the size of two samples.
n = len(s_in_M)
N = len(V)
pv = float(n) / N
for i in s:
if i in s_in_M:
s_with_inclusion_probabilites[i] = pv
else:
# in fact that, the probabilities should be 0,
# however, for convenience, we define the probability as 1 - pv
s_with_inclusion_probabilites[i] = 1 - pv
return s_with_inclusion_probabilites | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def probability_of_generating_containing_events(M, s):\n\n # initialize the probabilities of generating containing events.\n f = 1\n\n s_with_inclusion_probabilities = inclusion_probability(M, s)\n for v, p in s_with_inclusion_probabilities.items():\n f *= p\n\n return f",
"def prob1(n):\n#raise NotImplementedError(\"Problem 1 Incomplete\")\n if n == 0 :\n raise ValueError(\"Sampling 0 points is not defined.\")\n total = 0\n for i in xrange(n) :\n if np.random.normal() > 3 :\n total += 1\n return float(total)/n",
"def estimate_pi(n):\n points_out = 0\n points_in = 0\n for i in range(n):\n x = random.uniform(0,1)\n y = random.uniform(0,1)\n if math.sqrt(x**2 + y**2) <= 1:\n points_in += 1\n else:\n points_out += 1\n est_pi = (points_in / (points_out + points_in)) * 4\n return est_pi",
"def _compute_register_probs(cls, num_values, probability):\n bits = np.arange(1, num_values + 1)\n probs = scipy.stats.geom.pmf(bits, probability)\n\n return probs / sum(probs)",
"def _basic_probability(count: int, sequence_total_count: int) -> float:\n return float(count) / sequence_total_count",
"def calculate_probability(self):\n return 0",
"def infection_probability(mu, sigma, n=1):\n prob = np.abs(np.random.normal(mu, sigma, n))\n \n if (prob + prob) <0 or (prob + prob) >1: \n prob = np.array([mu])\n prob = prob.tolist()[0]\n return prob",
"def probability_meet_infected_person(self, virus: Virus, n_infected: int, event_population: int) -> _VectorisedFloat:\n return sct.binom.pmf(n_infected, event_population, self.probability_random_individual(virus))",
"def estimate_pi(sims):\n \n # counter to hold points lying inside the circle\n in_circle = 0\n \n for s in range(0,sims):\n \n x = np.random.rand()\n y = np.random.rand()\n \n if (x**2 + y**2) <= 1:\n in_circle += 1\n \n # The ratio of pts. inside the circle and the total pts. will be same as the ratio\n # of the area of circle to the area of the square, inside which the circle is inscribed\n # Area of circle = PI * R * R\n # Area of square = (2R) * (2R)\n \n pi_estimated = 4.0 * in_circle / sims\n \n print(\"Simulations ran: \", sims)\n print(\"Estimated pi\", pi_estimated)\n print(\"Error\", PI - pi_estimated)",
"def prodi(items: Iterable[float]) -> float:\n p: float = 1\n for n in items:\n p *= n\n return p",
"def estimate_pi(n_samples):\n return ##",
"def simPP(intensity,bound):\r\n\r\n N=np.random.poisson(bound)\r\n homPP=np.random.uniform(size=N)\r\n PP=np.array([s for s in homPP if bound*np.random.ranf()<=intensity(s)])\r\n\r\n return PP",
"def recalculate_pi(self, i, corpus):\n return sum(self.gamma(i,0,O) for O in corpus) / len(corpus)",
"def calculate_probability(disease, symptoms):\n nominator = disease.probability\n denominator = 0.0\n right = 1.0 - disease.probability\n for i in range(len(symptoms)):\n if symptoms[i] == SYMPTOM_PRESENT:\n nominator *= disease.present_probs[i]\n right *= disease.not_present_probs[i]\n elif symptoms[i] == SYMPTOM_NOT_PRESENT:\n nominator *= (1.0 - disease.present_probs[i])\n right *= (1.0 - disease.not_present_probs[i])\n denominator = right + nominator\n return round(nominator / denominator, 4)",
"def prob4():\n\n\n N = 500000\n random_draws = np.random.multivariate_normal(mean = [-1,1], cov =[[1,0],[0,1]], size = N)\n\n h = lambda x: x[0] < -1 and x[1] > 1\n f = lambda x: stats.multivariate_normal(mean = [ 0, 0]).pdf(x)\n g = lambda x: stats.multivariate_normal(mean = [-1, 1]).pdf(x)\n\n probability = [h(random_draws[i]) * f(random_draws[i]) / g(random_draws[i]) for i in range(N)]\n\n return 1./N * np.sum(probability)",
"def _pi(self):\n if self.done() or self.N == 0: return # leaf or non-agent played without simulating\n if self.env.turn > 10: self.t = (self.N + self.env.turn) / (self.N)\n for child in self.children: \n # at least one child was simulated\n if child.N: self.pi[child.last()] = child.N**self.t / (self.N+1)\n else: self.pi[child.last()] = 1/len(self.children)\n self.pi /= sum(self.pi)",
"def probability(self, sequence):\n return 2 ** (self.log_probability(self._transform(sequence)))",
"def p() -> float:\n return 0.9",
"def probability(p):\n return p > random.uniform(0.0, 1.0)",
"def probability_random_individual(self, virus: Virus) -> _VectorisedFloat:\n return self.geographic_cases*virus.infectiousness_days*self.ascertainment_bias/self.geographic_population",
"def PI(x, gp, ndim,fMax, epsilon=0.1):\n\t#epsilon = 0.1\n\tx1=np.array(x).reshape(-1,ndim)\n\tmuNew, stdNew = gp.predict(x1, return_std=True)\n\t#fMax=max(Y_init)\n \n\tZ = (muNew - fMax - epsilon)/stdNew\n\n\treturn -scipy.stats.norm.cdf(Z)",
"def collision_prob_cosine(sim: float) -> float:\n return 1.0 - np.arccos(sim) / np.pi",
"def stationary_distribution(self):\n P = self.markov_transition()\n N = len(P)\n I = np.identity(N)\n A = P.T - I # get right-kernel\n pi = null_space(A)\n pi = pi / sum(pi)\n pi = [float(item) for item in pi]\n return pi",
"def nu_poisson(self) -> float:\n return self._nu_poisson",
"def estimate_pi():\n total = 0\n k = 0\n factor = 2 * sqrt(2) / 9801\n while True:\n num = factorial(4 * k) * (1103 + 26390 * k)\n den = factorial(k) ** 4 * 396 ** (4 * k)\n term = factor * num / den\n total += term\n\n if abs(term) < 1e-15:\n break\n k += 1\n\n return 1 / total",
"def p(self) -> Probability:\n ...",
"def calculate_probability(k: int, m: int, n: int) -> float:\n population = [\"AA\" for _ in range(k)] + [\"Aa\" for _ in range(m)] + [\"aa\" for _ in range(n)]\n pairings = it.combinations(population, 2)\n probabilities = [PROBABILITIES[pairing] for pairing in pairings]\n output = sum(probabilities) / len(probabilities)\n\n return output",
"def prob1(n):\n\n # create a giant draw from a normal distribution\n random_draws = np.random.normal(loc= 0, scale = 1, size = n)\n\n # mask the values\n mask = random_draws > 3\n\n return np.sum(mask)/float(n)",
"def prob4():\n#raise NotImplementedError(\"Problem 4 Incomplete\")\n h = lambda x : x[0] < -1 and x[1] > 1\n f = lambda x : stats.multivariate_normal.pdf(x,mean=np.array([0,0]),cov=np.eye(2))\n g = lambda x : stats.multivariate_normal.pdf(x,mean=np.array([-1,1]),cov=np.eye(2))\n X = np.random.multivariate_normal(mean=np.array([-1,1]),cov=np.eye(2),size=10000)\n return 1./10000*np.sum(np.apply_along_axis(h,1,X)*np.apply_along_axis(f,1,X)/np.apply_along_axis(g,1,X))",
"def prob(x: np.ndarray, mu, sigma):\n n = mu.shape[0]\n inv = np.linalg.inv(sigma)\n den = np.sqrt(np.linalg.det(sigma)) * np.power(2 * np.pi, n / 2)\n dif = (x - mu).reshape(1, -1)\n num = -0.5 * dif @ inv @ dif.T\n num = np.exp(num)[0][0]\n return num / den"
] | [
"0.6631667",
"0.64187765",
"0.63519084",
"0.6297203",
"0.62468183",
"0.61480355",
"0.61434984",
"0.609498",
"0.6062884",
"0.6046621",
"0.6035067",
"0.60236925",
"0.60080016",
"0.59939605",
"0.598396",
"0.59779894",
"0.5967327",
"0.5939666",
"0.59200907",
"0.5919952",
"0.591934",
"0.59175533",
"0.5912961",
"0.5821736",
"0.57943904",
"0.5783219",
"0.5768783",
"0.57403094",
"0.57397276",
"0.57332575"
] | 0.6789651 | 0 |
Return a float value of inclusion probability. f(pv, s) = product_{v in s}(pv) * product_{v not in s}(1 - pv) | def probability_of_generating_containing_events(M, s):
# initialize the probabilities of generating containing events.
f = 1
s_with_inclusion_probabilities = inclusion_probability(M, s)
for v, p in s_with_inclusion_probabilities.items():
f *= p
return f | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def inclusion_probability(M, s):\n\n # initialize a dictionary to store events with inclusion probabilites\n s_with_inclusion_probabilites = {}\n\n # calculating the events of intersection and difference.\n V = [i for i in M.tree.nodes() if not (i.__contains__('parallel') or i.__contains__('series'))]\n s_in_M = list(set(s).intersection(V))\n\n # the size of two samples.\n n = len(s_in_M)\n N = len(V)\n\n pv = float(n) / N\n\n for i in s:\n if i in s_in_M:\n s_with_inclusion_probabilites[i] = pv\n else:\n # in fact that, the probabilities should be 0,\n # however, for convenience, we define the probability as 1 - pv\n s_with_inclusion_probabilites[i] = 1 - pv\n\n return s_with_inclusion_probabilites",
"def f1(predictions, gold):\n if len(gold) == 0:\n return 1. if len(predictions) == 0 else 0.\n if len(predictions) == 0:\n return 0.\n predictions_set = set(predictions)\n gold_set = set(gold)\n nom = 2 * len(predictions_set.intersection(gold_set))\n denom = len(predictions_set) + len(gold_set)\n return float(nom)/float(denom)",
"def probability(s, a, b):\r\n return s.cdf(b) - s.cdf(a)",
"def qfFunction(f, x, N):\r\n return ssstats.binom.ppf(x, N, f)",
"def prodi(items: Iterable[float]) -> float:\n p: float = 1\n for n in items:\n p *= n\n return p",
"def sample_prob(probs):\n return tf.to_float(tf.random_uniform(tf.shape(probs)) <= probs)",
"def sample_prob(probs):\n return tf.to_float(tf.random_uniform(tf.shape(probs)) <= probs)",
"def asin(x):\n return 0.0",
"def probit_phi(x):\n mu = 0;sd = 1;\n return 0.5 * (1 + tsr.erf((x - mu) / (sd * tsr.sqrt(2))))",
"def fs_probability(self, fs):\n\n\t\tprobs = []\n\t\tfor c in fs: probs.append(self.get_probability(c))\n\t\tprobs /= np.sum(probs)\n\t\treturn probs",
"def probability_meet_infected_person(self, virus: Virus, n_infected: int, event_population: int) -> _VectorisedFloat:\n return sct.binom.pmf(n_infected, event_population, self.probability_random_individual(virus))",
"def probability(p):\n return p > random.uniform(0.0, 1.0)",
"def f_implies(p, q):\n f = Implies(p, q).factor()\n return f if f in B else f.factor()",
"def _compute_register_probs(cls, num_values, probability):\n bits = np.arange(1, num_values + 1)\n probs = scipy.stats.geom.pmf(bits, probability)\n\n return probs / sum(probs)",
"def evaluate_one(self, x):\n # p = 1. / (np.sqrt(2. * np.pi) * self.sigma) * \\\n # np.exp(-0.5 * (self.mean - x) * self.invvar * (self.mean - x))\n p = self.dist.probability(x)\n return p",
"def probit(x):\n from tensorflow_probability import distributions\n return distributions.Normal(0, 1).cdf(x)",
"def fisher_p_value(contingency_table: np.ndarray) -> List[float]:\n _, fisher_p_value = stats.fisher_exact(contingency_table, alternative=\"greater\")\n return [fisher_p_value]",
"def ppf(self,x):\n ppfValue = self._distribution.inverseCdf(x,random())\n return ppfValue",
"def ppf(self,x):\n ppfValue = self._distribution.inverseCdf(x,random())\n return ppfValue",
"def p(party, vote_count, s):\n return t(party, vote_count) / d(s)",
"def feature_prob(self, f, cat):\n if self.category_count(cat) == 0:\n return 0\n # The total number of times this feature appeared in this \n # category divided by the total number of items in this category\n pfc = self.feature_count(f, cat)\n pc = self.category_count(cat)\n return float(pfc)/pc",
"def ppf(self,x):\n if x > 1.0 or x < 0:\n self.raiseAnError(IOError,'Categorical distribution cannot calculate ppf for', str(x), '! Valid value should within [0,1]!')\n sortedMapping = sorted(self.mapping.items(), key=operator.itemgetter(0))\n if x == 1.0:\n return float(sortedMapping[-1][0]) if self.isFloat else sortedMapping[-1][0]\n else:\n cumulative=0.0\n for element in sortedMapping:\n cumulative += element[1]\n if cumulative >= x:\n return float(element[0]) if self.isFloat else element[0]",
"def test_valid_inclusion_probabilities(self):\n self.assertEqual(\n private_sampling.PrivateThresholdSampleKeysOnly(\n threshold=1, eps=0.1, delta=0.5**30).compute_inclusion_prob(1),\n 0.5**30)\n self.assertEqual(\n private_sampling.PrivateThresholdSampleKeysOnly(\n threshold=0.5,\n eps=0.1,\n delta=1.0,\n sampling_method=private_sampling.PrioritySamplingMethod)\n .compute_inclusion_prob(1), 0.5)\n s = private_sampling.PrivateThresholdSampleKeysOnly(\n threshold=1, eps=0.1, delta=0.5**10)\n inclusion_prob = [s.compute_inclusion_prob(i) for i in range(0, 1000, 10)]\n for x in inclusion_prob:\n self.assertGreaterEqual(x, 0.0)\n self.assertLessEqual(x, 1.0)\n for i in range(len(inclusion_prob) - 1):\n self.assertGreaterEqual(inclusion_prob[i + 1], inclusion_prob[i])",
"def evaluate(self, xs):\n # ps = 1. / (np.sqrt(2. * np.pi) * self.sigma) * \\\n # np.exp(-0.5 * (self.mean - xs) * self.invvar * (self.mean - xs))\n # ps = np.zeros_like(xs)\n # for n, x in enumerate(xs):\n # ps[n] += self.evaluate_one(x)\n ps = self.dist.probability(xs)\n return ps",
"def prob4():\n#raise NotImplementedError(\"Problem 4 Incomplete\")\n h = lambda x : x[0] < -1 and x[1] > 1\n f = lambda x : stats.multivariate_normal.pdf(x,mean=np.array([0,0]),cov=np.eye(2))\n g = lambda x : stats.multivariate_normal.pdf(x,mean=np.array([-1,1]),cov=np.eye(2))\n X = np.random.multivariate_normal(mean=np.array([-1,1]),cov=np.eye(2),size=10000)\n return 1./10000*np.sum(np.apply_along_axis(h,1,X)*np.apply_along_axis(f,1,X)/np.apply_along_axis(g,1,X))",
"def probability_s(self, s, c):\n return sum([self.get_likelihood(c, w) for w in s]) + self.prior_probability[c]",
"def probability(prob):\n return random.random() <= prob",
"def find_probability(problist, listoffive):\n\tprobs = []\n\tfor i in listoffive:\n\t\tprobs.append(problist[i])\n\ttotprob = 1\n\tfor n in probs:\n\t\ttotprob = totprob * n\n\treturn totprob",
"def ppf(self,x):\n ppfValue = self.invCDF(x)\n return ppfValue",
"def fraction_of_infectious_virus(self) -> _VectorisedFloat:\n return 1."
] | [
"0.65426224",
"0.61161095",
"0.61135066",
"0.5821492",
"0.57806534",
"0.5777198",
"0.5777198",
"0.5770672",
"0.57525903",
"0.5751081",
"0.5734077",
"0.5697218",
"0.5689497",
"0.5657795",
"0.5614329",
"0.55981725",
"0.5575849",
"0.5567694",
"0.5567694",
"0.5539852",
"0.54907244",
"0.5488256",
"0.54736185",
"0.5455088",
"0.5436425",
"0.5429832",
"0.54285926",
"0.5392518",
"0.5383827",
"0.53762054"
] | 0.63304335 | 1 |
Return a int value of the number of completed extension of the SPorder. We store the number of extension of each subtree(subpartial order) such each node can have its partial order information with its current number of extension. | def number_of_extensions(M, root=None):
sp_order_formula = [i for i in M.series_partial_order_representation(root) if
(i.__contains__('series') or i.__contains__('parallel'))]
while sp_order_formula:
# Extend the children of the current operator
operator = sp_order_formula.pop()
left, right = M.tree.successors(operator)
for child in [left, right]:
# Recursion if the child is a operator (also known as a sub-tree in Binary Construction Tree).
if M.tree.node[child]['num_extension'] == 0:
if child.__contains__('series') or child.__contains__('parallel'):
M.tree.node[child]['num_extension'] = number_of_extensions(M, child)
else:
M.tree.node[child]['num_extension'] = 1
# Employ the property of series-parallel partial order to calculate the number of extension
if operator.__contains__('series'):
num_extension = (M.tree.node[left]['num_extension']) * (M.tree.node[right]['num_extension'])
M.tree.node[operator]['num_extension'] = num_extension
if operator.__contains__('parallel'):
# n1, n2 is the number of events (labels) on a partial order, we need to store the previous result.
n1 = len([i for i in M.series_partial_order_representation(left) if
not (i.__contains__('series') or i.__contains__('parallel'))])
n2 = len([i for i in M.series_partial_order_representation(right) if
not (i.__contains__('series') or i.__contains__('parallel'))])
num_extension = (math.factorial(n1 + n2) / (math.factorial(n1) * (math.factorial(n2)))) * (
M.tree.node[left]['num_extension']) * (M.tree.node[right]['num_extension'])
M.tree.node[operator]['num_extension'] = num_extension
#
# # When series structure
# if operator.__contains__('series') or operator.__contains__('parallel'):
# try:
# # Employ the property of series-parallel partial order to calculate the number of extension
# if operator.__contains__('series'):
# num_extension = (M.tree.node[left]['num_extension']) * (M.tree.node[right]['num_extension'])
# M.tree.node[operator]['num_extension'] = num_extension
#
# if operator.__contains__('parallel'):
# # n1, n2 is the number of events (labels) on a partial order, we need to store the previous result.
# n1 = len(M.series_partial_order_representation(left))
# n2 = len(M.series_partial_order_representation(right))
#
# num_extension = (math.factorial(n1 + n2) / (math.factorial(n1) * (math.factorial(n2)))) * (
# M.tree.node[left]['num_extension']) * (M.tree.node[right]['num_extension'])
#
# M.tree.node[operator]['num_extension'] = num_extension
#
# print "{}:{}".format(operator, num_extension)
# sp_order_formula.append(operator)
# except Exception:
# raise Exception
#
# # When parallel structure
# else:
# if len(temp_stack) > 0:
# temp_left = temp_stack.pop()
#
# # When they share the same parent then push the temp_left to sp_order_formula
# if M.tree.predecessors(temp_left)[0] == M.tree.predecessors(left)[0]:
# sp_order_formula.append(operator)
# sp_order_formula.append(right)
# sp_order_formula.append(left)
# sp_order_formula.append(temp_left)
# # otherwise, keep pushing the new operator(parent) to temp_stack
# else:
# temp_stack.append(left)
# sp_order_formula.append(operator)
# sp_order_formula.append(right)
#
# result = sp_order_formula.pop()
return M.tree.node[root]['num_extension'] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_number_of_parts(score): \n number_of_parts = 0\n for e in score.recurse().parts:\n number_of_parts = number_of_parts + 1\n\n return( number_of_parts ) # get_number_of_parts ",
"def depht(self, p):\n if self.is_root(p):\n return 0\n else:\n return 1 + self.depht(self.parents(p))",
"def num_stages(tree, order):\n p = len(order)\n stages = cstree_to_stages(tree, order)\n return sum([len(stages[i]) for i in range(1,p)])",
"def get_ext_count(self):\n return m2.x509_get_ext_count(self.x509)",
"def getNumExtension(self, *args):\n return _libsbml.SBMLExtensionRegistry_getNumExtension(self, *args)",
"def count(self):\n return len(self.order_lst)",
"def n_subfile(self):\n self.assert_is_dir_and_exists()\n n = 0\n for _ in self.select_file(recursive=False):\n n += 1\n return n",
"def naive_order_calculation(self):\n\t\torder = 0\n\t\tfor pt in self.enumerate_points():\n\t\t\torder += 1\n\t\treturn order",
"def total_priority(self) -> int:\n return self.tree[0].item()",
"def get_numTrans(self):\n return len(self.TL)",
"def size(self):\n return self.variables.end_of_tree - 1",
"def get_iter_num(self):\n\tif len(self.cost) > 0:\n first_key = list(self.cost.keys())[0]\n num = len(self.cost[first_key]) - 1\n\telse:\n\t first_key = list(self.prim_var.keys())[0]\n num = len(self.prim_var[first_key]) - 1\n\treturn num",
"def __len__(self):\n return len(self.subtrees())",
"def incremental_part(self) -> int:\n if self.is_old_style:\n return int(self.numeric_part[4:])\n return int(self.split('.', 1)[1])",
"def __len__(self):\n return sum(len(p) for p in self.parts)",
"def __len__(self):\n return len(self._order)",
"def _total_priority(self):\n return self.nodes[0]",
"def count(self):\r\n return self.count_helper(self.top_node)",
"def getSize(self):\n if self.subsym == None:\n if self.size == 0:\n return 1\n else:\n return self.size\n else:\n if self.size == 0:\n return self.subsym.getSize()\n else:\n return self.size * self.subsym.getSize()",
"def pdepth(self, s=False):\n try:\n d = plist([x.pdepth() + 1 for x in self], root=self.__root__)\n except Exception:\n d = plist([0], root=self.__root__)\n if s:\n d = d.ungroup(-1).puniq()\n if d:\n return max(d)\n return 0\n return d",
"def Depth(self):\n return self.path.count('.') + (self.parent is not None)",
"def num_parts(self):\n return self._num_parts",
"def numPaths(self):\n if self.numpaths > -1:\n return self.numpaths\n\n if self.jolt == 0:\n return 1\n\n paths = 0\n for parent in self.parents:\n paths += parent.numPaths()\n \n return paths",
"def count(self):\n return self.__tree.node_count",
"def total_nt(self) -> int:\n return self.sequence.length",
"def num_tree(self):\n if self.handle is None:\n raise AttributeError('Model not loaded yet')\n out = ctypes.c_size_t()\n _check_call(_LIB.TreeliteQueryNumTree(self.handle, ctypes.byref(out)))\n return out.value",
"def count_items(self):\n count = 0\n for o in self.order_lst:\n count += o.count()\n \n return count",
"def incremental_part(self) -> int:\n return self.arxiv_id.incremental_part",
"def complexity(self, mode='#nodes'):\n if mode == '#nodes':\n return len(self.nodes)",
"def get_last_order_number_used():\n return Order.__last_order_number_used"
] | [
"0.5881316",
"0.5866302",
"0.5593788",
"0.5586631",
"0.5544093",
"0.5523313",
"0.5400796",
"0.5370281",
"0.5363735",
"0.53597265",
"0.53486913",
"0.5302492",
"0.5282759",
"0.52362657",
"0.52335614",
"0.5229352",
"0.5222564",
"0.5211037",
"0.5194625",
"0.5190381",
"0.518178",
"0.51655275",
"0.5158521",
"0.5142015",
"0.5113787",
"0.5088867",
"0.5081601",
"0.5077674",
"0.50768197",
"0.5063407"
] | 0.65274924 | 0 |
Returns a random number of closes based on close_parens_probabilities. close_parens_probabilities defaults to [0.772, 0.206, 0.021, 0.001]. This is roughly equivalent to each selection coming from a binomial distribution with n=4 and p=1/16. | def generate_close_count(self):
prob = random.random()
close_probabilities = reductions(
lambda i, j: i + j,
self.close_parens_probabilities
) + [1.0]
parens = 0
while prob > close_probabilities[1]:
parens += 1
del close_probabilities[0]
return parens | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def random_coefficients(self, n=3, max_range = 10):\n return np.random.uniform(-1*max_range, max_range, n)",
"def generate_close_count(self):\n return 0",
"def chance(n, p):\n total = 0.0\n for k in range(n+1):\n total += comb(n, k, exact=False) * p**k * (1-p) ** (n-k)\n return total",
"def sample_from_probabilities(probabilities, topn=ALPHASIZE):\n p = np.squeeze(probabilities)\n p[np.argsort(p)[:-topn]] = 0\n p = p / np.sum(p)\n return np.random.choice(ALPHASIZE, 1, p=p)[0]",
"def build_random_population(n: int)->Population:\n DEF_COO = 2\n v = [make_random_automaton(DEF_COO) for i in range(n)]\n return Population(v)",
"def rand_order_size():\n return poisson(2.0) + 1",
"def generate_random_tropical_poly(max_degree, min_coefficient, max_coefficient):\n coefficients = []\n for d in range(0, random.randint(1, max_degree) + 1):\n coefficients.append(random.randint(min_coefficient, max_coefficient))\n return coefficients",
"def random_p_mn(num_compounds, num_reactions, num_reversible, p, seed=None):\n # setup\n rand_float = np.random.random_sample\n rand_int = np.random.random_integers\n cmpd_prefix = OPTIONS.compound_prefix\n rxn_prefix = OPTIONS.reaction_prefix\n if seed:\n np.random.seed(int(seed))\n num_compounds = int(num_compounds)\n num_reactions = int(num_reactions)\n num_reversible = int(num_reversible)\n p = float(p)\n network = MetabolicNetwork()\n # add compounds\n for i in range(num_compounds):\n network.add_node(met.BasicCompound(\"%s%d\" % (cmpd_prefix, i)))\n # choose a number of reactions as reversible\n reversibles = set()\n while len(reversibles) < num_reversible:\n reversibles.add(rand_int(0, num_reactions - 1))\n for i in range(num_reactions):\n if i in reversibles:\n network.add_node(met.BasicReaction(\n \"%s%d\" % (rxn_prefix, i), reversible=True))\n else:\n network.add_node(met.BasicReaction(\n \"%s%d\" % (rxn_prefix, i)))\n for src in network.compounds:\n for tar in network.reactions:\n if rand_float() < p:\n network.add_edge(src, tar, coefficient=0)\n LOGGER.debug(\"added link %s -> %s.\", str(src), str(tar))\n # a conditional case here (elif not if) because we cannot determine\n # substrates and products from bidirectional edges\n elif rand_float() < p:\n network.add_edge(tar, src, coefficient=0)\n LOGGER.debug(\"added link %s -> %s.\", str(tar), str(src))\n prune_network(network)\n return network",
"def prob_choice(p):\n \n return np.random.random_sample() < p",
"def prbs(m, n):\n return np.array(np.random.rand(m, n) > 0.5, dtype=np.int) - 0.5",
"def getHighestRank_Naive(self):\n\n # filter out low confidences\n maxConfidence = max(self.Predictors, key=operator.attrgetter('confidence'))\n p = [p for p in self.Predictors if p.confidence >= maxConfidence.confidence]\n \n if len(p) == 1:\n # only one predictor has high confidence\n chosenPredictor = p[0]\n elif len(p) > 1:\n # many predictors has high confidence. look for highest wins\n maxScore = max(p, key=operator.attrgetter('scoreWins'))\n \n# maxScore = 0\n# for pred in p:\n# maxScore = max(maxScore, pred.scoreWins - pred.scoreLosts) \n \n predictors = p\n p = [p for p in predictors if p.scoreWins >= maxScore.scoreWins]\n \n if len(p) == 1:\n chosenPredictor = p[0]\n elif len(p) > 1:\n # there are ties. look for lowest losts\n maxScore = min(p, key=operator.attrgetter('scoreLosts'))\n predictors = p\n p = [p for p in predictors if p.scoreLosts == maxScore]\n \n if len(p) == 1:\n chosenPredictor = p[-1]\n elif len(p) > 1:\n # choose at random\n random = rps.random() % len(p)\n chosenPredictor = p[random]\n \n if len(p) == 0:\n maxConfidence = max(self.Predictors, key=operator.attrgetter('confidence'))\n p = [p for p in self.Predictors if p.confidence >= maxConfidence.confidence]\n \n random = rps.random() % len(p)\n chosenPredictor = p[random]\n else:\n # confidences are low. look for highest wins\n maxScore = max(self.Predictors, key=operator.attrgetter('scoreWins'))\n p = [p for p in self.Predictors if p.scoreWins == maxScore]\n \n if len(p) == 1:\n chosenPredictor = p[0]\n elif len(p) > 1:\n # choose at random\n random = rps.random() % len(p)\n chosenPredictor = p[random]\n else:\n # choose at random\n random = rps.random() % len(self.Predictors)\n chosenPredictor = self.Predictors[random]\n \n if Debug:\n maxScore = max([p.scoreWins for p in self.Predictors]) \n print(\"max score: %f \" % (maxScore), end=\"\") \n maxScore = max([p.confidence for p in self.Predictors]) \n print(\"max confidence: %f \" % (maxScore), end=\"\") \n print(\"chosen predictor: %s\" % (chosenPredictor.name))\n #input()\n\n \n rankConfidence = chosenPredictor.confidence\n return chosenPredictor, rankConfidence",
"def compute_open_max_probability(openmax_known_score, openmax_unknown_score):\n\n prob_closed, prob_open, scores = [], [], []\n\n # Compute denominator for closet set + open set normalization.\n # Sum up the class scores.\n for category in range(10):\n scores += [np.exp(openmax_known_score[category])]\n total_denominator = np.sum(np.exp(openmax_known_score)) + np.exp(openmax_unknown_score)\n\n # Scores for image belonging to either closed or open set.\n prob_closed = np.array([scores / total_denominator])\n prob_open = np.array([np.exp(openmax_unknown_score) / total_denominator])\n\n probs = np.append(prob_closed.tolist(), prob_open)\n\n assert len(probs) == 11\n return probs",
"def binomial(n: int, p: float) -> int:\n return sum(bernoulli_trial(p) for _ in range(n))",
"def discrete_sampling(N, probs, states=None):\n\n p = probs.squeeze()/np.sum(probs)\n\n bins = np.digitize(\n np.random.uniform(0., 1., (N, 1)), np.hstack((0, np.cumsum(p))))-1\n\n if states is None:\n x = bins\n else:\n assert(states.shape[0] == probs.shape[0])\n x = states[bins]\n\n return x.squeeze()",
"def probchoice(V, d, obs=[]):\n\n #d = 0.01\n #obs = []\n #V = array([0., 0., 0.2, 0.2, 0.2, 0.4])\n\n #top = [exp(d*v) for v in V]\n top = exp(V * (1./d))\n\n #print top\n #print dummy\n\n # set the value of any prior observations to zero\n for i in range(len(obs)): top[obs[i][0]] = 0.\n\n bottom = sum(top)\n cp = [t/bottom for t in top]\n\n r = random()\n #print r\n #print cumsum(cp)\n\n return where((1*(r < cumsum(cp)))==1)[0][0]\n\n #return sum(1*(random() < cumsum(cp)))-1",
"def sample_response(self, slate_p):\n slate_p[slate_p >= 0.5] = 1.0\n slate_p[slate_p < 0.5] = 0.0\n# m = Bernoulli(slate_p)\n# return m.sample()\n return slate_p",
"def sampleBracketsPowerModel(nSamples):\n\tbrackets = []\n\tfor sampleIndex in range(nSamples):\n\t\tbracket = []\n\t\tregionWinners = np.zeros(4)\n\t\tfor regionIndex in range(4):\n\t\t\tregionVector, regionWinners[regionIndex] = sampleRegionPowerModel()\n\t\t\tbracket += regionVector\n\t\t# 2. Select outcomes of F4/NCG games (Rounds 5, 6)\n\t\tteam0 = {'seed': regionWinners[0], 'region': 0}\n\t\tteam1 = {'seed': regionWinners[1], 'region': 1}\n\t\tteam2 = {'seed': regionWinners[2], 'region': 2}\n\t\tteam3 = {'seed': regionWinners[3], 'region': 3}\n\t\twinProb1 = getWinProbability(team0, team1, r=5)\n\t\twinProb2 = getWinProbability(team2, team3, r=5)\n\t\tf4Result1 = 1 if random.random() < winProb1 else 0\n\t\tf4Result2 = 1 if random.random() < winProb2 else 0\n\t\tbracket.append(f4Result1)\n\t\tbracket.append(f4Result2)\n\t\tncgSeeds = applyRoundResults(regionWinners, [f4Result1, f4Result2])\n\n\t\t# NCG\n\t\tncgTeam1 = {'seed': ncgSeeds[0], 'region': -1}\n\t\tncgTeam2 = {'seed': ncgSeeds[1], 'region': -1}\n\t\twinProb = getWinProbability(ncgTeam1, ncgTeam2, r=6)\n\t\tncgResult = 1 if random.random() < winProb else 0\n\t\tbracket.append(ncgResult)\n\t\tbrackets.append(bracket)\n\treturn brackets",
"def generate_brainpool_curves(count: int, p: ZZ, initial_seed: str) -> SimulatedCurves:\n simulated_curves = SimulatedCurves(\"brainpool\", p.nbits(), initial_seed, count)\n curve = Brainpool(initial_seed, p)\n b_seed = None\n for _ in range(count):\n if curve.not_defined():\n curve.set_a()\n if not curve.check_a():\n curve.seed_update()\n curve.clear()\n continue\n b_seed = increment_seed(curve.seed())\n curve.set_b(b_seed)\n if not curve.check_b():\n b_seed = increment_seed(b_seed)\n continue\n if not curve.secure():\n curve.set_seed(increment_seed(b_seed))\n curve.clear()\n continue\n curve.generate_generator(b_seed)\n curve.compute_properties()\n simulated_curves.add_curve(curve)\n curve = Brainpool(curve.seed(), p)\n curve.seed_update()\n\n return simulated_curves",
"def probability(N_dr, L_opmin, L_opmax, L_min, L_max, L_d):\n opening_nomullignas = []\n opening_withmullignas = []\n sum_nomulligans = 0\n sum_withmulligans = 0\n mulligan_coeff = 0\n\n for i in range(L_opmin, min(L_opmax + 1, 8)): # first make a list of tuples of the form:\n # (number_of_lands_in_opening_hand, probability_of_drawing_such_a_hand)\n a = hypergeom(i, 7, 60, L_d)\n opening_nomullignas.append((i, a))\n mulligan_coeff = mulligan_coeff + a # this will be used later for calculating the probability of\n # taking the mulligan and is used as a coefficient before the mulligan sum\n for (x, y) in opening_nomullignas: # use the list of tuples to calculate the first part of equation 5\n partial_nomulligans = 0\n for j in range(L_min - x, L_max - x + 1):\n partial_nomulligans = partial_nomulligans + hypergeom(j, N_dr, 53, L_d - x)\n sum_nomulligans = sum_nomulligans + partial_nomulligans * y\n\n mulligan_coeff = 1 - mulligan_coeff # probability of mulliganing\n for i in range(L_opmin, min(L_opmax + 1, 7)): # doing the same thing as before, but drawing 6 instead of 7 cards\n a = hypergeom(i, 6, 60, L_d)\n opening_withmullignas.append((i, a))\n\n for (x, y) in opening_withmullignas:\n partial_withmulligans = 0\n for j in range(L_min - x, L_max - x + 1):\n partial_withmulligans = partial_withmulligans + hypergeom(j, N_dr, 54, L_d - x)\n sum_withmulligans = sum_withmulligans + partial_withmulligans * y\n total_withmulligans = mulligan_coeff * sum_withmulligans\n\n return total_withmulligans + sum_nomulligans",
"def init_start_prob(n_states):\n start_prob_est = np.random.rand(n_states, 1)\n start_prob_est /= np.sum(start_prob_est, 0)\n assert np.isclose(np.sum(start_prob_est, 0), 1.)\n return start_prob_est",
"def rnd(n: float, n_places: int) -> float:\n mult = math.pow(10, n_places or 3)\n return math.floor(n * mult + 0.5) / mult",
"def rnd(n: float, n_places: int) -> float:\n mult = math.pow(10, n_places or 3)\n return math.floor(n * mult + 0.5) / mult",
"def random_curve(number_of_unique_knots, polynomial_order = 2, dimensions = 3):\n knot_multiplicites = rand.randint(1, polynomial_order + 1, size = number_of_unique_knots)\n\n # ensure interpolation on the edges of the control polygon\n knot_multiplicites[0] = polynomial_order + 1\n knot_multiplicites[-1] = polynomial_order + 1\n\n knot_vector = np.repeat(range(len(knot_multiplicites)), repeats = knot_multiplicites)\n\n basis = BSplineBasis(knot_vector = knot_vector, polynomial_order = polynomial_order)\n\n control_points = rand.random_sample((basis.number_of_basis_functions, dimensions))\n\n curve = BSplineCurve(basis, control_points)\n\n return curve",
"def choice(some_list, probabilities, max_probability=1):\n x = random.uniform(0, max_probability)\n cumulative_probability = 0.0\n\n for item, item_probability in zip(some_list, probabilities):\n cumulative_probability += item_probability\n if x < cumulative_probability: break\n\n return item",
"def sample(probs):\n\n probs = probs / probs.sum()\n return np.random.choice(np.arange(len(probs)), p=probs.flatten())",
"def argmax_break_ties(self, probs):\n return np.random.choice(np.where(probs == probs.max())[0])",
"def bernoulli_trial(p: float) -> int:\n return 1 if random.random() < p else 0",
"def random(cls, borns=[-1, 1], radius_borns=[0, 1], **kwargs):\n x = random.uniform(*borns)\n y = random.uniform(*borns)\n r = random.uniform(*radius_borns)\n return cls(x, y, radius=r, **kwargs)",
"def prob4():\n\n\n N = 500000\n random_draws = np.random.multivariate_normal(mean = [-1,1], cov =[[1,0],[0,1]], size = N)\n\n h = lambda x: x[0] < -1 and x[1] > 1\n f = lambda x: stats.multivariate_normal(mean = [ 0, 0]).pdf(x)\n g = lambda x: stats.multivariate_normal(mean = [-1, 1]).pdf(x)\n\n probability = [h(random_draws[i]) * f(random_draws[i]) / g(random_draws[i]) for i in range(N)]\n\n return 1./N * np.sum(probability)",
"def genpoly(sum_count=10, deg=5, cof=10, min_count=1):\n\n p = Polynome([0], '')\n d_prev = -1\n while p.length < min_count:\n p.reset()\n for j in range(sum_count):\n d = randrange(deg)\n c = randrange(-cof, cof)\n while d == d_prev and c != 0:\n d = randrange(deg)\n c = randrange(-cof, cof)\n d_prev = d\n p.plus(c, d)\n return p"
] | [
"0.5542607",
"0.5430894",
"0.53839785",
"0.53698313",
"0.5318463",
"0.530422",
"0.5260695",
"0.52213675",
"0.5187429",
"0.51843035",
"0.5050707",
"0.5049101",
"0.50225484",
"0.502187",
"0.5020095",
"0.5019308",
"0.49802074",
"0.4975735",
"0.49733624",
"0.49695677",
"0.4961603",
"0.4961603",
"0.49605796",
"0.49522468",
"0.49401844",
"0.49288383",
"0.49273053",
"0.49268448",
"0.49249288",
"0.49226183"
] | 0.7580884 | 0 |