query (stringlengths 9-9.05k) | document (stringlengths 10-222k) | metadata (dict) | negatives (sequencelengths 30-30) | negative_scores (sequencelengths 30-30) | document_score (stringlengths 4-10) | document_rank (stringclasses 2 values) |
---|---|---|---|---|---|---|
Evaluate the NER model on the examples given. | def evaluate(ner_model, examples):
    # The Scorer computes and stores evaluation scores
    scorer = Scorer()
    for text, annotations in examples:
        # Process the text to get entities predicted
        document = ner_model.make_doc(text)
        correct_annotations = GoldParse(document, entities=annotations['entities'])
        predicted_annotations = ner_model(text)
        # Update the evaluation scores from the document
        scorer.score(predicted_annotations, correct_annotations)
    return scorer.scores | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def evaluate_model():\n\n # Get the processed data (in proper format to evaluate the NER model)\n data = get_json_from_file_path(PROCESSED_DATA_PATH)\n # Split the dataset for training and test as we did for training\n train_data, test_data = train_test_split(data, train_size=0.7, \n random_state=4)\n\n # Load the model trained\n try:\n ner_model = spacy.load(OUTPUT_MODEL_PATH)\n except Exception as err:\n msg = f'Could not load the model. Error: {err}'\n raise Exception(msg)\n\n # Compute evaluation scores\n print('Computing metrics...')\n scores = evaluate(ner_model, test_data)\n # General metrics of the model\n f_score = scores.get('ents_f')\n precision = scores.get('ents_p')\n recall = scores.get('ents_r')\n print('\\nScoring:')\n print(f'F-score: {f_score}')\n print(f'Precision: {precision}')\n print(f'Recall: {recall}')\n\n # Get the specific scores for each entity \n scores_per_entity = scores.get('ents_per_type')\n # Get the F-score of the entities\n f_scores_of_entities = []\n for entity_scores in scores_per_entity.values():\n f_scores_of_entities.append(entity_scores['f'])\n # Compute the macro averaged F-score\n macro_avg_f_score = sum(f_scores_of_entities)/len(f_scores_of_entities)\n print(f'Macro averaged F-score: {macro_avg_f_score}')\n \n print('\\nScores per entity;')\n print('{:<15} {:<10} {:<10} {:<10}'.format('Entity','F-score','Precision','Recall'))\n for key, value in scores_per_entity.items():\n entity = key\n f, p, r = value['f'], value['p'], value['r']\n print('{:<15} {:<10.2f} {:<10.2f} {:<10.2f}'.format(entity, f, p, r))",
"def evaluate_model(estimator: es.Estimator, speech_labels: List[str], entries, input_fn_eval) -> Dict[str, float]:\n # Get predictions\n predictions = estimator.predict(input_fn=input_fn_eval)\n\n # Get probabilities of each predicted class\n probs = [pred[\"probabilities\"] for pred in predictions]\n num_of_examples = len(probs)\n targets = [entry[1] for entry in entries] # The ground truth transcript\n\n total_wer, total_cer = 0., 0.\n greedy_decoder = decoder.DeepSpeechDecoder(speech_labels, blank_index=28)\n for prob, target in zip(probs, targets):\n decode = greedy_decoder.decode(prob)\n total_cer += greedy_decoder.cer(decode, target)\n total_wer += greedy_decoder.wer(decode, target)\n\n total_cer /= num_of_examples\n total_wer /= num_of_examples\n global_step = estimator.get_variable_value(tf.GraphKeys.GLOBAL_STEP)\n eval_results = {\n _WER_KEY: total_wer,\n _CER_KEY: total_cer,\n tf.GraphKeys.GLOBAL_STEP: global_step\n }\n return eval_results",
"def _evaluate_examples(self):\n\n getLogger(\"problog_lfi\").debug(\"Evaluating examples:\")\n if self._log:\n evaluator = ExampleEvaluatorLog(self._weights)\n else:\n evaluator = ExampleEvaluator(self._weights)\n\n results = []\n for i, example in enumerate(self._compiled_examples):\n try:\n result = evaluator(example)\n results.append(result)\n getLogger(\"problog_lfi\").debug(\n \"Example \"\n + str(i + 1)\n + \":\\tFrequency = \"\n + str(result[0][0])\n + \"\\tp_evidence = \"\n + str(result[0][1])\n + \"\\tp_queries = \"\n + str(result[0][2])\n )\n except InconsistentEvidenceError:\n # print(\"Ignoring example {}/{}\".format(i + 1, len(self._compiled_examples)))\n getLogger(\"problog_lfi\").warning(\n \"Ignoring example {}/{}\".format(i + 1, len(self._compiled_examples))\n )\n\n return list(chain.from_iterable(results))",
"def evaluate(net,\n tokenizer, ner_tagger,\n device, eval_data_filepath, eval_preds_filepath,\n fb_passes = 1, text_length = 250, verbose=False):\n\n \"\"\" PREPADE DATA FOR PREDICTION \"\"\"\n dh = utils.HotPotDataHandler(eval_data_filepath)\n dev_data = dh.data_for_paragraph_selector()\n\n point_ids = [point[0] for point in dev_data] # needed to handle useless datapoints\n queries = [point[2] for point in dev_data]\n contexts = [point[3] for point in dev_data]\n\n graphs = [EntityGraph.EntityGraph(c,\n context_length=text_length,\n tagger=ner_tagger)\n for c in contexts]\n\n # if the NER in EntityGraph doesn't find entities, the datapoint is useless.\n useless_datapoint_inds = [i for i, g in enumerate(graphs) if not g.graph]\n queries = [q for i, q in enumerate(queries) if i not in useless_datapoint_inds]\n contexts = [c for i, c in enumerate(contexts) if i not in useless_datapoint_inds]\n graphs = [g for i, g in enumerate(graphs) if i not in useless_datapoint_inds]\n\n # required for prediction in the right format\n s_lens_batch = [utils.sentence_lengths(c, tokenizer) for c in contexts]\n\n # turn the texts into tensors in order to put them on the GPU\n qc_ids = [net.encoder.token_ids(q, c) for q, c in zip(queries, contexts)] # list[ (list[int], list[int]) ]\n q_ids, c_ids = list(zip(*qc_ids)) # tuple(list[int]), tuple(list[int])\n q_ids_list = [torch.tensor(q).to(device) for q in q_ids] # list[Tensor]\n c_ids_list = [torch.tensor(c).to(device) for c in c_ids] # list[Tensor]\n\n for i,g in enumerate(graphs):\n graphs[i].M = g.M.to(device) # work with enumerate to actually mutate the graph objects\n\n \"\"\" FORWARD PASSES \"\"\"\n answers = {} # {question_id: str} (either \"yes\", \"no\" or a string containing the answer)\n sp = {} # {question_id: list[list[paragraph_title, sent_num]]} (supporting sentences)\n\n # return useless datapoints unanswered\n for i in useless_datapoint_inds:\n answers[point_ids[i]] = \"noanswer\"\n sp[point_ids[i]] = []\n\n for i, (query, context, graph, s_lens) in enumerate(zip(q_ids_list, c_ids_list, graphs, s_lens_batch)):\n\n if verbose: print(queries[i])\n\n answer, sup_fact_pairs = predict(net, query, context, graph, tokenizer,\n s_lens, fb_passes=fb_passes) #TODO sort these parameters\n\n answers[dev_data[i][0]] = answer # {question_id: str}\n sp[dev_data[i][0]] = sup_fact_pairs # {question_id: list[list[paragraph_title, sent_num]]}\n\n if verbose: print(answer)\n\n with open(eval_preds_filepath, 'w') as f:\n json.dump( {\"answer\":answers, \"sp\":sp} , f)\n\n\n \"\"\" EVALUATION \"\"\"\n return official_eval_script.eval(eval_preds_filepath, eval_data_filepath) #TODO return aything else than the metrics?",
"def evaluate():\n\tmodel.eval()\n\tstddev = 1 # And mean=0\n\tfor batch_idx, (data, _) in enumerate(syn_test_loader):\n\t\tdata = data.cuda()\n\t\tif batch_idx == 0:\n\t\t\tnoise = torch.autograd.Variable(torch.randn(batch_size, bottleneck).cuda() * stddev)\n\t\t\tsample_representation(\"orig_nat\", data, noise)\n\t\t\tsample_representation(\"natural\", data, noise)\n\t\t\tsample_representation(\"orig_syn\", data, noise)\n\t\t\tsample_representation(\"synth\", data, noise)",
"def eval(self):\r\n if WORDSPLIT:\r\n train, test = self.get_train_test_wordsplit()\r\n elif UTTERANCE_SPLIT:\r\n train, test, val = self.get_train_test_utterance_split()\r\n wordlist = joblib.load('wordlist.pkl')\r\n dictionary = joblib.load('dict.pkl')\r\n phones = joblib.load('phones.pkl')\r\n metadata_help = {'wordlist': wordlist, 'dictionary': dictionary, 'phones': phones}\r\n p2c = utils.phone2class(phones)\r\n c2p = utils.class2phone(phones)\r\n \"\"\"Get test generator\"\"\"\r\n test_data = Dataset({'files': test, 'mode': 'eval', 'metadata_help': metadata_help})\r\n test_gen = data.DataLoader(test_data, batch_size=1,\r\n shuffle=True, collate_fn=test_data.collate_eval, drop_last=True)\r\n for batch_number, features in tqdm(enumerate(test_gen)):\r\n spectrograms = features['spectrograms']\r\n phones = features['phones']\r\n batch_metadata = features['metadata'][0]\r\n self.G = self.G.eval()\r\n\r\n outputs = self.G(spectrograms)\r\n outputs = np.squeeze(outputs.detach().cpu().numpy())\r\n phones = np.squeeze(phones.detach().cpu().numpy())\r\n phones = phones.astype(dtype=int)\r\n phones = [c2p[x] for x in phones]\r\n\r\n output_classes = np.argmax(outputs, axis=1)\r\n\r\n \"\"\"Decode the output predictions into a phone sequence\"\"\"\r\n # https://stackoverflow.com/questions/38065898/how-to-remove-the-adjacent-duplicate-value-in-a-numpy-array\r\n duplicates_eliminated = np.asarray([k for k, g in groupby(output_classes)])\r\n blanks_eliminated = duplicates_eliminated[duplicates_eliminated != 0]\r\n predicted_phones_ = [c2p[x] for x in blanks_eliminated]\r\n \"\"\"remove SOS and EOS\"\"\"\r\n predicted_phones = []\r\n for x in predicted_phones_:\r\n if x != 'SOS' and x != 'EOS':\r\n predicted_phones.append(x)\r\n\r\n data_to_save = {'speaker': batch_metadata['speaker'],\r\n 'word': batch_metadata['word'],\r\n 'true_phones': batch_metadata['phones'],\r\n 'predicted_phones': predicted_phones}\r\n dump_path = os.path.join(self.predict_dir, batch_metadata['utterance'] + '.pkl')\r\n joblib.dump(data_to_save, dump_path)",
"def run_examples():\n\n for example in examples:\n\n print(str(example) + \" : \", end=\" \")\n try:\n t, smush = analyse(example, my_env)\n print(lookup(t, smush))\n # print(\"Smush\")\n # for k,v in smush.items():\n # print(f\"\\t{k} : {v}\")\n except (ParseError, InferenceError) as e:\n print(e)",
"def evaluate(model, iterator, f, ner_label, verbose = False):\n # set model to evaluation mode\n model.eval()\n\n y_true = []\n y_pred = []\n Words, Is_heads, Tags, Y, Y_hat = [], [], [], [], []\n with torch.no_grad():\n for i, batch in enumerate(iterator):\n words, input_ids, is_heads, tags, input_tags, entity_label, seqlens = batch\n\n _, _, y_hat = model(input_ids, input_tags, entity_label) # y_hat: (N, T)\n\n Words.extend(words)\n Is_heads.extend(is_heads)\n Tags.extend(tags)\n Y.extend(input_tags.numpy().tolist())\n Y_hat.extend(y_hat.cpu().numpy().tolist())\n ## gets results and save\n with open(\"temp\", 'w') as fout:\n for words, is_heads, tags, y_hat in zip(Words, Is_heads, Tags, Y_hat):\n y_hat = [hat for head, hat in zip(is_heads, y_hat) if head == 1]\n preds = [ner_label.idx2tag[hat] for hat in y_hat]\n if len(preds[1:-1]) > 0:\n y_pred.append(preds[1:-1])\n if len(tags.split()[1:-1]) > 0:\n y_true.append(tags.split()[1:-1])\n assert len(preds) == len(words.split()) == len(tags.split())\n for w, t, p in zip(words.split()[1:-1], tags.split()[1:-1], preds[1:-1]):\n fout.write(f\"{w} {t} {p}\\n\")\n fout.write(\"\\n\")\n\n assert len(y_pred) == len(y_true)\n\n # logging loss, f1 and report\n p, r, f1 = f1_score(y_true, y_pred)\n\n # metrics_str = \"; \".join(\"{}: {:05.2f}\".format(k, v) for k, v in metrics.items())\n # logging.info(\"- {} metrics: \".format(mark) + metrics_str)\n #\n # if verbose:\n # report = classification_report(true_tags, pred_tags)\n # logging.info(report)\n\n final = f + \".P%.4f_R%.4f_F%.4f\" %(p, r, f1)\n with open(final, 'w') as fout:\n result = open(\"temp\", \"r\").read()\n fout.write(f\"{result}\\n\")\n\n fout.write(f\"precision={p}\\n\")\n fout.write(f\"recall={r}\\n\")\n fout.write(f\"f1={f1}\\n\")\n if verbose:\n report = classification_report(y_true, y_pred)\n print(report)\n\n os.remove(\"temp\")\n\n print(\"precision=%.2f\"%p)\n print(\"recall=%.2f\"%r)\n print(\"f1=%.2f\"%f1)\n return p, r, f1",
"def eval_model(self, eval_data): # noqa: ignore flake8\"\n os.makedirs(self.model_dir, exist_ok=True)\n source_texts, target_texts = create_dataset(eval_data)\n logger.info(\"Evaluating the model...\")\n logger.info(\"Number of examples: {}\".format(len(source_texts)))\n\n if self.src_2_ids is None:\n self.src_2_ids = load_word_dict(self.src_vocab_path)\n self.trg_2_ids = load_word_dict(self.trg_vocab_path)\n if self.model is None:\n if os.path.exists(self.model_path):\n self.model = Seq2Seq(\n encoder_vocab_size=len(self.src_2_ids),\n decoder_vocab_size=len(self.trg_2_ids),\n embed_size=self.embed_size,\n enc_hidden_size=self.hidden_size,\n dec_hidden_size=self.hidden_size,\n dropout=self.dropout\n )\n self.load_model()\n self.model.to(device)\n else:\n raise ValueError(\"Model not found at {}\".format(self.model_path))\n self.model.eval()\n\n train_src, train_trg = one_hot(source_texts, target_texts, self.src_2_ids, self.trg_2_ids, sort_by_len=True)\n\n id_2_srcs = {v: k for k, v in self.src_2_ids.items()}\n id_2_trgs = {v: k for k, v in self.trg_2_ids.items()}\n logger.debug(f'evaluate src: {[id_2_srcs[i] for i in train_src[0]]}')\n logger.debug(f'evaluate trg: {[id_2_trgs[i] for i in train_trg[0]]}')\n eval_data = gen_examples(train_src, train_trg, self.batch_size, self.max_length)\n\n total_num_words = 0.\n total_loss = 0.\n with torch.no_grad():\n for it, (mb_x, mb_x_len, mb_y, mb_y_len) in enumerate(eval_data):\n mb_x = torch.from_numpy(mb_x).to(device).long()\n mb_x_len = torch.from_numpy(mb_x_len).to(device).long()\n mb_input = torch.from_numpy(mb_y[:, :-1]).to(device).long()\n mb_output = torch.from_numpy(mb_y[:, 1:]).to(device).long()\n mb_y_len = torch.from_numpy(mb_y_len - 1).to(device).long()\n mb_y_len[mb_y_len <= 0] = 1\n\n mb_pred, attn = self.model(mb_x, mb_x_len, mb_input, mb_y_len)\n\n mb_out_mask = torch.arange(mb_y_len.max().item(), device=device)[None, :] < mb_y_len[:, None]\n mb_out_mask = mb_out_mask.float()\n\n loss = self.loss_fn(mb_pred, mb_output, mb_out_mask)\n\n num_words = torch.sum(mb_y_len).item()\n total_loss += loss.item() * num_words\n total_num_words += num_words\n loss = total_loss / total_num_words\n logger.info(f\"Evaluation loss: {loss}\")\n return {'loss': loss}",
"def evaluate_model():\n\n print '\\n\\tevaluate result'\n os.system('./conlleval.pl -d \\'\\t\\' < ' + encoded_test + ' >> ' + result_file)\n print '\\t--done\\n'",
"def eval(self):\n # self.recognizer.eval()\n self.detector.eval()\n self.shared_conv.eval()",
"def evaluate(model, tokenizer, dataset, lines, output_test_file, batch_size=32):\n sampler = SequentialSampler(dataset)\n dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=batch_size)\n\n print(\"*** Evaluating ***\")\n eval_loss = 0.0\n num_steps = 0\n preds = None\n out_label_ids = None\n for i, batch in enumerate(dataloader):\n if i % 200 == 199:\n print(\"=\", end=\"\")\n if i % 5000 == 4999:\n print(\"[Step \" + str(i+1) + \" / \" + str(len(dataloader)) + \"] \" )\n model.eval()\n batch = tuple(t.to(device) for t in batch)\n\n with torch.no_grad():\n labels = batch[3]\n outputs = model(input_ids=batch[0], attention_mask=batch[1], labels=labels)\n tmp_eval_loss, logits = outputs[:2]\n eval_loss += tmp_eval_loss.mean().item()\n \n num_steps += 1\n if preds is None:\n preds = logits.detach().cpu().numpy()\n out_label_ids = labels.detach().cpu().numpy()\n else:\n preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)\n out_label_ids = np.append(out_label_ids, labels.detach().cpu().numpy(), axis=0)\n \n eval_loss = eval_loss / num_steps\n \n preds_label = np.argmax(preds, axis=1)\n \n accuracy = (preds_label == out_label_ids).mean()\n output_dir = os.path.dirname(output_test_file)\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n with open(output_test_file, \"w\") as writer:\n all_logits = preds.tolist()\n for i, logit in enumerate(all_logits):\n line = '<CODESPLIT>'.join(\n [item.encode('ascii', 'ignore').decode('ascii') for item in lines[i]])\n\n writer.write(line + '<CODESPLIT>' + '<CODESPLIT>'.join([str(l) for l in logit]) + '\\n')\n print(\"Accuracy =\", str(accuracy))\n\n return accuracy",
"def eval(self):\n self.train(mode=False)",
"def fit(self, examples):\n\n sentences = []\n for example in examples:\n sentences += text_to_w2v_input(example)\n\n self.w2v_model = get_w2v_model(sentences)",
"def evaluate(args, model, tokenizer, eval_dataset, eval_dataloader, task_name, model_type, split, step):\n model.eval()\n processor = MoralStoriesProcessor()\n results = dict()\n softmax = torch.nn.Softmax(dim=1)\n\n # Eval!\n logger.info('***** Running evaluation on the validation / test set *****')\n logger.info(' Num examples = %d', len(eval_dataset))\n logger.info(' Batch size = %d', args.eval_batch_size)\n batch_losses = list()\n eval_loss = 0.0\n micro_loss, macro_loss = 0.0, 0.0\n num_batches, num_tokens = 0, 0\n preds = None\n soft_preds = None\n out_label_ids = None\n # Perform a single evaluation step\n for batch in tqdm(eval_dataloader, desc='Evaluating', mininterval=10, ncols=100):\n batch = tuple(t.to(args.device) for t in batch)\n with torch.no_grad():\n if 'gen' not in task_name:\n inputs = {'input_ids': batch[0],\n 'attention_mask': batch[1],\n 'token_type_ids': batch[2] if model_type == 'bert' else None,\n 'labels': batch[3]}\n else:\n inputs = {'input_ids': batch[0],\n 'attention_mask': batch[1],\n 'labels': batch[3]}\n if 'gpt2' not in model_type:\n # Prepare decoder inputs and labels for enc-dec models\n inputs['labels'] = batch[3][:, 1:].contiguous() # shift\n decoder_input_ids = batch[3][:, :-1].clone() # shift\n decoder_input_ids[decoder_input_ids == -100] = tokenizer.pad_token_id # remove masking\n inputs['decoder_input_ids'] = decoder_input_ids.contiguous()\n\n outputs = model(**inputs)\n\n tmp_eval_loss, logits = outputs[:2]\n soft_logits = softmax(logits)\n eval_loss += tmp_eval_loss.mean().item()\n batch_losses.append(tmp_eval_loss.item())\n\n if 'gen' not in task_name:\n if preds is None:\n preds = logits.detach().cpu().numpy()\n soft_preds = soft_logits.detach().cpu().numpy()\n out_label_ids = inputs['labels'].detach().cpu().numpy()\n else:\n preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)\n soft_preds = np.append(soft_preds, soft_logits.detach().cpu().numpy(), axis=0)\n out_label_ids = np.append(out_label_ids, inputs['labels'].detach().cpu().numpy(), axis=0)\n else:\n # Obtain per-token loss for perplexity computation\n batch_loss = get_token_loss(args, logits, batch[3], batch[4], model_type=model_type)\n macro_loss += batch_loss.mean().item()\n micro_loss += batch_loss.sum().item()\n num_batches += 1\n num_tokens += batch_loss.view(-1).shape[0]\n\n # Compute and update evaluation metric values\n if 'gen' not in task_name:\n # Isolate model predictions\n preds = np.argmax(preds, axis=1)\n soft_preds = soft_preds.tolist()\n curr_result = compute_cls_metrics(preds, out_label_ids)\n else:\n macro_perplexity = torch.exp(torch.tensor(macro_loss / num_batches)).item()\n micro_perplexity = torch.exp(torch.tensor(micro_loss / num_tokens)).item()\n curr_result = {'macro_perplexity': macro_perplexity,\n 'micro_perplexity': micro_perplexity}\n\n if len(results.keys()) == 0:\n for k, v in curr_result.items():\n results[k] = [v]\n else:\n for k, v in curr_result.items():\n results[k].append(v)\n\n # Log metrics\n output_eval_file = os.path.join(args.output_dir, 'results_{}_{}.txt'.format(task_name, split))\n with open(output_eval_file, 'a') as writer:\n logger.info('***** Eval results *****')\n writer.write('STEP: {:s}\\n'.format(str(step)))\n for key in sorted(curr_result.keys()):\n logger.info(' %s = %s', key, str(curr_result[key]))\n writer.write('%s = %s\\n' % (key, str(curr_result[key])))\n\n # Log predictions\n if 'gen' not in task_name:\n output_pred_file = \\\n os.path.join(args.output_dir, 'predictions_{}_{}_{}.lst'.format(task_name, split, 
step))\n with open(output_pred_file, 'w') as writer:\n logger.info('***** Write predictions *****')\n for pred in preds:\n writer.write('{}\\n'.format(processor.get_labels()[pred]))\n\n # Maintain a single metrics file\n if os.path.exists(args.output_dir):\n with open(os.path.join(args.output_dir, 'metrics_{}_{}.json'.format(task_name, split)), 'w') as f:\n f.write(json.dumps(results))\n f.close()\n\n # Report mean dev loss\n mean_eval_loss = eval_loss / len(eval_dataloader)\n logging.info('\\n' + '*' * 10)\n logging.info('Mean development loss: {:.4f}'.format(mean_eval_loss))\n logging.info('*' * 10 + '\\n')\n\n return results, mean_eval_loss, preds, soft_preds",
"def train(self, examples):\n print(examples)\n # first we will do gensim to get word embeddings\n tokens = []\n for example in examples:\n for tuple in example:\n tokens.append([tuple[0]])\n self.model = Word2Vec(tokens, min_count=1, size=100).wv\n # shuffle the examples so that they are gone through 'randomly'\n #print(examples)\n random.shuffle(examples)\n #print(examples)\n # iterate through our examples\n for j in range(len(examples)):\n # the stored label for the previous token\n prev_label = None\n prev_word = None\n # iterate through our tokens for the example\n for i in range(len(examples[j])):\n # store our token and its label\n token = examples[j][i][0]\n y = examples[j][i][1]\n # get the features for our current token\n next_word = None\n if i <= (len(examples)-1):\n next_word = examples[j][i+1][0]\n features = self.featurize(prev_label, prev_word, token, next_word)\n # set our previous label to our current since\n # we are done featurizing and need to store it for\n # the next iteration\n prev_label = y\n # a dictionary that will store our z values\n z = {}\n # calculate our z value for every state for\n # the example we are on\n # z(state) = features * weights\n # z[state] = np.dot(features, weights[state])\n for state in self.states:\n z[state] = np.dot(features, self.weights[state])\n # store our max\n max = -1\n # store our y_hat\n y_hat = None\n # store our probabilities\n prob = {}\n # this runs softmax on our z's\n # y_hat = softmax(z)\n denom = sum(np.exp(np.array(list(z.values()))))\n for state in self.states:\n # softmax = p(state) = e^z[state] / (sum[e^z for all z's)\n # making sure this works the way I want it to, should\n # be three values\n #print(np.array(list(z.values())))\n #print(np.exp(np.array(list(z.values()))))\n prob[state] = np.exp(z[state]) / denom\n # if our current prob is greater than the others then it is our boy\n if prob[state] > max:\n # save the new prob as the max\n max = prob[state]\n # save the state as our prediction y_hat\n y_hat = state\n # this will hold our gradients for all the states\n gradients = {}\n for state in self.states:\n # gradient[state] = ((y_hat == state) - prob[state]) * features\n gradients[state] = ((y_hat == state) - prob[state]) * features\n # weights[state] -= loss * gradients\n self.weights[state] -= self.loss * gradients[state]",
"def evaluate(model, test_files):\n print(\"Running predictions.\")\n models = load_model(model)\n predictions = predict(models, test_files)\n\n # # write predictions to file\n # write_predictions(\"evaluate_out.json\",predictions)\n evaluate_individual(predictions, test_files, models)\n evaluate_overall(predictions)",
"def predict(self, ex_words: List[str]) -> int:\n raise Exception(\"Don't call me, call my subclasses\")",
"def inference(self, inputs):\n # NOTE: This makes the assumption that your model expects text to be tokenized\n # with \"input_ids\" and \"token_type_ids\" - which is true for some popular transformer models, e.g. bert.\n # If your transformer model expects different tokenization, adapt this code to suit\n # its expected input format.\n input_ids = inputs[\"input_ids\"]\n input_ids = input_ids.to(self.device)\n\n coarse_result = self.model.generate(input_ids = input_ids, )\n coarse_result = coarse_result.to(\"cpu\")\n fined_result = self.tokenizer.decode(coarse_result[0].tolist()[inputs[\"original_length\"]+1:],\n skip_special_tokens = True)\n #logger.info(\"Model predicted: '%s'\", fined_result)\n\n return [fined_result]",
"def train(self, examples):\n # iterate over our sentences in the examples\n for sentence in examples:\n # some testing prints\n # print('---------------------------')\n # print(sentence)\n # print('---------------------------')\n # get every tuple in the sentence\n for i in range(len(sentence)):\n # seperate the word and the state\n word = sentence[i][0]\n state = sentence[i][1]\n # add our word and state to our set of all words and states\n self.vocab.add(word)\n self.states.add(state)\n # if we are at the first word in the sentence need to\n # increment the number of times this tag appeared first in a sentence\n if i == 0:\n self.pi[state] += 1\n # else we need to increment the number of times the\n # current tag was preceeded by the tag before it\n else:\n if sentence[i - 1][1] not in self.transitions:\n self.transitions[sentence[i - 1][1]] = Counter()\n self.transitions[sentence[i - 1][1]][state] += 1\n # now we increment the number of times the word had this tag\n if state not in self.emissions:\n self.emissions[state] = Counter()\n self.emissions[state][word] += 1\n # print(self.emissions)\n # print(self.transitions)\n # print(self.pi)\n # print('---------------------------')\n\n # now we store the counts we will need since during our iterations\n # the counts will change\n # this stores how many sentences we have\n # count(sentences)\n pi_val = sum(self.pi.values())\n # now we are going to get the counts of the tags\n # count(t_i)\n # we are using emissions because each tag occurs in it unlike\n # in transitions where the last tag is lost kind of\n for state in self.emissions.keys():\n # print(state, sum(self.emissions[state].values()))\n self.tag_count[state] = sum(self.emissions[state].values())\n # print('---------------------------')\n # now we do the probability of a sentence starting with each tag\n # count(t_i) / count(sentences)\n for state in self.pi:\n self.pi[state] /= pi_val\n # now we will calculate the probabilites that each tag proceeds the next tag\n # ie p(t_i | t_i-1) = count(t_i-1, t_i) / count(t_i-1)\n for prev_state in self.transitions:\n for state in self.transitions[prev_state]:\n # print(prev_state, state, self.transitions[prev_state][state])\n # print(prev_state, tag_count[prev_state])\n self.transitions[prev_state][state] /= self.tag_count[prev_state]\n # print(self.transitions[prev_state][state])\n # print('---------------------------')\n # and the probability of a word having the tag with laplace smoothing\n # p(w_i | t_i) = count(t_i, w_i) / count(t_i)\n for state in self.emissions:\n for word in self.emissions[state]:\n # print(state, word, self.emissions[state][word])\n # print(state, tag_count[state])\n self.emissions[state][word] = (self.emissions[state][word] + 1) / (\n self.tag_count[state] + len(self.vocab))\n # print(self.emissions[state][word])\n # print('---------------------------')\n # print(self.emissions)\n # print(self.transitions)\n # print(self.pi)\n # print('---------------------------')\n # print(len(self.vocab))\n # print(len(self.states))\n # print('---------------------------')",
"def print_eval(trainset, testset, exptypes=EXPTYPES, semantic=False, savemodels=False, loadmodels=False, deprep=False, externals=True, predict=True):\n system_pairs = []\n print \"== cleaning lsts ==\"\n cleanupnonespanexpressions(testset)\n cleanholdercandidates(testset)\n cleanholders(testset)\n cleanupnonespanexpressions(trainset)\n cleanholdercandidates(trainset)\n cleanholders(trainset)\n \n print \"== train ==\"\n ev = evaluate()\n features, labels, stats = getfeaturesandlabels(trainset, semantic=semantic, predict=False)\n print counters, '\\n'\n\n print \"== test ==\"\n counters.clear()\n ftest, ltest, stest = getfeaturesandlabels(testset, semantic=semantic, predict=predict)\n print counters\n for exp in exptypes:\n vec, X, y = create_matrix(features[exp], labels[exp])\n if externals:\n vecw, Xw, yw = create_matrix(features[exp + 'w'], labels[exp + 'w'])\n vecimp, Ximp, yimp = create_matrix(features[exp + 'w'], labels[exp + 'implicit'])\n if loadmodels:\n clf = read_model(loadmodels + exp)\n else:\n clf = create_model(X, y)\n if externals:\n clfw = create_model(Xw, yw)\n clfimp = create_model(Ximp, yimp)\n if savemodels:\n write_model(clf, savemodels + exp)\n print \"== eval ==\"\n if deprep:\n print \"== {} ==\".format(deprep)\n Xt, yt = transform_to_matrix(ftest[exp], ltest[exp], vec)\n if externals:\n Xtw, ytw = transform_to_matrix(ftest[exp + 'w'], ltest[exp + 'w'], vecw)\n Xtimp, ytimp = transform_to_matrix(ftest[exp + 'w'], ltest[exp + 'implicit'], vecimp)\n results = clf.predict_proba(Xt)\n s_p_w = False\n s_p_imp = False\n gold_p1 = ev.get_unique_exp(copy.deepcopy(stest['positions'][exp + 'w']), exp, count=False)\n gold_p2 = copy.deepcopy(gold_p1)\n gold_p3 = copy.deepcopy(gold_p1)\n if clfw:\n resultsw = clfw.predict_proba(Xtw)\n s_p_w=ev.get_system_pairs_prob(stest['positions'][exp + 'w'], resultsw, gold_p1)\n counters['s_p_w' + exp] = len(s_p_w)\n if DEBUG:\n print \"RESULTSW\"\n print resultsw\n if clfimp:\n resultsimp = clfimp.predict_proba(Xtimp)\n s_p_imp=ev.get_system_pairs_prob(stest['positions'][exp + 'implicit'], resultsimp, gold_p2)\n counters['s_p_imp' + exp] = len(s_p_imp)\n if DEBUG:\n print \"RESULTSIMP\"\n print resultsimp\n s_p_int=ev.get_system_pairs_prob(stest['positions'][exp], results, gold_p3)\n counters['s_p_int' + exp] = len(s_p_int)\n system_pairs_exp = ev.merge_system_pairs(s_p_int, s_p_imp=s_p_imp, s_p_w=s_p_w)\n counters['system_pairs_all' + exp] = len(system_pairs_exp)\n for pair in system_pairs_exp:\n if 'confidence' in pair and pair['confidence'] > 0:\n counters['system_pairs' + exp] += 1\n if predict:\n ssc_exp = ev.spansetcoverage_o_p(system_pairs_exp, exptype=exp)\n print \"system exp - {}:\\n{}\".format(exp, prf_prettystring(ssc_exp))\n else:\n ssc_exp = ev.spansetcoverage_o_p(system_pairs_exp, exptype=exp)\n print \"gold exp - {}:\\n{}\".format(exp, prf_prettystring(ssc_exp))\n system_pairs.extend(system_pairs_exp)\n if predict:\n ssc = ev.spansetcoverage_o_p(system_pairs)\n print \"system exp - all:\\n\", prf_prettystring(ssc)\n else:\n ssc = ev.spansetcoverage_o_p(system_pairs)\n print \"gold exp - all: \\n\", prf_prettystring(ssc)\n \n for k,v in sorted(counters.items(), key=lambda x: x[0]):\n print k, v\n if isinstance(deprep, basestring):\n dump_jsonfile(system_pairs, 'system_pairs-' + deprep + '.json')\n return {'stats': stest, 'system_pairs': system_pairs}",
"def print_examples(example_iter, model, n=2, max_len=100, \n sos_index=1, \n src_eos_index=None, \n trg_eos_index=None, \n src_vocab=None, trg_vocab=None):\n\n model.eval()\n count = 0\n print()\n \n if src_vocab is not None and trg_vocab is not None:\n src_eos_index = src_vocab.stoi[EOS_TOKEN]\n trg_sos_index = trg_vocab.stoi[SOS_TOKEN]\n trg_eos_index = trg_vocab.stoi[EOS_TOKEN]\n else:\n src_eos_index = None\n trg_sos_index = 1\n trg_eos_index = None\n \n for i, batch in enumerate(example_iter):\n \n src = batch.src.cpu().numpy()[0, :]\n trg = batch.trg_y.cpu().numpy()[0, :]\n\n # remove </s> (if it is there)\n src = src[:-1] if src[-1] == src_eos_index else src\n trg = trg[:-1] if trg[-1] == trg_eos_index else trg \n \n result, _ = greedy_decode(\n model, batch.src, batch.src_mask, batch.src_lengths,\n max_len=max_len, sos_index=trg_sos_index, eos_index=trg_eos_index)\n print(\"Example #%d\" % (i+1))\n print(\"Src : \", \" \".join(lookup_words(src, vocab=src_vocab)))\n print(\"Trg : \", \" \".join(lookup_words(trg, vocab=trg_vocab)))\n print(\"Pred: \", \" \".join(lookup_words(result, vocab=trg_vocab)))\n print()\n \n count += 1\n if count == n:\n break",
"def train(self, iteration, train_examples, model_path=None):\n return self.process_trained_model(\n self.train_with_examples(iteration, train_examples, model_path),\n iteration,\n train_examples,\n model_path,\n )",
"def train_and_evaluate(OUTPUT_DIR,do_train = True,do_eval=True):\n\n\t\n\tBATCH_SIZE = 32\n\tLEARNING_RATE = 2e-5\n\tNUM_TRAIN_EPOCHS = 5.0\n\n\t#in this steps lr will be low and training will be slow\n\tWARMUP_PROPORTION = 0.1\n\n\n\n\tif os.path.exists(OUTPUT_DIR) and os.listdir(OUTPUT_DIR) and do_train:\n\t\traise ValueError(\"Output directory ({}) already exists and is not empty.\".format(OUTPUT_DIR))\n\tif not os.path.exists(OUTPUT_DIR):\n\t\tos.makedirs(OUTPUT_DIR)\n\t\t\n\t#create train and test data\n\n\ttrain_sents,train_labels,test_sents,test_labels = create_train_test(\"ADE/DRUG-AE.rel\",\"ADE/negative_data_AE.rel\")\n\n\tdevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n\ttokenizer = BertTokenizer.from_pretrained(\"bert-base-uncased\", do_lower_case=True)\n\n\tif do_train:\n\n\t\ttrain_examples = [InputExample(guid=None,text_a=sentence,text_b=None,label=label) for sentence,label in zip(train_sents, train_labels)]\n\t\tnum_train_examples = len(train_examples)\n\n\t\tnum_train_steps = int(math.ceil(num_train_examples / BATCH_SIZE * NUM_TRAIN_EPOCHS))\n\t\tnum_warmup_steps = int(num_train_steps * WARMUP_PROPORTION)\n\n\t\tmodel = BertForSequenceClassification.from_pretrained(\"bert-base-uncased\",num_labels = num_labels)\n\t\tmodel.to(device)\n\n\t\tparam_optimizer = list(model.named_parameters())\n\t\tno_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']\n\t\toptimizer_grouped_parameters = [\n\t\t\t{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},\n\t\t\t{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}\n\t\t\t]\n\n\t\toptimizer = BertAdam(optimizer_grouped_parameters,lr=LEARNING_RATE,warmup=WARMUP_PROPORTION,t_total=num_train_steps)\n\n\t\tglobal_step = 0\n\t\tnb_tr_steps = 0\n\t\ttr_loss = 0\n\n\t\ttrain_features = convert_examples_to_features(\n\t\t\ttrain_examples, label_list, MAX_SEQ_LENGTH, tokenizer)\n\n\n\t\tlogger.info(\"***** Running training *****\")\n\t\tlogger.info(\" Num examples = %d\", num_train_examples)\n\t\tlogger.info(\" Batch size = %d\", BATCH_SIZE)\n\t\tlogger.info(\" Num steps = %d\", num_train_steps)\n\n\n\t\tall_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)\n\t\tall_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)\n\t\tall_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)\n\t\tall_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long)\n\n\t\ttrain_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)\n\t\ttrain_sampler = RandomSampler(train_data)\n\n\t\ttrain_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=BATCH_SIZE)\n\n\t\tmodel.train()\n\t\t# for name, param in model.named_parameters():\n\t\t# if param.requires_grad:\n\t\t# print(name)\n\t\t# return\n\t\tfor _ in trange(int(NUM_TRAIN_EPOCHS), desc=\"Epoch\"):\n\t\t\ttr_loss = 0\n\t\t\tnb_tr_examples, nb_tr_steps = 0, 0\n\t\t\tfor step, batch in enumerate(tqdm(train_dataloader, desc=\"Iteration\")):\n\t\t\t\tbatch = tuple(t.to(device) for t in batch)\n\t\t\t\tinput_ids, input_mask, segment_ids, label_id = batch\n\t\t\t\tloss = model(input_ids, segment_ids, input_mask, label_id)\n\t\t\t\tloss.backward()\n\n\t\t\t\ttr_loss += loss.item()\n\t\t\t\tnb_tr_examples += input_ids.size(0)\n\t\t\t\tnb_tr_steps += 1\n\t\t\t\toptimizer.step()\n\t\t\t\toptimizer.zero_grad()\n\t\t\t\tglobal_step += 
1\n\t\t\tprint(tr_loss)\n\n\t\t# Save a trained model and the associated configuration\n\t\tmodel_to_save = model.module if hasattr(model, 'module') else model # Only save the model it-self\n\t\toutput_model_file = os.path.join(OUTPUT_DIR, WEIGHTS_NAME)\n\t\ttorch.save(model_to_save.state_dict(), output_model_file)\n\t\toutput_config_file = os.path.join(OUTPUT_DIR, CONFIG_NAME)\n\t\twith open(output_config_file, 'w') as f:\n\t\t\tf.write(model_to_save.config.to_json_string())\n\t\tlabel_map = {i : label for i, label in enumerate(label_list,1)} \n\t\tmodel_config = {\"bert_model\":\"bert-base-uncased\",\"do_lower\":True,\"max_seq_length\":MAX_SEQ_LENGTH,\"num_labels\":num_labels,\"label_map\":label_map}\n\t\tjson.dump(model_config,open(os.path.join(OUTPUT_DIR,\"model_config.json\"),\"w\"))\n\n\telse:\n\t\toutput_config_file = os.path.join(OUTPUT_DIR, CONFIG_NAME)\n\t\toutput_model_file = os.path.join(OUTPUT_DIR, WEIGHTS_NAME)\n\t\tconfig = BertConfig(output_config_file)\n\t\tmodel = BertForSequenceClassification(config, num_labels=num_labels)\n\t\tmodel.load_state_dict(torch.load(output_model_file))\n\n\tmodel.to(device)\n\n\tif do_eval:\n\n\t\tEVAL_BATCH_SIZE = 32\n\n\t\teval_examples = [InputExample(guid=None,text_a=sentence,text_b=None,label=label) for sentence,label in zip(test_sents, test_labels)]\n\t\tnum_eval_examples = len(eval_examples)\n\n\t\teval_features = convert_examples_to_features(\n\t\t\teval_examples, label_list, MAX_SEQ_LENGTH, tokenizer)\n\n\t\tlogger.info(\"***** Running evaluation *****\")\n\t\tlogger.info(\" Num examples = %d\", num_eval_examples)\n\t\tlogger.info(\" Batch size = %d\", EVAL_BATCH_SIZE)\n\t\tall_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)\n\t\tall_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)\n\t\tall_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)\n\t\tall_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long)\n\t\teval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids) \n\t\t# # Run prediction for full data\n\t\teval_sampler = SequentialSampler(eval_data)\n\t\teval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=EVAL_BATCH_SIZE)\n\t\tmodel.eval()\n\n\t\teval_loss, eval_accuracy = 0, 0\n\t\tnb_eval_steps, nb_eval_examples = 0, 0\n\t\ty_true = []\n\t\ty_pred = []\n\t\tlabel_map = {i : label for i, label in enumerate(label_list,1)}\n\t\tfor input_ids, input_mask, segment_ids, label_ids in tqdm(eval_dataloader, desc=\"Evaluating\"):\n\t\t\tinput_ids = input_ids.to(device)\n\t\t\tinput_mask = input_mask.to(device)\n\t\t\tsegment_ids = segment_ids.to(device)\n\t\t\tlabel_ids = label_ids.to(device)\n\n\t\t\twith torch.no_grad():\n\t\t\t\tlogits = model(input_ids, segment_ids, input_mask)\n\t\t\t\t\n\t\t\tlogits = torch.argmax(F.log_softmax(logits,dim=1),dim=1)\n\t\t\tlogits = logits.detach().cpu().numpy()\n\t\t\tlabel_ids = label_ids.to('cpu').numpy()\n\t\t\ty_pred.extend(logits)\n\t\t\ty_true.extend(label_ids)\n\t\tprint(len(y_pred))\n\t\tprint(len(y_true))\n\t\treport = classification_report(y_true, y_pred)\n\t\toutput_eval_file = os.path.join(OUTPUT_DIR, \"eval_results.txt\")\n\t\twith open(output_eval_file, \"w\") as writer:\n\t\t\tlogger.info(\"***** Eval results *****\")\n\t\t\tlogger.info(\"\\n%s\", report)\n\t\t\twriter.write(report)",
"def run(self, verbose=0):\n self.verbose = verbose\n self._preproc()\n self._lda()\n self._evaluate()",
"def test_predict(self):\n\n docs = self.docs\n for m in self.models:\n preds = m.predict(docs)\n self.assertTrue(isinstance(preds, turicreate.SArray))\n self.assertEqual(len(preds), len(docs))\n self.assertEqual(preds.dtype, int)\n\n preds = m.predict(docs, output_type=\"probability\")\n self.assertTrue(isinstance(preds, turicreate.SArray))\n self.assertTrue(len(preds) == len(docs))\n s = preds.apply(lambda x: sum(x))\n self.assertTrue((s.apply(lambda x: abs(x - 1)) < 0.000001).all())\n\n # Test predictions when docs have new words\n new_docs = turicreate.SArray([{\"-1,-1\": 3.0, \"0,4\": 5.0, \"0,3\": 2.0}])\n preds = m.predict(new_docs)\n self.assertEqual(len(preds), len(new_docs))\n\n # Test additional burnin. Ideally we could show that things\n # converge as you increase burnin.\n preds_no_burnin = m.predict(docs, output_type=\"probability\", num_burnin=0)\n self.assertEqual(len(preds_no_burnin), len(docs))",
"def predict(self, infile, model_path=None, eval_gold=False, as_text=False):\n\n\t\tif model_path is None: # Try default model location\n\t\t\tmodel_path = script_dir + os.sep + \"models\" + os.sep + self.corpus + \"_ensemble_sent.pkl\"\n\n\t\tclf, num_labels, cat_labels, multicol_dict, vocab, firsts, lasts = joblib.load(model_path)\n\n\t\tif as_text:\n\t\t\tconllu = infile\n\t\telse:\n\t\t\tconllu = io.open(infile,encoding=\"utf8\").read()\n\n\t\t#tagged = udpipe_tag(conllu,self.udpipe_model)\n\t\ttagged = tt_tag(conllu,self.lang)\n\n\t\ttrain_feats, _, toks, _, _ = read_conll(tagged,genre_pat=self.genre_pat,mode=\"sent\",as_text=True,char_bytes=self.lang==\"zho\")\n\t\theaders = sorted(list(train_feats[0].keys()))\n\n\t\tdata = []\n\n\t\tpreds = {}\n\t\tfor e in self.estimators:\n\t\t\tpred = e.predict(tagged)\n\t\t\t_, preds[e.name + \"_prob\"] = [list(x) for x in zip(*pred)]\n\t\t\theaders.append(e.name + \"_prob\")\n\n\t\tgenre_warning = False\n\t\tfor i, item in enumerate(train_feats):\n\t\t\titem[\"first\"] = item[\"word\"][0] if item[\"word\"][0] in firsts else \"_\"\n\t\t\titem[\"last\"] = item[\"word\"][-1] if item[\"word\"][-1] in lasts else \"_\"\n\t\t\tif \"genre\" in cat_labels:\n\t\t\t\tif item[\"genre\"] not in multicol_dict[\"encoder_dict\"][\"genre\"].classes_: # New genre not in training data\n\t\t\t\t\tif not genre_warning:\n\t\t\t\t\t\tsys.stderr.write(\"! WARN: Genre not in training data: \" + item[\"genre\"] + \"; suppressing further warnings\\n\")\n\t\t\t\t\t\tgenre_warning = True\n\t\t\t\t\titem[\"genre\"] = \"_\"\n\t\t\tif \"pos\" in cat_labels:\n\t\t\t\tif item[\"pos\"] not in multicol_dict[\"encoder_dict\"][\"pos\"].classes_:\n\t\t\t\t\titem[\"pos\"] = \"_\"\n\t\t\tif \"cpos\" in cat_labels:\n\t\t\t\tif item[\"cpos\"] not in multicol_dict[\"encoder_dict\"][\"cpos\"].classes_:\n\t\t\t\t\titem[\"cpos\"] = \"_\"\n\t\t\tif item[\"word\"] not in vocab and \"word\" in multicol_dict[\"encoder_dict\"]:\n\t\t\t\tif item[\"pos\"] in multicol_dict[\"encoder_dict\"][\"word\"].classes_:\n\t\t\t\t\titem[\"word\"] = item[\"pos\"]\n\t\t\t\telse:\n\t\t\t\t\titem[\"word\"] = \"_\"\n\t\t\tfor e in self.estimators:\n\t\t\t\titem[e.name + \"_prob\"] = preds[e.name + \"_prob\"][i]\n\n\t\t\tfeats = []\n\t\t\tfor k in headers:\n\t\t\t\tfeats.append(item[k])\n\n\t\t\tdata.append(feats)\n\n\t\tdata, headers, _, _ = self.n_gram(data,headers,[],[])\n\n\t\tdata = pd.DataFrame(data, columns=headers)\n\t\tdata_encoded = self.multicol_transform(data,columns=multicol_dict[\"columns\"],all_encoders_=multicol_dict[\"all_encoders_\"])\n\n\t\tdata_x = data_encoded[cat_labels+num_labels].values\n\t\tpred = clf.predict(data_x)\n\n\t\t# Ensure first token in document is always a sentence break\n\t\tfor i, x in enumerate(data_encoded[\"tok_id\"].values):\n\t\t\tif x == 1:\n\t\t\t\tpred[i] = 1\n\n\t\tif eval_gold:\n\t\t\tgold_feats, _,_,_,_ = read_conll(conllu,genre_pat=self.genre_pat,mode=\"sent\",as_text=True)\n\t\t\tgold = [int(t['wid'] == 1) for t in gold_feats]\n\t\t\tconf_mat = confusion_matrix(gold, pred)\n\t\t\tsys.stderr.write(str(conf_mat) + \"\\n\")\n\t\t\ttrue_positive = conf_mat[1][1]\n\t\t\tfalse_positive = conf_mat[0][1]\n\t\t\tfalse_negative = conf_mat[1][0]\n\t\t\tprec = true_positive / (true_positive + false_positive)\n\t\t\trec = true_positive / (true_positive + false_negative)\n\t\t\tf1 = 2*prec*rec/(prec+rec)\n\t\t\tsys.stderr.write(\"P: \" + str(prec) + \"\\n\")\n\t\t\tsys.stderr.write(\"R: \" + str(rec) + \"\\n\")\n\t\t\tsys.stderr.write(\"F1: \" + str(f1) + \"\\n\")\n\t\t\twith 
io.open(\"diff.tab\",'w',encoding=\"utf8\") as f:\n\t\t\t\tfor i in range(len(gold)):\n\t\t\t\t\tf.write(\"\\t\".join([toks[i],str(gold[i]),str(pred[i])])+\"\\n\")\n\t\t\treturn conf_mat, prec, rec, f1\n\t\telse:\n\t\t\treturn pred",
"def evaluate(text, articles, no_preprocess=False):\n if not _trained:\n print(\"No classifier initialized. Make sure to do so first\")\n raise Exception\n\n if not no_preprocess:\n text = body_reader.get_words_in(text)\n\n if _classifier == \"euclid\":\n return euclidean.evaluate(articles, text)\n elif _classifier == \"bayes\":\n return bayes.evaluate(articles, text)\n elif _classifier == \"rocchio\":\n return rocchio.evaluate(articles, text)",
"def eval(self): \n inputs,enc_input_weights, outputs, dec_input_weights = self.get_batch()\n predicted_ids = self.model.step(self.sess, inputs, enc_input_weights) \n print(\"=\"*20)\n for i in range(FLAGS.batch_size):\n print(\"* %dth sample target: %s\" % (i,str(outputs[i,1:]-2)))\n for predict in predicted_ids[i]:\n print(\"prediction: \"+str(predict)) \n print(\"=\"*20)",
"def test_ne():\n if not PRODUCTION_ENVIRONMENT:\n remove_all_articles()\n\n eval = 'data_resources/ned/evaluation/large_eval_checked.json'\n output = process_evaluation_input(eval)\n eval_output = json.load(open(eval))\n evaluate_ned(output, eval_output)"
] | [
"0.6644543",
"0.6285906",
"0.62076914",
"0.61468154",
"0.5994535",
"0.59692717",
"0.59335965",
"0.58229786",
"0.58054614",
"0.5794374",
"0.5765685",
"0.5763517",
"0.57432854",
"0.5719396",
"0.5703791",
"0.56885344",
"0.5686573",
"0.56428343",
"0.5631374",
"0.5617793",
"0.5611722",
"0.56095684",
"0.5608002",
"0.5607736",
"0.55803865",
"0.5577337",
"0.55727863",
"0.5567647",
"0.555971",
"0.5538782"
] | 0.7269016 | 0 |
Return pointer value given pointer label | def _getPointerValue(self, pointerLabel):
    index = POINTER_LABEL_LIST.index(pointerLabel)
    return float(self._raw_data['POINTERS'][index]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_pointer_value(pointer):\n return ct.cast(pointer, ct.c_void_p).value",
"def _get_pointer(self) -> str:\n pointers = {1: \"THAT\", 0: \"THIS\"}\n try:\n return pointers[self.value]\n except KeyError:\n raise InvalidSegmentException(\n f\"Expected pointer be 0 or 1 but got {self.value}\"\n )",
"def val_pointer_eq_val_pointer(self, label1, label2):\n # put label2's value in D\n self.value_to_d(label2)\n # put D in label 1\n self.d_to_value(label1)",
"def addr(label_name):\n\n if not utils.is_string_type(label_name):\n return None\n\n return labelmanager.addr(label_name)",
"def find_label(self, *args):\n return _ida_hexrays.cfuncptr_t_find_label(self, *args)",
"def fromLabel(name):\n return Data.labels.index(name)",
"def findLabel(self, label):\n return self.root._findLabel(label)",
"def label(tree):\n return tree[0]",
"def _getRef(self, label):\r\n\r\n ref = None\r\n\r\n for mapping in reversed(self.blscope):\r\n if label in mapping:\r\n ref = mapping[label]\r\n break\r\n\r\n if ref is None and label in self.fnscope:\r\n ref = self.fnscope[label]\r\n\r\n return ref",
"def fl_get_label_char_at_mouse(ptr_flobject):\n _fl_get_label_char_at_mouse = library.cfuncproto(\n library.load_so_libforms(), \"fl_get_label_char_at_mouse\", \\\n cty.c_int, [cty.POINTER(xfdata.FL_OBJECT)], \\\n \"\"\"int fl_get_label_char_at_mouse(FL_OBJECT * obj)\"\"\")\n library.check_if_flinitialized()\n library.verify_flobjectptr_type(ptr_flobject)\n library.keep_elem_refs(ptr_flobject)\n retval = _fl_get_label_char_at_mouse(ptr_flobject)\n if isinstance(retval, bytes):\n return retval.decode('utf-8')\n else: # str\n return retval",
"def get_parameter_by_label(self, label):\n \n for attr in self.parm_list:\n if attr.label == label:\n return attr\n\n self.logging.error(\"Can't find topic: \"+label)\n return None",
"def dereference_value(self, value: int) -> int:\n if self.is_register(value):\n return self[value]\n\n return value",
"def get_label(self, offset):\n self.ret = idc.GetDisasm(offset).replace(\"extrn \", \"\").split(\":\")[0]\n return self.ret",
"def get_item_from_label(self, label):\n idx = self.labels.index(label)\n item = self[idx][0]\n return item",
"def get_label(urs):\n return assign_term(urs)[1]",
"def nodeWithLabel(self, label):\r\n for nt in self.listNodes.keys(): \t# for all kind of nodes...\r\n for node in self.listNodes[nt]: \t# for all nodes of type <nt>\r\n if node.GGLabel.getValue() == label: # check if the node's label is what we are looking for...\r\n return node # a node has been found!\r\n return None # no appropriate node has been found \r",
"def label_index2node(label_index, labels):\n hi_pairs, med_pairs = labels\n if label_index < len(hi_pairs):\n return hi_pairs[label_index][0]\n else:\n error_msg = \"there is no node with label \"+str(label_index)\n assert label_index-len(hi_pairs) < len(med_pairs), error_msg\n return med_pairs[label_index-len(hi_pairs)][0]",
"def parse(cls, label) -> Any:\n return label",
"def extract_label(selector):\n return selector.split('=')[-1][:-1]",
"def _findLabel(self, label):\n if self.label == label:\n return self\n else:\n for i in range(self.nChildren()):\n found = self.children[i]._findLabel(label)\n if found:\n return found\n return None",
"def __getitem__(self, label_value: int) -> 'SegmentInfo':\n return self.infos[label_value]",
"def get_pointer_address(self, pointer:Union[List[int], Tuple[int]], update_cache=False)->int:\r\n\t\t\r\n\t\tif not update_cache and tuple(pointer) in self.pointer_cache: return self.pointer_cache[tuple(pointer)]\r\n\t\t\r\n\t\taddress:int = pointer[0]\r\n\t\tif len(pointer) > 1: \r\n\t\t\taddress += pointer[1]\r\n\t\t\tfor i in range(2, len(pointer)):\r\n\t\t\t\tReadAddr = c_size_t(address)\r\n\t\t\t\tif not self.ReadProcessMemory(self.process_handle, cast(address, c_void_p), byref(ReadAddr), sizeof(c_size_t)//2, 0): raise MemoryError(\"error calculating pointer refrence\")\r\n\t\t\t\taddress = ReadAddr.value\r\n\t\t\t\taddress += pointer[i]\r\n\t\t\r\n\t\tself.pointer_cache[tuple(pointer)] = address\r\n\t\t\r\n\t\treturn address",
"def expr_label(runtime_addr, s):\n\n runtime_addr = memorymanager.RuntimeAddr(runtime_addr)\n # TODO: If this continues to just forward to label() perhaps make that behavuour\n # official and just provide both names for backwards compatibility/documenting the\n # difference for users who want to??\n return label(runtime_addr, s)",
"def read_ptr(self, offset):\n return self.read_int64(offset)",
"def _getData(self, label):\n\n try:\n return self._data[label]\n except KeyError:\n try:\n field = ATOMIC_FIELDS[label]\n except KeyError:\n return None\n else:\n return getattr(self, '_get' + field.meth_pl)()",
"def lookup_class_idx(self,label):\r\n \r\n return self.class_labels[label]",
"def lookup(self, label):\n if label in self.bindings:\n return self.bindings[label]\n else:\n if self.parent:\n return self.parent.lookup(label)\n else:\n raise SnekNameError(\"name '{}' is not defined\".format(label))",
"def _pos2label(self, p, labels):\n if labels is not None:\n if p in labels.keys():\n return labels[p]\n else:\n return ''\n # raise ValueError('Fatal ERROR: no label for this position in label dictionary!')\n else:\n if p == 1:\n return 'top'\n elif p == 2:\n return 'bottom'\n elif p == 3:\n return 'left'\n elif p == 4:\n return 'right'",
"def get_val(self, arg_idx):\n\t\tidx = arg_idx-1\n\t\tif idx >= len(self.__par_modes) or self.__par_modes[idx] == 0:\n\t\t\treturn self.memory[self.memory[self.ptr+arg_idx]]\n\t\telif self.__par_modes[idx] == 1:\n\t\t\treturn self.memory[self.ptr + arg_idx]",
"def getLabelInfo(self, label): # real signature unknown; restored from __doc__\n pass"
] | [
"0.6980862",
"0.65615785",
"0.6370699",
"0.62846607",
"0.61790186",
"0.59617084",
"0.5798804",
"0.5661203",
"0.5612971",
"0.5582321",
"0.55821013",
"0.5565275",
"0.5552737",
"0.5509971",
"0.5504101",
"0.54816645",
"0.5463516",
"0.5462277",
"0.5452433",
"0.5444844",
"0.54424834",
"0.54392445",
"0.5425083",
"0.54197526",
"0.54070777",
"0.5398429",
"0.5394569",
"0.5382697",
"0.53767544",
"0.53590304"
] | 0.70721763 | 0 |
Return a list of atomic masses in the system | def getMasses(self):
    try:
        return self._massList
    except AttributeError:
        self._massList = [float(x) for x in self._raw_data['MASS']]
        return self._massList | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_mol_masses(mol):\n return np.array([a.GetMass() for a in mol.GetAtoms()])",
"def list_masses(self):\n masses = self.contents['Sub_ID']\n for i in range(self.num_atom_types):\n masses = np.where(masses == i, float(self.masses[i]), masses)\n self.contents['Mass'] = masses",
"def get_all_masses(self):\n allMasses = set()\n for interval in self.mz_tree:\n allMasses.add( interval.data[\"mass\"] )\n\n return allMasses",
"def test_atomic_masses():\n first = get_atomic_mass(\"As\")\n assert first == 74.9216\n \n second = get_atomic_mass(\"Be\")\n assert second == 9.012182\n\n third = get_atomic_mass(\"Li\")\n assert third == 6.941",
"def send_masses(self) -> np.ndarray:\n natom = len(self.molecule.geometry)\n masses = self.molecule.masses\n MDI_Send(masses, natom, MDI_DOUBLE, self.comm)\n return masses",
"def molarMasses(self, species = None):\n mm = _cantera.phase_getarray(self._phase_id,22)\n return self.selectSpecies(mm, species)",
"def get_mass(molecular_system, element ='atom', selection = 'all', syntax = 'MolSysMT'):\n\n from molsysmt.basic import get\n from molsysmt.physchem.atoms.mass import physical, units\n\n values=physical\n\n output = []\n if element == 'atom':\n atom_types = get(molecular_system, element=element, selection=selection, syntax=syntax, atom_type=True)\n for ii in atom_types:\n output.append(values[ii.capitalize()])\n elif element in ['group', 'component', 'molecule', 'chain', 'entity']:\n atom_types_in_element = get(molecular_system, element=element, selection=selection,\n syntax=syntaxi, atom_type=True)\n for aux in atom_types_in_element:\n output.append(np.sum([values[ii.capitalize()] for ii in aux]))\n elif element == 'system':\n atom_types_in_element = get(molecular_system, element='atom', selection='all',\n syntax=syntax, atom_type=True)\n output.append(np.sum([values[ii.capitalize()] for ii in atom_types_in_element]))\n\n if element =='system':\n output = output[0]*puw.unit(units)\n else:\n output = puw.quantity(np.array(output), units)\n\n return output",
"def get_mass(atomic_symbol: str) -> float:\n\n if atomic_symbol in _masses.keys():\n return _masses[atomic_symbol]\n\n else:\n return 0",
"def mass(self):\n return _cantera.reactor_mass(self.__reactor_id)",
"def center_of_mass(self, entity, geometric=False):\n\n # Structure, Model, Chain, Residue\n if isinstance(entity, Entity.Entity):\n atom_list = entity.get_atoms()\n # List of Atoms\n elif hasattr(entity, \"__iter__\") and [x for x in entity if x.level == \"A\"]:\n atom_list = entity\n # Some other weirdo object\n else:\n raise ValueError(\n f\"Center of Mass can only be calculated from the following objects:\\n\"\n f\"Structure, Model, Chain, Residue, list of Atoms.\"\n )\n\n masses = []\n positions = [[], [], []] # [ [X1, X2, ..] , [Y1, Y2, ...] , [Z1, Z2, ...] ]\n\n for atom in atom_list:\n masses.append(atom.mass)\n\n for i, coord in enumerate(atom.coord.tolist()):\n positions[i].append(coord)\n\n # If there is a single atom with undefined mass complain loudly.\n if \"ukn\" in set(masses) and not geometric:\n raise ValueError(\n f\"Some atoms don't have an element assigned.\\n\"\n f\"Try adding them manually or calculate the geometrical center of mass instead.\"\n )\n\n if geometric:\n return [sum(coord_list) / len(masses) for coord_list in positions]\n else:\n w_pos = [[], [], []]\n for atom_index, atom_mass in enumerate(masses):\n w_pos[0].append(positions[0][atom_index] * atom_mass)\n w_pos[1].append(positions[1][atom_index] * atom_mass)\n w_pos[2].append(positions[2][atom_index] * atom_mass)\n\n return [sum(coord_list) / sum(masses) for coord_list in w_pos]",
"def mass(self):\n self.check_symbols()\n return self._tree_mass(self._tokentree())",
"def get_all_atomic_numbers(molecule):\n return nps.vtk_to_numpy(molecule.GetAtomicNumberArray())",
"def atoms(self):\n print('processing [ atoms ]')\n x = \"\"\"[ atoms ]\n; nr type resnr residu atom cgnr charge mass\n\"\"\"\n qtot = 0.0\n fmt = '{nr:6d} {type} 1 {residu} {atom} 1 {charge:14.11f} {mass:7.4f} ; qtot {qtot}\\n'\n for i, node in enumerate(self):\n vals = {}\n vals['nr'] = 1+i\n vals['type'] = node.atom.name\n vals['residu'] = self.molname\n vals['atom'] = node.atom.name\n vals['charge'] = node.properties['charge']\n qtot += node.properties['charge']\n vals['qtot'] = qtot\n vals['mass'] = node.properties['mass']\n x += fmt.format(**vals)\n return x + '\\n'",
"def get_atoms(self):\n\n yield from self._molecule_state.get_atoms()",
"def getMolecularMass(self):\n dataDict = self.__dict__\n # get formula dictionary\n dd = {}\n for ca in self.chemAtoms:\n if isinstance(ca, ChemAtom):\n ss = ca.elementSymbol\n ii = dd.get(ss)\n if ii is None:\n dd[ss] = 1\n else:\n dd[ss] = ii + 1\n \n # calculate mass\n xx = self.root.currentChemElementStore\n result = sum(ii * xx.findFirstChemElement(symbol=ss).mass for (ss, ii) in dd.iteritems())\n return result",
"def calculate_molecular_mass(symbols):\n\n mass = 0\n for atom in symbols:\n mass += atom_weigths[atom]\n\n return mass",
"def _masses_string(self):\n return_str = 'Masses\\n\\n'\n for at in self.atom_types:\n return_str += '{} {:9.5f} # {}\\n'.format( at.atom_type_index, float(at.mass), at.label)\n return_str += '\\n'\n return return_str",
"def get_node_masses(self):\n\n # traverse: traverse itree with example and increment masses of visited nodes\n def traverse(example, it_node):\n # base case - in leaf\n if it_node.l == None and it_node.r == None:\n it_node.mass += 1\n # if split attribute value lower than split value\n elif example[it_node.split_attr] < it_node.split_val:\n it_node.mass += 1\n traverse(example, it_node.l) # Traverse left subtree.\n # if split attribute value greater or equal to split value\n else:\n it_node.mass += 1\n traverse(example, it_node.r) # Traverse right subtree.\n\n # compute_masses: compute masses of nodes in itree\n def compute_masses(itree):\n for example in self.data:\n traverse(example, itree)\n\n # TODO: parallelize!\n for itree in self.random_itrees: # Go over itrees and set masses of nodes.\n compute_masses(itree)",
"def read_mass(terms, masses):\n # Check that Masses line is correctly formatted\n try:\n assert len(terms) == 2\n atype = int(terms[0])\n assert atype > 0 and atype <= len(masses)\n mass = float(terms[1])\n assert mass > 0\n except:\n raise FileFormatError('Invalid mass term')\n \n if masses[atype - 1] is None:\n masses[atype - 1] = mass\n else:\n raise FileFormatError(f'Multiple masses listed for atom type {atype}')",
"def atoms(self):\n return self.qc_mol.atoms + self.br_mol.atoms + self.pc_mol.atoms",
"def get_magmom_string():\n\n magmoms = []\n poscar_lines = open('POSCAR').readlines()\n elements = poscar_lines[5].split()\n amounts = poscar_lines[6].split()\n for i in range(len(elements)):\n if Element(elements[i]).is_transition_metal:\n magmoms.append('{}*6.0'.format(amounts[i]))\n else:\n magmoms.append('{}*0.5'.format(amounts[i]))\n return ' '.join(magmoms)",
"def atoms(self, symbol): \n # this is a stub implementation\n #return 10;\n if symbol not in _atomic_mass: raise KeyError( symbol + \" is not in the table\")\n if symbol in _atomic_mass and symbol not in self._gettokens():\n return 0\n #the method is similar to __iter__, just different return\n parse = re.findall(r'([A-Z][a-z]*)(\\d*)|(\\()|(\\))(\\d*)', str(self.dele_mole))\n if symbol in _atomic_mass and symbol in self._gettokens():\n sym_num = [collections.Counter()]\n for name, n1, left_open, right_open, n2 in parse:\n if name:\n sym_num[-1][name] += int(n1 or 1) \n if left_open:\n sym_num.append(collections.Counter())\n if right_open:\n top = sym_num.pop()\n for s in top:\n sym_num[-1][s] += top[s] * int(n2 or 1) \n return sym_num[-1][symbol]",
"def get_all_atomic_positions(molecule):\n return nps.vtk_to_numpy(molecule.GetAtomicPositionArray().GetData())",
"def all_monoms(f):\n return dmp_all_monoms(f.rep, f.lev, f.dom)",
"def _get_system_molecules(self, system):\n positions = system.positions.view(-1, system.max_n_atoms,\n 3) * self.position_conversion\n\n atom_types = system.atom_types.view(-1, system.max_n_atoms)\n atom_masks = system.atom_masks.view(-1, system.max_n_atoms)\n return positions, atom_types, atom_masks",
"def get_all_motors():\n return mc.get('motor_values')",
"def iter_all_atoms(self):\n for model in self.iter_models():\n for atm in model.iter_all_atoms():\n yield atm",
"def atoms(self):\n return self._atoms",
"def getAtoms(self):\n return self.atoms",
"def _get_atoms(self):\n atoms = []\n invarioms = []\n\n for molecule in self.values():\n atoms += [atom for atom in molecule.atoms]\n invarioms += [atom for atom in molecule.atoms if atom.invariom_name is not None]\n self.atoms = atoms\n self.invarioms = invarioms"
] | [
"0.7253688",
"0.7055852",
"0.69788456",
"0.68436766",
"0.67790216",
"0.6521679",
"0.6353674",
"0.63500553",
"0.6138948",
"0.6096922",
"0.60727",
"0.60612094",
"0.60016483",
"0.59859467",
"0.59844255",
"0.59775376",
"0.5960306",
"0.5930356",
"0.5856117",
"0.58186483",
"0.5804293",
"0.5800201",
"0.5799809",
"0.5795888",
"0.5780103",
"0.5765677",
"0.57440364",
"0.5733029",
"0.5687024",
"0.5685389"
] | 0.7420046 | 0 |
Return the atom name for iAtom | def getAtomName(self, iAtom):
atomNames = self.getAtomNames()
return atomNames[iAtom] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def residueName(self,i):\n assert(i >= 0 and i < self.nAtoms())\n assert(self._c_structure is not NULL)\n return freesasa_structure_atom_res_name(self._c_structure,i)",
"def atom(self, atom_name, resnum, chain_id, icode=' ', alt=' ', model_num=0):\n return self.struct[model_num][chain_id][(alt, resnum, icode)][atom_name]",
"def get_mol_name(mol) -> str:\n return mol.GetProp(\"_Name\")",
"def getAtomNames(self):\n return self._raw_data['ATOM_NAME']",
"def get_name(self, asset):\n return self.get_name_and_meta(asset)[0]",
"def get_name() -> str:",
"def atom_name_or(default, molecule, index):\n try:\n return molecule.atom(index).name\n except (TypeError, IndexError):\n return default",
"def element_name(self, atomic_number):\n return self.GetElementName(atomic_number)",
"def get_name():",
"def get_name(node):\n if isinstance(node, ast.Name):\n return node.id",
"def get_element_name(ielem):\n\n return interrogate_element_name(ielem)",
"def get_nx_entry_name(self):\n return self.dp.nxEntryName",
"def icon_name(self, node) -> str:\n icon_name = None\n if isinstance(node, Item):\n if node.type in self.ITEM_ICONS:\n icon_name = self.ITEM_ICONS[node.type]\n else:\n # :(\n logger.warning(f'no such icon found for {node.type}')\n icon_name = 'unchecked'\n elif isinstance(node, Note):\n icon_name = 'text-background'\n return icon_name",
"def get_name(self):\n return self.attributes[\"name\"]",
"def name(node):\n\n return fst(node)",
"def prefix(cls):\n cname = cls.__name__\n return cname[:cname.rfind('Atom')]",
"def name(self):\n return self._imu.IMUName()",
"def _get_name(self):\n return '%s: %s-%s' % (\n self.fcs_number,\n self.parameter_type,\n self.parameter_value_type)",
"def getAtomType(self, iAtom):\n atomTypes=self.getAtomTypes()\n return atomTypes[iAtom]",
"def name(self):\n # type: () -> string_types\n return self._name",
"def name(self):\n # type: () -> string_types\n return self._name",
"def get_name(self):\n\n return ri.RhinoInput(self.last).get_name()",
"def getName(self,item):\n return item.s",
"def name(self) -> str:\n return self.observation.name",
"def get_name() -> str:\n pass",
"def get_name(self):\n return self._qname",
"def getName(self):\n return _libsbml.Reaction_getName(self)",
"def getElementName(self):\n return _libsbml.Reaction_getElementName(self)",
"def symbolic_name(self):\n return self._symbolic_name",
"def name ( self ) :\n return self.__name if self.__name else ''"
] | [
"0.70414406",
"0.67510134",
"0.6505652",
"0.64932865",
"0.6349202",
"0.63090885",
"0.6273081",
"0.6271318",
"0.6240153",
"0.6228405",
"0.6224214",
"0.61911225",
"0.61390066",
"0.61277556",
"0.6111768",
"0.6108843",
"0.6106719",
"0.61022294",
"0.6067352",
"0.60657865",
"0.60657865",
"0.6054424",
"0.60507494",
"0.6047389",
"0.6034676",
"0.6032581",
"0.6026341",
"0.6014772",
"0.6011213",
"0.6008319"
] | 0.88223165 | 0 |
Return the list of the system atom names | def getAtomNames(self):
return self._raw_data['ATOM_NAME'] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_system_keys(self) -> list[str]:\n return self.get(\"npc.systems\").keys()",
"def get_system_managers(only_name: bool = False) -> list[str]:\n\tHasRole = DocType(\"Has Role\")\n\tUser = DocType(\"User\")\n\n\tif only_name:\n\t\tfields = [User.name]\n\telse:\n\t\tfields = [User.full_name, User.name]\n\n\tsystem_managers = (\n\t\tfrappe.qb.from_(User)\n\t\t.join(HasRole)\n\t\t.on(HasRole.parent == User.name)\n\t\t.where(\n\t\t\t(HasRole.parenttype == \"User\")\n\t\t\t& (User.enabled == 1)\n\t\t\t& (HasRole.role == \"System Manager\")\n\t\t\t& (User.docstatus < 2)\n\t\t\t& (User.name.notin(frappe.STANDARD_USERS))\n\t\t)\n\t\t.select(*fields)\n\t\t.orderby(User.creation, order=Order.desc)\n\t\t.run(as_dict=True)\n\t)\n\n\tif only_name:\n\t\treturn [p.name for p in system_managers]\n\telse:\n\t\treturn [formataddr((p.full_name, p.name)) for p in system_managers]",
"def list_systems():\n return sorted(systems.keys())",
"def getChemCompSysNames(self):\n dataDict = self.__dict__\n result = frozenset(y for x in self.chemComp.namingSystems for y in x.chemCompSysNames if not y.specificChemCompVars).union(self.specificSysNames)\n return result",
"def nodeNames(self):\n if self.role == Roles.ACTIVE or self.role == Roles.PASSIVE:\n return Backend().configuration.getNodeNames()\n else:\n return [self.node, \"system-manager\"]",
"def all_registered_appnames():\n yield from sorted(Registry.monomers.keys())",
"def atomList(self):\n\n\t\tal = []\t\n\t\tfor chain in self.chain:\n\t\t\tfor res in chain.residue:\n\t\t\t\tfor atom in res.atom:\n\t\t\t\t\tal.append(atom)\n\n\t\treturn al",
"def list_programs():\n return list(INFO)",
"def names(self):\n return list(item.name for item in self.mechanisms)",
"def names(self) -> list[str]:",
"def get_cm_list ( self ) :\n cm_name_list = []\n stmt = \"select name from sdb_continuousmodel where sys002 =\\'T\\'\"\n self.oracle_cursor.arraysize = 100000\n self.oracle_cursor.execute(stmt)\n resultset = self.oracle_cursor.fetchmany()\n if resultset :\n for row in resultset :\n cm_name_list.append(str(row[0]))\n return cm_name_list",
"def namelist(self):\n return self._handle.namelist()",
"def namelist(self):\n return self._handle.namelist()",
"def get_system_fonts():\n fonts = set()\n for x in font_manager.findSystemFonts():\n dot = x.rfind('.')\n slash = x.rfind(sep)\n x = x[slash + 1:dot]\n fonts.add(x)\n return sorted(fonts)",
"def processNames(self):\n # MODIFIED 11/1/16 OLD:\n return list(item.process.name for item in self.process_tuples)\n # # MODIFIED 11/1/16 NEW:\n # return sorted(list(item.process.name for item in self.process_tuples))\n # MODIFIED 11/1/16 END",
"def list_manifests():\n import enaml\n with enaml.imports():\n from .pulses.manifest import PulsesManagerManifest\n from .tasks.manifest import PulsesTasksManifest\n from .measure.manifest import PulsesMeasureManifest\n return [PulsesManagerManifest, PulsesTasksManifest, PulsesMeasureManifest]",
"def get_standard_names(self):\n return [s for (i, s) in list(self._I2SMAP.items())\n if (i != self._CUSTOM) and s.strip()]",
"def monomer_names(self):\n output = set()\n for item in self.monomers():\n if item in self.pyranose_fac:\n output.add(self.pyranose_fac[item][\"name\"])\n return list(output)",
"def namelist(self):\n return self._handle.getnames()",
"def namelist(self):\n return self._handle.getnames()",
"def _get_system_molecules(self, system):\n positions = system.positions.view(-1, system.max_n_atoms,\n 3) * self.position_conversion\n\n atom_types = system.atom_types.view(-1, system.max_n_atoms)\n atom_masks = system.atom_masks.view(-1, system.max_n_atoms)\n return positions, atom_types, atom_masks",
"def installed_appnames():\n appnames = set()\n for finder in sys.meta_path:\n if hasattr(finder, 'appname'):\n appnames.add(finder.appname)\n return appnames",
"def get_atom_labels(self, full=False):\n import numpy\n\n labels = self.get_attr(\"atom_labels\")\n if full:\n return labels\n return numpy.array(labels)[self._get_equivalent_atom_list()].tolist()",
"def getAtomTypes(self):\n return self._raw_data['AMBER_ATOM_TYPE']",
"def mapped_names(self):\n return [x.distro for x in DistroMapping.distros_mapped_to(self.name, self.version)]",
"def list_syms():\n\tSymStringVec=[];\n\tSymStringVec.append(\"CSYM\");\n\tSymStringVec.append(\"DSYM\");\n\tSymStringVec.append(\"TET_SYM\");\n\tSymStringVec.append(\"OCT_SYM\");\n\tSymStringVec.append(\"ICOS_SYM\");\n\tSymStringVec.append(\"ISYM\");\n\treturn SymStringVec",
"def _list_of_availability_strings():\n names = [availability.name for availability in Availability]\n return names",
"def _atoms_string(self):\n return_str = 'Atoms\\n\\n'\n for atom in self.atoms:\n return_str += '{}\\n'.format(atom.input_string())\n return_str += '\\n'\n return return_str",
"def get_system_name(self):\n\n\t\treturn self.__system_name",
"def RAppNames(self):\n\t\tnames=[]\n\t\tfor item in range(self.rApps.Count):\n\t\t\tnames.append(self.rApps.Item(item).Name)\n\t\treturn names"
] | [
"0.6531567",
"0.61736906",
"0.61188096",
"0.6103787",
"0.6051859",
"0.600426",
"0.5983814",
"0.5960197",
"0.593975",
"0.58576983",
"0.58465",
"0.5842698",
"0.5842698",
"0.5801767",
"0.5748718",
"0.5717563",
"0.5702873",
"0.56862754",
"0.5666746",
"0.5666746",
"0.5654697",
"0.56462777",
"0.5641603",
"0.56348056",
"0.5632725",
"0.56053984",
"0.55989766",
"0.55964893",
"0.5578091",
"0.55780095"
] | 0.6822885 | 0 |
Return the AMBER atom type for iAtom | def getAtomType(self, iAtom):
atomTypes=self.getAtomTypes()
return atomTypes[iAtom] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_atom_type(self, atom):\n if self.check_if_keyword(atom):\n # 1 for keyword\n return 1\n if self.check_if_var(atom):\n # 2 for identifier\n return 2\n if self.check_if_const(atom):\n # 3 for constant\n return 3\n if atom == \" \" or atom == \"\" or atom == '' or atom == \"\\n\" or atom.isspace():\n # 4 for generic separators\n return 4\n return 0",
"def getAtomTypes(self):\n return self._raw_data['AMBER_ATOM_TYPE']",
"def is_atom(self):\n return self._atom",
"def atom(token):\n if REGEX_INTEGER.match(token):\n return int(token)\n else:\n return token",
"def get_type(self):\r\n return self.mm_type + self.meta_model.get_type()",
"def getTypeCode(self):\n return _libsbml.DefaultTerm_getTypeCode(self)",
"def atom(token):\n try:\n return int(token)\n except ValueError:\n try:\n return float(token)\n except ValueError:\n return Symbol(token) # Equivalent to str(token)",
"def atom(self, atom_name, resnum, chain_id, icode=' ', alt=' ', model_num=0):\n return self.struct[model_num][chain_id][(alt, resnum, icode)][atom_name]",
"def atom(token):\n try:\n return int(token)\n except ValueError:\n try:\n return float(token)\n except ValueError:\n return Symbol(token)",
"def token_type(self):\n return TypeInfo.get('Int')",
"def orbital_type(self):\n return self.name[0].upper()",
"def get_atypes(self):\n self.atypes = []\n self.hybs = []\n #self.zs = []\n for ai in self.m.GetAtoms():\n hybi = str( ai.GetHybridization() )\n self.hybs.append( hybi )\n zi = ai.GetAtomicNum()\n #self.zs.append( zi )\n si = ai.GetSymbol()\n if hybi == 'SP2':\n ar = ai.GetIsAromatic()\n ar_suffix = '_R' if ar else '_2'\n ap = si + ar_suffix # atomic_pattern\n elif hybi == 'SP3':\n if zi == 16 and ai.GetExplicitValence() == 6:\n ap = si + 'o3'\n elif zi in [9, 17, 35, 53]:\n ap = si\n else:\n ap = si + '_3'\n elif hybi == 'SP':\n ap = si + '_1'\n elif hybi in ['S', ]: #'UNSPECIFIED']:\n ap = si\n else:\n print((' unknown atom type: `%s`'%hybi))\n raise\n self.atypes.append( ap )",
"def _type(self):\n return self._id[1]",
"def get_natom(self):\n return",
"def __get_type(self):\r\n if self.__tokenizer.token_type() == TYPES_DIC[\"IDENTIFIER\"]:\r\n return self.__tokenizer.identifier()\r\n else:\r\n return self.__tokenizer.keyword()",
"def typecode (self) :\r\n return self.numeric_typecode",
"def getType(self):\n if (self.type == 's'):\n #suit type\n type = \"suit\"\n elif (self.type == 'b'):\n #boss type\n type = \"boss\"\n else:\n notify.error(\"Invalid DNA type: \", self.type)\n\n return type",
"def getTypeCode(self):\n return _libsbml.Reaction_getTypeCode(self)",
"def getTypeCode(self):\n return _libsbml.InitialAssignment_getTypeCode(self)",
"def get_atom_code(self, atom):\n for code, symbol in self.__symbols_dict.items():\n # if keyword, return associated code\n if symbol == atom:\n return code\n\n if self.check_if_var(atom):\n # if identifier, return 0\n return 0\n if self.check_if_const(atom):\n # if constant, return 1\n return 1\n\n # invalid atom\n return -1",
"def get_type(self) -> str:\n # Note: this name conflicts with existing python builtins\n return self[\"Type\"]",
"def celltype(self) -> str:\n\n hcell = self._get_hcell2()\n return hcell[\"celltype\"]",
"def getTypeCode(self):\n return _libsbml.Objective_getTypeCode(self)",
"def rna_type(entry):\n\n basic = basic_rna_type(entry)\n if basic == \"rrna\":\n return \"rRNA\"\n if basic == \"trna\":\n return \"tRNA\"\n if basic == \"ncrna\":\n return \"other\"\n if basic == \"snrna\":\n return \"snRNA\"\n raise ValueError(\"Cannot determine rna type for %s\" % entry)",
"def masttype(self):\n return self._masttype.get_waarde()",
"def get_type(self) -> TypeStr:\n return TYPE.inverse[self.type()]",
"def enum_type(self):\r\n if not hasattr(self, '_enum_type'):\r\n assert self.kind == CursorKind.ENUM_DECL\r\n self._enum_type = conf.lib.clang_getEnumDeclIntegerType(self)\r\n\r\n return self._enum_type",
"def getAtomType(self, outputLine):\n for atom in Types:\n if atom.name.lower() in outputLine:\n return atom\n print(\"Something weird produced/exited: %s\" % outputLine, end=\"\")\n return None",
"def getTypeCode(self):\n return _libsbml.ReactionGlyph_getTypeCode(self)",
"def getTypeCode(self):\n return _libsbml.FunctionTerm_getTypeCode(self)"
] | [
"0.7361625",
"0.71134794",
"0.6390988",
"0.6096414",
"0.60266525",
"0.59143364",
"0.5900974",
"0.5879816",
"0.5861368",
"0.5848359",
"0.58214134",
"0.58087665",
"0.576012",
"0.57093835",
"0.5701356",
"0.5661256",
"0.5641102",
"0.5638334",
"0.5619124",
"0.5604778",
"0.5604421",
"0.55954176",
"0.5587419",
"0.5581565",
"0.55803245",
"0.5571309",
"0.5545309",
"0.5543183",
"0.5521967",
"0.55153394"
] | 0.80560255 | 0 |
Return the list of the AMBER atom types | def getAtomTypes(self):
return self._raw_data['AMBER_ATOM_TYPE'] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_atypes(self):\n self.atypes = []\n self.hybs = []\n #self.zs = []\n for ai in self.m.GetAtoms():\n hybi = str( ai.GetHybridization() )\n self.hybs.append( hybi )\n zi = ai.GetAtomicNum()\n #self.zs.append( zi )\n si = ai.GetSymbol()\n if hybi == 'SP2':\n ar = ai.GetIsAromatic()\n ar_suffix = '_R' if ar else '_2'\n ap = si + ar_suffix # atomic_pattern\n elif hybi == 'SP3':\n if zi == 16 and ai.GetExplicitValence() == 6:\n ap = si + 'o3'\n elif zi in [9, 17, 35, 53]:\n ap = si\n else:\n ap = si + '_3'\n elif hybi == 'SP':\n ap = si + '_1'\n elif hybi in ['S', ]: #'UNSPECIFIED']:\n ap = si\n else:\n print((' unknown atom type: `%s`'%hybi))\n raise\n self.atypes.append( ap )",
"def gettypes(self):\n return [str(self.sd.xlate(t[0])) for t in self.sd.types]",
"def ntypes(self): # -> list[str]:\n ...",
"def etypes(self): # -> list[str]:\n ...",
"def get_type_list(cls):\n\n from pygments.lexers import get_all_lexers\n return [(name, aliases[0]) for name, aliases, filetypes, mimetypes in get_all_lexers()]",
"def type_list():\n for type_ in orm.DataFlagType.select():\n click.echo(type_.name)",
"def ntypes(self): # -> list[None]:\n ...",
"def get_types(self) -> List[str]:\n return sorted(list(self._radii.keys()))",
"def types(self) -> List[str]:\n return self._types",
"def get_list(self):\n return self._FF_TYPES",
"def get_list(self):\n return self._FF_TYPES",
"def list_bed_types():\n\n list = [\"ctrl\", \"cresis\", \"cresisp\", \"minus\", \"plus\", \"ba01_bed\", \"970mW_hs\", \"jak_1985\", \"no_bath\", \"wc\"]\n\n return list",
"def apm_types(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[Union[str, 'ApmType']]]]]:\n return pulumi.get(self, \"apm_types\")",
"def etypes(self): # -> list[None]:\n ...",
"def opinion_type_list():\n for type_ in orm.DataFlagOpinionType.select():\n click.echo(type_.name)",
"def get_atom_types(p_state, idx_image=-1, idx_chain=-1):\n nos = system.get_nos(p_state, idx_image, idx_chain)\n ArrayType = ctypes.c_int*nos\n Data = _Get_Atom_Types(ctypes.c_void_p(p_state), ctypes.c_int(idx_image), ctypes.c_int(idx_chain))\n array_pointer = ctypes.cast(Data, ctypes.POINTER(ArrayType))\n array = np.frombuffer(array_pointer.contents, dtype=ctypes.c_int)\n array_view = array.view()\n return array_view",
"def getTypesList():\n return Gw2Spidy._request('types')['results']",
"def getProposalTypesVocab(self):\n list = DisplayList()\n # Acquire the types\n types = self.aq_inner.aq_parent.getProposalTypes()\n for type in types:\n list.add(type, type)\n return list",
"def type_core(self):\n type_core = ' '.join(['{}'.format(atom.atom_type_index) for atom in self.atom_types\n if 'shell' not in atom.label])\n return type_core",
"def get_types(self):\n return self.types",
"def etypes(self) -> Sequence[str]:\n\n return [can_etype[1] for can_etype in self.canonical_etypes]",
"def types(self) -> list:\n if self._types is None:\n fdist = self.fdist # ranked order\n types_ = list(fdist.type.values)\n self._types = types_\n return self._types",
"def getListOfCompartmentTypes(self, *args):\n return _libsbml.Model_getListOfCompartmentTypes(self, *args)",
"def get_types(self) :\n\n return list(self.types)[1:]",
"def ntypes(self) -> Sequence[str]:\n ntypes = list(self.num_nodes_dict.keys())\n return ntypes",
"def ntypes(self): # -> None:\n ...",
"def listFeaturableContentTypes():",
"def get_token_types() -> List[type]:\n return ExtensionTokenTypes.__TOKEN_TYPES",
"def typedAntennaNames() :\n a=s.getAntennaAssignments()\n namelist = []\n for i in a:\n namelist.append( i.typedAntennaName )\n return namelist",
"def atomList(self):\n\n\t\tal = []\t\n\t\tfor chain in self.chain:\n\t\t\tfor res in chain.residue:\n\t\t\t\tfor atom in res.atom:\n\t\t\t\t\tal.append(atom)\n\n\t\treturn al"
] | [
"0.7295159",
"0.68550354",
"0.6841875",
"0.6571123",
"0.6437429",
"0.6416755",
"0.6404617",
"0.6334528",
"0.6310776",
"0.6259186",
"0.6259186",
"0.6229412",
"0.6226737",
"0.62120646",
"0.62048405",
"0.6127053",
"0.60887516",
"0.6074563",
"0.60686255",
"0.60447425",
"0.60129046",
"0.59779775",
"0.5935199",
"0.5933715",
"0.5881639",
"0.5870994",
"0.5863921",
"0.5859729",
"0.5835124",
"0.58329916"
] | 0.8205202 | 0 |
Return iAtom's residue number | def getResidueNumber(self, iAtom):
return self._getResiduePointer(iAtom)+1 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def residueNumber(self,i):\n assert(i >= 0 and i < self.nAtoms())\n assert(self._c_structure is not NULL)\n return freesasa_structure_atom_res_number(self._c_structure,i)",
"def residueName(self,i):\n assert(i >= 0 and i < self.nAtoms())\n assert(self._c_structure is not NULL)\n return freesasa_structure_atom_res_name(self._c_structure,i)",
"def residue(self, resnum, chain_id, icode=' ', alt=' ', model_num = 0):\n res = self.struct[model_num][chain_id][(alt, resnum, icode)]\n res.chain_id = chain_id\n return res",
"def get_atomic_number(molecule, atom_index):\n return molecule.GetAtomAtomicNumber(atom_index)",
"def _get_natom(self, file):\n f = open_general(file)\n tmptxt = f.readlines()\n f.close()\n itmp = search_string('NATOM is', tmptxt)\n natom = int(tmptxt.pop(itmp).split()[-1])\n return natom",
"def residue_amino(residue) :\n if residue :\n return residue.name[0:3].upper()\n else :\n return None",
"def getResidue(self, resi):\n\n\t\tif self.numChains == 0:\n\t\t\tprint \"WARNING: Molecule has no chains\"\n\t\t\treturn None\n\n\t\tresi = int(resi)\n\t\tfor chn in self.chain:\n\t\t\tfor res in chn.residue:\n\t\t\t\tif int(res.file_id) == resi:\n\t\t\t\t\treturn res\n\n\t\treturn None",
"def get_natom(self):\n return",
"def countAtom (dico_count, PDB_parsed, debug = 0):\n count = 0\n \n for atom in PDB_parsed : \n residue = tool.transformAA(atom[\"resName\"])\n if debug : print residue\n \n if residue in dico_count : \n atom_Name = atom[\"name\"]\n if atom_Name in dico_count[residue] : \n count = count + 1\n return count",
"def annulus_ident(self) -> int:\n return self._ann_ident",
"def find_highest_resnum(atom_list):\n resnum = 0\n for element in atom_list:\n if (int(element.residue_number) > int(resnum)):\n resnum = int(element.residue_number)\n return resnum",
"def atom(self, atom_name, resnum, chain_id, icode=' ', alt=' ', model_num=0):\n return self.struct[model_num][chain_id][(alt, resnum, icode)][atom_name]",
"def get_residue_info(self):\n return",
"def getNumAtoms(self):\n return int(self._getPointerValue('NATOM'))",
"def getResidueLabel(self, iAtom=None, iRes=None):\n if iRes is None and iAtom is None:\n raise Exception(\"only specify iRes or iAtom, not both\")\n if iRes is not None and iAtom is not None:\n raise Exception(\"iRes or iAtom must be set\")\n if iRes is not None:\n return self._raw_data['RESIDUE_LABEL'][iRes]\n else:\n return self.getResidueLabel(iRes=self._getResiduePointer(iAtom))",
"def get_residue(self, resnum, chain):\n res_chain = self.get_chain(chain)\n residue = []\n for line in res_chain.split(\"\\n\"):\n if str(resnum) == str(line[22:26].strip()):\n residue.append(line)\n return \"\\n\".join(residue)",
"def get_atom_code(self, atom):\n for code, symbol in self.__symbols_dict.items():\n # if keyword, return associated code\n if symbol == atom:\n return code\n\n if self.check_if_var(atom):\n # if identifier, return 0\n return 0\n if self.check_if_const(atom):\n # if constant, return 1\n return 1\n\n # invalid atom\n return -1",
"def rn(self):\n return self._rn",
"def atomic_number(self) -> int:\n return elements.index(self.label) + 1",
"def getResidue(self, resnum, icode=None):\n\n return self._hv.getResidue(self.getChid(), resnum, icode,\n self.getSegname())",
"def ied_num(self) -> str:\n return pulumi.get(self, \"ied_num\")",
"def instrID(self):\n return self.query('*IDN?')",
"def getResonanceResidue(resonance):\n\n residue = None\n if resonance.resonanceSet:\n residue = resonance.resonanceSet.findFirstAtomSet().findFirstAtom().residue\n \n return residue",
"def getAtomIndices( structure, resname ):\n atom_indices_ligand = []\n topology = structure.topology\n for atom in topology.atoms():\n if str(resname) in atom.residue.name:\n atom_indices_ligand.append(atom.index)\n\n return atom_indices_ligand",
"def get_num(elem):\n if isinstance(elem, str):\n return _find_index(elem)\n else:\n for atm in elem:\n if atm not in sym and atm[0] not in ['X', 'D']:\n raise ValueError('Unrecognized atomic symbol \\'' + atm +\n '\\'. Use X prefix for dummy atoms.')\n return np.array([_find_index(atm) for atm in elem])",
"def atomic_number(self):\n return atomic_number(self.sym)",
"def readInt(self) -> int:\n return self._unpack('!i', 4)",
"def next_residue(residue) :\n #Proteins go N terminal --> C terminal\n #The next reside is bonded to the C of this atom...\n for a in residue.peptide.C.bondedTo():\n if a.parent.parent != residue:\n return a.parent.parent\n return None",
"def ipts_number(self):\n return self._iptsNumber",
"def index(self, atom):\n return self.atom_list.index(atom)"
] | [
"0.84820133",
"0.68570244",
"0.6592199",
"0.61061615",
"0.60807073",
"0.6075778",
"0.59794825",
"0.5964086",
"0.59245735",
"0.59171975",
"0.5898385",
"0.5881746",
"0.5792914",
"0.5792746",
"0.57736784",
"0.57659894",
"0.56949157",
"0.56466013",
"0.560639",
"0.5595741",
"0.55512303",
"0.5538604",
"0.5531712",
"0.54977256",
"0.54904854",
"0.54837996",
"0.54408574",
"0.542401",
"0.5416301",
"0.5411028"
] | 0.8190536 | 1 |
Return residue label for iAtom OR iRes | def getResidueLabel(self, iAtom=None, iRes=None):
if iRes is None and iAtom is None:
raise Exception("only specify iRes or iAtom, not both")
if iRes is not None and iAtom is not None:
raise Exception("iRes or iAtom must be set")
if iRes is not None:
return self._raw_data['RESIDUE_LABEL'][iRes]
else:
return self.getResidueLabel(iRes=self._getResiduePointer(iAtom)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getResidueLabel(self):\n residueLabel = self.getFlagData('RESIDUE_LABEL')\n residueLabel = map(str, residueLabel)\n if residueLabel[0] != residueLabel[0].upper():\n self.printWarn(\"residue label '%s' in '%s' is not all UPPERCASE\" %\n (residueLabel[0], self.inputFile))\n self.printWarn(\"this may raise problem with some applications like CNS\")\n self.residueLabel = residueLabel",
"def residueName(self,i):\n assert(i >= 0 and i < self.nAtoms())\n assert(self._c_structure is not NULL)\n return freesasa_structure_atom_res_name(self._c_structure,i)",
"def residue_amino(residue) :\n if residue :\n return residue.name[0:3].upper()\n else :\n return None",
"def get_label(urs):\n return assign_term(urs)[1]",
"def atom(self, atom_name, resnum, chain_id, icode=' ', alt=' ', model_num=0):\n return self.struct[model_num][chain_id][(alt, resnum, icode)][atom_name]",
"def get_reaction_label(rmg_reaction):\n reactants = rmg_reaction.reactants\n products = rmg_reaction.products\n if len(reactants) > 1:\n reactants_string = '+'.join([reactant.molecule[0].toSMILES() for reactant in reactants])\n else:\n reactants_string = reactants[0].molecule[0].toSMILES()\n if len(products) > 1:\n products_string = '+'.join([product.molecule[0].toSMILES() for product in products])\n else:\n products_string = products[0].molecule[0].toSMILES()\n reaction_label = '_'.join([reactants_string, products_string])\n return reaction_label",
"def residue(self, resnum, chain_id, icode=' ', alt=' ', model_num = 0):\n res = self.struct[model_num][chain_id][(alt, resnum, icode)]\n res.chain_id = chain_id\n return res",
"def relation_label(self, rid):\n relations = self._load_relations()\n return relations[rid][\"label\"]",
"def check_residue(self, chain_id, resid, resname):\n if resname in PTM_lookup.keys():\n return\n else:\n try:\n ref_resname = PTM_reverse_lookup[resname]\n except KeyError:\n # print \"skipping unrecognized residue\", resname\n return\n ptm_dict = PTM_lookup[ref_resname][resname]\n return (chain_id, resid, ref_resname, ptm_dict[\"goto_atom\"], ptm_dict[\"name\"])",
"def extract_label(node):\n if (isinstance(node, UnaryOp) and\n isinstance(node.op, USub) and\n isinstance(node.operand, UnaryOp) and\n isinstance(node.operand.op, USub) and\n isinstance(node.operand.operand, Name)):\n return node.operand.operand.id\n else:\n return None",
"def get_atom_description(self, atom):\n return \"_\".join(\n [str(x) for x in [atom.GetAtomicNum(), atom.GetHybridization()]]\n )",
"def chainLabel(self,i):\n assert(i >= 0 and i < self.nAtoms())\n assert(self._c_structure is not NULL)\n cdef char label[2]\n label[0] = freesasa_structure_atom_chain(self._c_structure,i)\n label[1] = '\\0'\n return label",
"def get_label(genotype_type):\n if genotype_type == \"Hom\":\n return 0\n elif genotype_type == \"Het\":\n return 1\n elif genotype_type == \"Hom_alt\":\n return 2",
"def getCharmmParams(residue, name):\n resname = residue.get(\"name\")\n atomname = name\n\n # Nucleic Acid Substitutions\n\n if residue.get(\"type\") == 4:\n resname = resname[0]\n if resname == \"A\":\n resname = \"ADE\"\n elif resname == \"C\":\n resname = \"CYT\"\n elif resname == \"G\":\n resname = \"GUA\"\n elif resname == \"T\":\n resname = \"THY\"\n if atomname == \"C7\":\n atomname = \"C5M\"\n elif atomname == \"H71\":\n atomname = \"H51\"\n elif atomname == \"H72\":\n atomname = \"H52\"\n elif atomname == \"H73\":\n atomname = \"H53\"\n elif resname == \"U\":\n resname = \"URA\"\n\n if atomname == \"H5'1\":\n atomname = \"H5'\"\n elif atomname == \"H5'2\":\n atomname = \"H5''\"\n elif atomname == \"H2'1\":\n atomname = \"H2'\"\n elif atomname in [\"H2'2\", \"HO'2\"]:\n atomname = \"H2''\"\n\n if residue.getAtom(\"O2'\") is None:\n if atomname in [\"C2'\", \"H2'\", \"H2''\"]: resname = \"DEO1\"\n\n if residue.getAtom(\"H5T\") is not None:\n if atomname in [\"H5T\", \"O5'\", \"C5'\"]: resname = \"5TER\"\n if residue.getAtom(\"H3T\") is not None:\n if atomname in [\"H3T\", \"O3'\", \"C3'\"]: resname = \"3TER\"\n\n # Terminal/Water Substitutions\n\n if residue.get(\"isNterm\"):\n if resname == \"GLY\" and atomname in [\"N\", \"H\", \"H2\", \"H3\", \"CA\", \"HA2\", \"HA3\"]:\n resname = \"GLYP\"\n if atomname == \"H\":\n atomname = \"HT1\"\n elif atomname == \"H2\":\n atomname = \"HT2\"\n elif atomname == \"H3\":\n atomname = \"HT3\"\n elif resname == \"PRO\" and atomname in [\"N\", \"HN1\", \"HN2\", \"CD\", \"CA\", \"HD1\", \"HD2\", \"HA\", \"H2\", \"H3\"]:\n resname = \"PROP\"\n if atomname == \"H2\":\n atomname = \"HN1\"\n elif atomname == \"H3\":\n atomname = \"HN2\"\n elif resname == \"ACE\":\n if atomname == \"CH3\":\n atomname = \"CAY\"\n elif atomname == \"HH31\":\n atomname = \"HY1\"\n elif atomname == \"HH32\":\n atomname = \"HY2\"\n elif atomname == \"HH33\":\n atomname = \"HY3\"\n elif atomname == \"C\":\n atomname = \"CY\"\n elif atomname == \"O\":\n atomname = \"OY\"\n else:\n if atomname in [\"N\", \"H\", \"H2\", \"H3\", \"CA\", \"HA\"]:\n resname = \"NTER\"\n if atomname == \"H\":\n atomname = \"HT1\"\n elif atomname == \"H2\":\n atomname = \"HT2\"\n elif atomname == \"H3\":\n atomname = \"HT3\"\n elif residue.get(\"isCterm\"):\n if atomname in [\"O\", \"OXT\", \"C\"]:\n resname = \"CTER\"\n if atomname == \"O\":\n atomname = \"OT1\"\n elif atomname == \"OXT\":\n atomname = \"OT2\"\n elif residue.get(\"type\") == 3:\n resname = \"TP3M\"\n if atomname == \"O\": atomname = \"OH2\"\n\n # Residue substitutions\n\n if resname == \"ILE\":\n if atomname == \"CD1\":\n atomname = \"CD\"\n elif atomname == \"HD11\":\n atomname = \"HD1\"\n elif atomname == \"HD12\":\n atomname = \"HD2\"\n elif atomname == \"HD13\":\n atomname = \"HD3\"\n elif atomname == \"HG12\":\n atomname = \"HG11\"\n elif atomname == \"HG13\":\n atomname = \"HG12\"\n elif resname == \"CYS\" and \"HG\" not in residue.get(\"map\"):\n resname = \"CYS\"\n if atomname == \"CB\":\n resname = \"DISU\"\n atomname = \"1CB\"\n elif atomname == \"SG\":\n resname = \"DISU\"\n atomname = \"1SG\"\n elif resname == \"HIS\":\n if \"HD1\" in residue.get(\"map\") and \"HE2\" in residue.get(\"map\"):\n resname = \"HSP\"\n elif \"HD1\" in residue.get(\"map\"):\n resname = \"HSD\"\n elif \"HE2\" in residue.get(\"map\"):\n resname = \"HSE\"\n elif resname == \"GLU\" or resname == \"GLH\":\n if \"HE1\" in residue.get(\"map\"):\n if atomname == \"HE1\":\n atomname = \"HE2\"\n elif atomname == \"OE1\":\n atomname = \"OE2\"\n elif 
atomname == \"OE2\":\n atomname = \"OE1\"\n if atomname in [\"CG\", \"HG3\", \"HG1\", \"HG2\", \"CD\", \"OE1\", \"OE2\", \"HE2\"]:\n resname = \"GLUP\"\n else:\n resname = \"GLU\"\n elif \"HE2\" in residue.get(\"map\"):\n if atomname in [\"CG\", \"HG3\", \"HG1\", \"HG2\", \"CD\", \"OE1\", \"OE2\", \"HE2\"]:\n resname = \"GLUP\"\n else:\n resname = \"GLU\"\n elif resname == \"ASP\" or resname == \"ASH\":\n if \"HD1\" in residue.get(\"map\"):\n if atomname == \"HD1\":\n atomname = \"HD2\"\n elif atomname == \"OD1\":\n atomname = \"OD2\"\n elif atomname == \"OD2\":\n atomname = \"OD1\"\n if atomname in [\"CB\", \"HB3\", \"HB1\", \"HB2\", \"CG\", \"OD1\", \"OD2\", \"HD2\"]:\n resname = \"ASPP\"\n else:\n resname = \"ASP\"\n elif \"HD2\" in residue.get(\"map\"):\n if atomname in [\"CB\", \"HB3\", \"HB1\", \"HB2\", \"CG\", \"OD1\", \"OD2\", \"HD2\"]:\n resname = \"ASPP\"\n else:\n resname = \"ASP\"\n\n # HETATM Substitutions\n\n if resname == \"ACE\":\n if atomname == \"CH3\":\n atomname = \"CAY\"\n elif atomname == \"HH31\":\n atomname = \"HY1\"\n elif atomname == \"HH32\":\n atomname = \"HY2\"\n elif atomname == \"HH33\":\n atomname = \"HY3\"\n elif atomname == \"C\":\n atomname = \"CY\"\n elif atomname == \"O\":\n atomname = \"OY\"\n elif resname == \"ADP\":\n atomname = atomname.replace(\"*\", \"\\'\")\n elif resname == \"NME\":\n resname = \"CT3\"\n if atomname == \"HH31\":\n atomname = \"HT1\"\n elif atomname == \"HH32\":\n atomname = \"HT2\"\n elif atomname == \"HH33\":\n atomname = \"HT3\"\n elif atomname == \"CH3\":\n atomname = \"CAT\"\n elif atomname == \"N\":\n atomname = \"NT\"\n elif atomname == \"H\":\n atomname = \"HNT\"\n\n # Hydrogen Substitutions\n\n if atomname == \"H\":\n atomname = \"HN\"\n elif atomname == \"HA2\":\n atomname = \"HA1\"\n elif atomname == \"HA3\":\n atomname = \"HA2\"\n elif atomname == \"HB2\" and resname not in [\"ALA\"]:\n atomname = \"HB1\"\n elif atomname == \"HB3\" and resname not in [\"ALA\"]:\n atomname = \"HB2\"\n elif atomname == \"HD2\" and resname not in [\"HSP\", \"HSE\", \"HSD\", \"ASPP\"]:\n atomname = \"HD1\"\n elif atomname == \"HD3\" and resname not in [\"HIS\", \"HSE\", \"HSD\"]:\n atomname = \"HD2\"\n elif atomname == \"HE2\" and resname not in [\"TRP\", \"HSP\", \"HSE\", \"HSD\", \"GLUP\"]:\n atomname = \"HE1\"\n elif atomname == \"HE3\" and resname not in [\"TRP\", \"HSP\", \"HSE\", \"HSD\"]:\n atomname = \"HE2\"\n elif atomname == \"HG2\":\n atomname = \"HG1\"\n elif atomname == \"HG3\":\n atomname = \"HG2\"\n elif atomname == \"HG\" and resname in [\"SER\", \"CYS\"]:\n atomname = \"HG1\"\n\n return resname, atomname",
"def generate_label(self, molecule: vtkMolecule, atom_index: int) -> str:\n if self.style == LabelsStyle.Empty:\n array = molecule.GetVertexData().GetArray(\"scine_labels\")\n return \"\" if array is None else str(array.GetValue(atom_index))\n\n atom = molecule.GetAtom(atom_index)\n if self.style == LabelsStyle.Symbol:\n return str(vtkPeriodicTable().GetSymbol(atom.GetAtomicNumber()))\n if self.style == LabelsStyle.AtomicNumber:\n return str(atom.GetAtomicNumber())\n if self.style == LabelsStyle.IndexNumber:\n return str(atom_index)\n\n assert False, \"Unknown label style.\"\n return \"\"",
"def getRaceLabel(x,binary=False):\r\n ID = x.split(\"/\")[-1].split(\"_\")[0]\r\n label = truth[truth.DummyID == int(ID)]['Medview_Race'].values[0]\r\n\r\n if label == 'African American':\r\n return 0\r\n elif label == \"White\":\r\n return 1\r\n else:\r\n return 2",
"def getParseParams(residue, name):\n atomname = name\n resname = residue.name\n\n # Terminal/Water Substitutions\n\n nterm = residue.get(\"isNterm\")\n cterm = residue.get(\"isCterm\")\n if nterm and resname != \"ACE\":\n if resname == \"PRO\" and nterm == 2:\n resname = \"PR+\"\n if atomname == \"H2\":\n atomname = \"HN1\"\n elif atomname == \"H3\":\n atomname = \"HN2\"\n elif resname == \"PRO\" and nterm == 1:\n resname = \"PRN\"\n if atomname == \"H2\" or atomname == \"H3\": atomname = \"HN\"\n elif nterm == 2: # Neutral\n if atomname in [\"N\", \"H\", \"H2\", \"H3\", \"CA\", \"HA\", \"C\", \"O\"]:\n resname = \"BKN\"\n if atomname == \"H\":\n atomname = \"H1\"\n if atomname == 'H3':\n atomname = 'H2'\n elif nterm == 3: # Positive\n if atomname in [\"N\", \"H\", \"H2\", \"H3\", \"CA\", \"HA\", \"C\", \"O\"]:\n resname = \"BK+\"\n if atomname == \"H\": atomname = \"H1\"\n elif cterm:\n if atomname == \"O\":\n atomname = \"O1\"\n elif atomname == \"OXT\":\n atomname = \"O2\"\n if cterm == 1 and atomname in [\"N\", \"H\", \"HA\", \"CA\", \"C\", \"O1\", \"O2\"]:\n resname = \"BK-\"\n elif cterm == 2 and atomname in [\"N\", \"H\", \"HA\", \"CA\", \"C\", \"O1\", \"O2\", \"HO\"]:\n if atomname == \"HO\": atomname = \"H2\"\n resname = \"BKC\"\n # print 'Cterm resname is',resname\n elif residue.get(\"type\") == 3:\n resname = \"H2O\"\n if atomname == \"O\":\n atomname = \"OH\"\n elif atomname == \"H1\":\n atomname = \"HH1\"\n elif atomname == \"H2\":\n atomname = \"HH2\"\n\n # Residue Substitutions\n if resname == \"HSD\":\n resname = \"HID\"\n elif resname in [\"HIE\", \"HSE\"]:\n resname = \"HIS\"\n elif resname in [\"HIP\", \"HSP\"]:\n resname = \"HI+\"\n elif resname == \"ILE\":\n if atomname == \"HG12\":\n atomname = \"HG11\"\n elif atomname == \"HG13\":\n atomname = \"HG12\"\n elif atomname == \"CD\":\n atomname = \"CD1\"\n elif resname == \"CYS\" and \"HG\" not in residue.get(\"map\"):\n resname = \"CSS\"\n #\n # Histidine\n #\n elif resname == \"HIS\":\n if \"HD1\" in residue.get(\"map\") and \"HE2\" in residue.get(\"map\"):\n resname = \"HI+\"\n elif \"HD1\" in residue.get(\"map\"):\n resname = \"HID\"\n elif \"HE2\" in residue.get(\"map\"):\n resname = \"HIS\"\n elif resname == \"GLU\" or resname == \"GLH\":\n if \"HE1\" in residue.get(\"map\"):\n resname = \"GL0\"\n if atomname == \"HE1\":\n atomname = \"HE2\"\n elif atomname == \"OE1\":\n atomname = \"OE2\"\n elif atomname == \"OE2\":\n atomname = \"OE1\"\n elif \"HE2\" in residue.get(\"map\"):\n resname = \"GL0\"\n elif resname == \"ASP\" or resname == \"ASH\":\n if \"HD1\" in residue.get(\"map\"):\n resname = \"AS0\"\n if atomname == \"HD1\":\n atomname = \"HD2\"\n elif atomname == \"OD1\":\n atomname = \"OD2\"\n elif atomname == \"OD2\":\n atomname = \"OD1\"\n elif \"HD2\" in residue.get(\"map\"):\n resname = \"AS0\"\n elif resname == \"ACE\":\n if atomname == \"HH31\":\n atomname = \"HA1\"\n elif atomname == \"HH32\":\n atomname = \"HA2\"\n elif atomname == \"HH33\":\n atomname = \"HA3\"\n elif atomname == \"CH3\":\n atomname = \"CA\"\n elif resname == \"TYR\":\n if not \"HH\" in residue.get(\"map\"):\n resname = \"TYM\"\n elif resname == \"TYM\":\n resname = \"TY-\"\n elif resname == \"CYM\":\n resname = \"CY-\"\n elif resname == \"LYN\":\n resname = \"LY0\"\n #\n # Neutral LYS and neutral ARG detection based on hydrogens - added by Jens\n #\n elif resname == \"LYS\":\n if not \"HZ3\" in residue.get(\"map\"):\n resname = \"LY0\"\n elif resname == \"ARG\":\n if not \"HE\" in residue.get(\"map\"):\n resname = \"AR0\"\n elif 
resname == \"NME\":\n resname = \"N-M\"\n if atomname == \"CH3\":\n atomname = \"CA\"\n elif atomname == \"H\":\n atomname = \"H1\"\n elif atomname.startswith(\"HH\"):\n atomname = \"HA\" + atomname[-1]\n\n # Hydrogen Substitutions\n\n if atomname == \"H\":\n atomname = \"HN\"\n elif atomname == \"HA2\":\n atomname = \"HA1\"\n elif atomname == \"HA3\":\n atomname = \"HA2\"\n elif atomname == \"HB2\" and resname not in [\"ALA\"]:\n atomname = \"HB1\"\n elif atomname == \"HB3\" and resname not in [\"ALA\"]:\n atomname = \"HB2\"\n elif atomname == \"HD2\" and resname not in [\"HIS\", \"HI+\", \"HID\", \"AS0\"]:\n atomname = \"HD1\"\n elif atomname == \"HD3\" and resname not in [\"HIS\", \"HI+\", \"HID\"]:\n atomname = \"HD2\"\n elif atomname == \"HE2\" and resname not in [\"TRP\", \"HIS\", \"HI+\", \"HID\", \"GL0\"]:\n atomname = \"HE1\"\n elif atomname == \"HE3\" and resname not in [\"TRP\", \"HIS\", \"HI+\", \"HID\"]:\n atomname = \"HE2\"\n elif atomname == \"HG2\":\n atomname = \"HG1\"\n elif atomname == \"HG3\":\n atomname = \"HG2\"\n elif atomname == \"HZ2\" and resname == \"LY0\":\n atomname = \"HZ1\"\n elif atomname == \"HZ3\" and resname == \"LY0\":\n atomname = \"HZ2\"\n\n return resname, atomname",
"def residueNumber(self,i):\n assert(i >= 0 and i < self.nAtoms())\n assert(self._c_structure is not NULL)\n return freesasa_structure_atom_res_number(self._c_structure,i)",
"def getFixedResonanceName(resonance):\n \n resonanceSet = resonance.resonanceSet\n if resonanceSet:\n atomSets = tuple(resonanceSet.atomSets)\n\n if len(atomSets) > 3: # Whole residue selections\n return '*'\n\n name = makeAtomSetsGuiName(atomSets)\n if len(atomSets) > 1:\n resonances = resonanceSet.sortedResonances()\n i = resonances.index(resonance)\n name = name[:-1] + chr(ord('a')+i)\n if len(atomSets[0].atoms) > 1:\n name = name + '*'\n\n else:\n name = '[%d]' % resonance.serial\n\n return name",
"def getOriNameIndx( self, name ):\n \n if not self.oriNames:\n self.getOriNames( )\n\n if name in self.oriNames:\n return self.oriNames[ name ]\n elif name in self.oriNames.values():\n return name\n else:\n return -1",
"def getResonanceResidue(resonance):\n\n residue = None\n if resonance.resonanceSet:\n residue = resonance.resonanceSet.findFirstAtomSet().findFirstAtom().residue\n \n return residue",
"def get_label(name):\n lower = name.lower()\n vals = lower.split('_')\n if 'ho' in vals:\n name = 'Independent Estimate'\n elif 'alldata' in vals:\n name = 'Extra-Data Estimate'\n elif 'ris' in vals[0]:\n name = 'RIS'\n if 'w' in vals[0]:\n name += ' WIS'\n if 'pd' in vals[0]:\n name += ' PDIS'\n elif 'is' in vals[0]:\n name = 'OIS'\n if 'w' in vals[0]:\n name += ' WIS'\n if 'pd' in vals[0]:\n name += ' PDIS'\n if 'dr' in vals:\n name += ' DR'\n if 'wdr' in vals:\n name += ' WDR'\n return name",
"def getNames(self, resname, atomname):\n rname = None\n aname = None\n if resname in self.map:\n res = self.map[resname]\n if res.hasAtom(atomname):\n atom = res.atoms[atomname]\n aname = atom.name\n rname = atom.resname\n return rname, aname",
"def getAtomIndices( structure, resname ):\n atom_indices_ligand = []\n topology = structure.topology\n for atom in topology.atoms():\n if str(resname) in atom.residue.name:\n atom_indices_ligand.append(atom.index)\n\n return atom_indices_ligand",
"def get_group_label(group):\n indices = [a.index for a in group.atoms]\n names = [a.name for a in group.atoms]\n label = []\n for i in range(len(indices)):\n label.append('%d/%s' % (indices[i], names[i]))\n return(' '.join(label))",
"def getAmberParams(residue, name):\n atomname = name\n type = residue.get(\"type\")\n if type == 4:\n resname = residue.get(\"naname\")\n else:\n resname = residue.get(\"name\")\n\n # Residue Substitutions\n\n if residue.get(\"name\") == \"CYS\" and \"HG\" not in residue.get(\"map\"):\n resname = \"CYX\"\n elif residue.get(\"name\") == \"HIS\":\n if \"HD1\" in residue.get(\"map\") and \"HE2\" in residue.get(\"map\"):\n resname = \"HIP\"\n elif \"HD1\" in residue.get(\"map\"):\n resname = \"HID\"\n elif \"HE2\" in residue.get(\"map\"):\n resname = \"HIE\"\n else:\n resname = \"HID\" # Default for no hydrogens\n elif residue.get(\"name\") == \"HSP\":\n resname = \"HIP\"\n elif residue.get(\"name\") == \"HSE\":\n resname = \"HIE\"\n elif residue.get(\"name\") == \"HSD\":\n resname = \"HID\"\n elif residue.get(\"name\") == \"GLU\" or residue.get(\"name\") == \"GLH\":\n if \"HE1\" in residue.get(\"map\"):\n resname = \"GLH\"\n if atomname == \"HE1\":\n atomname = \"HE2\"\n elif atomname == \"OE1\":\n atomname = \"OE2\"\n elif atomname == \"OE2\":\n atomname = \"OE1\"\n elif \"HE2\" in residue.get(\"map\"):\n resname = \"GLH\"\n elif residue.get(\"name\") == \"ASP\" or residue.get(\"name\") == \"ASH\":\n if \"HD1\" in residue.get(\"map\"):\n resname = \"ASH\"\n if atomname == \"HD1\":\n atomname = \"HD2\"\n elif atomname == \"OD1\":\n atomname = \"OD2\"\n elif atomname == \"OD2\":\n atomname = \"OD1\"\n elif \"HD2\" in residue.get(\"map\"):\n resname = \"ASH\"\n\n if residue.get(\"isCterm\"):\n resname = \"C\" + resname\n elif residue.get(\"isNterm\"):\n resname = \"N\" + resname\n\n # Atom Substitutions\n\n if resname == \"WAT\":\n if atomname == \"O\":\n atomname = \"OW\"\n elif atomname == \"H1\":\n atomname = \"HW\"\n elif atomname == \"H2\":\n atomname = \"HW\"\n elif resname == \"ILE\":\n if atomname == \"CD\": atomname = \"CD1\"\n if resname[0] == \"N\" and resname != \"NME\": # N-terminal\n if atomname == \"H\": atomname = \"H1\"\n if (resname == \"CCYS\" or resname == \"NCYS\") and atomname == \"HG\": atomname = \"HSG\"\n if resname == \"CYM\" and atomname == \"H\": atomname = \"HN\"\n if residue.get(\"isNterm\") and resname == \"NPRO\" and atomname == \"HN2\":\n atomname = \"H2\"\n if residue.get(\"isNterm\") and resname == \"NPRO\" and atomname == \"HN1\":\n atomname = \"H3\"\n return resname, atomname",
"def residue_ramachandran_type(residue) :\n if residue_amino(residue)==\"GLY\" :\n return rama_GLYCINE\n elif residue_amino(residue)==\"PRO\" :\n return rama_PROLINE\n elif residue_amino(next_residue(residue))==\"PRO\" :\n #exlcudes those that are Pro or Gly\n return rama_PRE_PRO\n else :\n return rama_GENERAL",
"def getlabel(scores):\n main_score = scores['compound']\n if main_score > 0.1:\n return 'pos'\n elif main_score < -0.1:\n return 'neg'\n else:\n return 'neu'",
"def getResonanceName(resonance):\n\n if resonance.className == 'FixedResonance':\n resonance2 = resonance.getResonance()\n if resonance2 is None:\n return getFixedResonanceName(resonance)\n \n else:\n resonanceSet = resonance.resonanceSet\n resonanceSet2 = resonance2.resonanceSet\n \n if not resonanceSet2:\n return getFixedResonanceName(resonance)\n \n elif not resonanceSet:\n resonance = resonance2\n \n else:\n atoms = [aSet.atoms for aSet in resonanceSet.sortedAtomSets()]\n atoms2 = [aSet.atoms for aSet in resonanceSet2.sortedAtomSets()]\n \n if atoms == atoms2:\n resonance = resonance2\n else:\n return getFixedResonanceName(resonance)\n\n if hasattr(resonance, 'label'):\n return resonance.label\n\n resonanceSet = resonance.resonanceSet\n \n if resonanceSet:\n atomSets = tuple(resonanceSet.atomSets)\n name = makeAtomSetsGuiName(atomSets)\n if len(atomSets) > 1:\n name = name[:-1] + getAmbigProchiralLabel(resonance)\n if len(atomSets[0].atoms) > 1:\n name = name + '*'\n \n elif resonance.assignNames:\n assignNames = tuple(resonance.assignNames)\n name = '[%d]' % resonance.serial\n\n N = len(assignNames)\n if N > 1:\n for i in range(N):\n name = assignNames[i] + name\n if i < (N-1):\n name = '|' + name\n else:\n name = resonance.assignNames[0] + name\n \n elif resonance.name and (resonance.name != 'r%d' % resonance.serial):\n name = '[%d:%s]' % (resonance.serial,resonance.name)\n\n else:\n name = '[%d]' % resonance.serial\n\n resonance.label = name\n \n return name",
"def _get_label(self):\n if self.model.name == '':\n return \"KPI\"\n return \"KPI: {} ({})\".format(self.model.name, self.model.objective)"
] | [
"0.69392586",
"0.68396",
"0.65880954",
"0.626143",
"0.61504877",
"0.59839684",
"0.5865712",
"0.5863954",
"0.5814182",
"0.5686845",
"0.56801265",
"0.5625831",
"0.56091756",
"0.55119324",
"0.5510289",
"0.5485855",
"0.53934896",
"0.5372719",
"0.5372673",
"0.53568137",
"0.53537863",
"0.5340993",
"0.53378063",
"0.5317373",
"0.5312651",
"0.5312429",
"0.5288689",
"0.52877843",
"0.5278342",
"0.52516574"
] | 0.78172517 | 0 |
Return list of all rVdw, epsilon pairs for each atom. If off-diagonal elements of the Lennard-Jones A and B coefficient matrices are found, an NbfixPresent exception is raised | def getNonbondTerms(self):
if self._has_nbfix_terms:
raise NbfixPresent('Off-diagonal Lennard-Jones elements found. '
'Cannot determine LJ parameters for individual atoms.')
try:
return self._nonbondTerms
except AttributeError:
pass
# Check if there are any non-zero HBOND terms
for x, y in zip(self._raw_data['HBOND_ACOEF'], self._raw_data['HBOND_BCOEF']):
if float(x) or float(y):
raise Exception('10-12 interactions are not supported')
self._nonbondTerms=[]
lengthConversionFactor = units.angstrom.conversion_factor_to(units.nanometer)
energyConversionFactor = units.kilocalorie_per_mole.conversion_factor_to(units.kilojoule_per_mole)
numTypes = self.getNumTypes()
atomTypeIndexes=self._getAtomTypeIndexes()
type_parameters = [(0, 0) for i in range(numTypes)]
for iAtom in range(self.getNumAtoms()):
index=(numTypes+1)*(atomTypeIndexes[iAtom]-1)
nbIndex=int(self._raw_data['NONBONDED_PARM_INDEX'][index])-1
if nbIndex<0:
raise Exception("10-12 interactions are not supported")
acoef = float(self._raw_data['LENNARD_JONES_ACOEF'][nbIndex])
bcoef = float(self._raw_data['LENNARD_JONES_BCOEF'][nbIndex])
try:
rMin = (2*acoef/bcoef)**(1/6.0)
epsilon = 0.25*bcoef*bcoef/acoef
except ZeroDivisionError:
rMin = 1.0
epsilon = 0.0
type_parameters[atomTypeIndexes[iAtom]-1] = (rMin/2.0, epsilon)
rVdw = rMin/2.0*lengthConversionFactor
epsilon = epsilon*energyConversionFactor
self._nonbondTerms.append( (rVdw, epsilon) )
# Check if we have any off-diagonal modified LJ terms that would require
# an NBFIX-like solution
for i in range(numTypes):
for j in range(numTypes):
index = int(self._raw_data['NONBONDED_PARM_INDEX'][numTypes*i+j]) - 1
if index < 0: continue
rij = type_parameters[i][0] + type_parameters[j][0]
wdij = sqrt(type_parameters[i][1] * type_parameters[j][1])
a = float(self._raw_data['LENNARD_JONES_ACOEF'][index])
b = float(self._raw_data['LENNARD_JONES_BCOEF'][index])
if a == 0 or b == 0:
if a != 0 or b != 0 or (wdij != 0 and rij != 0):
self._has_nbfix_terms = True
raise NbfixPresent('Off-diagonal Lennard-Jones elements'
' found. Cannot determine LJ '
'parameters for individual atoms.')
elif (abs((a - (wdij * rij ** 12)) / a) > 1e-6 or
abs((b - (2 * wdij * rij**6)) / b) > 1e-6):
self._has_nbfix_terms = True
raise NbfixPresent('Off-diagonal Lennard-Jones elements '
'found. Cannot determine LJ parameters '
'for individual atoms.')
return self._nonbondTerms | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def find_all_params_from_kx(params):\r\n new_params = find_kzs(deepcopy(params))\r\n w = new_params['w']\r\n d_list = new_params['d_list']\r\n kx = new_params['kx']\r\n kz_list = new_params['kz_list']\r\n ex_list = new_params['ex_list']\r\n ez_list = new_params['ez_list']\r\n mu_list = new_params['mu_list']\r\n N = len(mu_list)\r\n \r\n mat = bc_matrix(new_params)\r\n eigenvals, eigenvecs = np.linalg.eig(mat)\r\n which_eigenval_is_zero = np.argmin(np.abs(eigenvals))\r\n null_vector = eigenvecs[:,which_eigenval_is_zero]\r\n if False:\r\n print('null vector:')\r\n print(null_vector)\r\n print('matrix entry absolute values:')\r\n print(np.abs(mat))\r\n print('abs(mat . null_vector) should be 0:')\r\n print(np.abs(np.dot(mat, null_vector)))\r\n print('calculated eigenvalue:')\r\n print(eigenvals[which_eigenval_is_zero])\r\n H_up_list = [0]\r\n H_up_list.extend(null_vector[i] for i in range(1, 2*N-2, 2))\r\n H_down_list = [null_vector[i] for i in range(0, 2*N-2, 2)]\r\n H_down_list.append(0)\r\n assert N == len(H_up_list) == len(H_down_list)\r\n \r\n Ex_up_list = [H_up_list[i] * kz_list[i] / (w * ex_list[i] * nu.eps0)\r\n for i in range(N)]\r\n Ex_down_list = [-H_down_list[i] * kz_list[i] / (w * ex_list[i] * nu.eps0)\r\n for i in range(N)]\r\n Ez_up_list = [-H_up_list[i] * kx / (w * ez_list[i] * nu.eps0)\r\n for i in range(N)]\r\n Ez_down_list = [-H_down_list[i] * kx / (w * ez_list[i] * nu.eps0)\r\n for i in range(N)]\r\n \r\n # normalize E and H.\r\n largest_Ez_up_index = np.argmax(np.abs(np.array(Ez_up_list)))\r\n scale_factor = (1 * nu.V/nu.nm) / Ez_up_list[largest_Ez_up_index]\r\n for X_list in [H_up_list, H_down_list, Ex_up_list, Ex_down_list,\r\n Ez_up_list, Ez_down_list]:\r\n for i in range(N):\r\n X_list[i] *= scale_factor\r\n new_params['H_up_list'] = H_up_list\r\n new_params['H_down_list'] = H_down_list\r\n new_params['Ex_up_list'] = Ex_up_list\r\n new_params['Ex_down_list'] = Ex_down_list\r\n new_params['Ez_up_list'] = Ez_up_list\r\n new_params['Ez_down_list'] = Ez_down_list\r\n \r\n # x-component of complex Poynting vector, integrated over a layer\r\n Sx_list = []\r\n for i in range(N):\r\n Ez_up = Ez_up_list[i]\r\n Ez_down = Ez_down_list[i]\r\n H_up_star = H_up_list[i].conjugate()\r\n H_down_star = H_down_list[i].conjugate()\r\n kz = kz_list[i]\r\n d = d_list[i]\r\n Sx = 0\r\n # add each term only if it's nonzero, to avoid 0 * nan in top and\r\n # bottom layers\r\n if Ez_up * H_up_star != 0:\r\n Sx += ((-Ez_up * H_up_star) / (4 * kz.imag)\r\n * (1 - cmath.exp(-2 * kz.imag * d)))\r\n if Ez_down * H_down_star != 0:\r\n Sx += ((-Ez_down * H_down_star) / (4 * kz.imag)\r\n * (1 - cmath.exp(-2 * kz.imag * d)))\r\n if Ez_down * H_up_star != 0:\r\n Sx += ((-Ez_down * H_up_star) / (4j * kz.real)\r\n * (1 - cmath.exp(-2j * kz.real * d))\r\n * cmath.exp(1j * kz * d))\r\n if Ez_up * H_down_star != 0:\r\n Sx += ((-Ez_up * H_down_star) / (4j * kz.real)\r\n * (1 - cmath.exp(-2j * kz.real * d))\r\n * cmath.exp(1j * kz * d))\r\n Sx_list.append(Sx)\r\n new_params['Sx_list'] = Sx_list\r\n # x-component of complex Poynting vector, integrated over all layers\r\n Sx_total = sum(Sx_list)\r\n new_params['Sx_total'] = Sx_total\r\n \r\n layer_bottom_list = [-inf, 0]\r\n for i in range(1,N-1):\r\n layer_bottom_list.append(layer_bottom_list[-1] + d_list[i])\r\n \r\n new_params['layer_bottom_list'] = layer_bottom_list\r\n return new_params",
"def test_born_newton(self):\n\n n0 = 3.4\n omega = 2*np.pi*200e12\n dl = 0.01\n chi3 = 2.8E-18\n\n width = 1\n L = 5\n L_chi3 = 4\n\n width_voxels = int(width/dl)\n L_chi3_voxels = int(L_chi3/dl)\n\n Nx = int(L/dl)\n Ny = int(3.5*width/dl)\n\n eps_r = np.ones((Nx, Ny))\n eps_r[:, int(Ny/2-width_voxels/2):int(Ny/2+width_voxels/2)] = np.square(n0)\n\n nl_region = np.zeros(eps_r.shape)\n nl_region[int(Nx/2-L_chi3_voxels/2):int(Nx/2+L_chi3_voxels/2), int(Ny/2-width_voxels/2):int(Ny/2+width_voxels/2)] = 1\n\n simulation = Simulation(omega, eps_r, dl, [15, 15], 'Ez')\n simulation.add_mode(n0, 'x', [17, int(Ny/2)], width_voxels*3)\n simulation.setup_modes()\n simulation.add_nl(chi3, nl_region, eps_scale=True, eps_max=np.max(eps_r))\n\n srcval_vec = np.logspace(1, 3, 3)\n pwr_vec = np.array([])\n T_vec = np.array([])\n for srcval in srcval_vec:\n simulation.setup_modes()\n simulation.src *= srcval\n\n # Newton\n simulation.solve_fields_nl(solver_nl='newton')\n E_newton = simulation.fields[\"Ez\"]\n\n # Born\n simulation.solve_fields_nl(solver_nl='born')\n E_born = simulation.fields[\"Ez\"]\n\n # More solvers (if any) should be added here with corresponding calls to assert_allclose() below\n\n assert_allclose(E_newton, E_born, rtol=1e-3)",
"def _epsilon(vds) -> np.ndarray:\n return vds[\"rhod_tot\"] / vds[\"rho\"]",
"def test_difficulties_without_eps_multi(self):\n\n def get_beamformer(A, B):\n return get_mvdr_vector_souden(\n A, B,\n eps=0,\n return_ref_channel=True\n )\n\n for args in [\n (\n [self.PhiXX * 0, self.PhiXX],\n [self.PhiNN, self.PhiNN],\n ),\n (\n [self.PhiXX, self.PhiXX],\n [self.PhiNN * 0, self.PhiNN],\n ),\n (\n [self.PhiXX * 0, self.PhiXX],\n [self.PhiNN * 0, self.PhiNN],\n ),\n (\n [self.PhiXX * np.inf, self.PhiXX],\n [self.PhiNN, self.PhiNN],\n ),\n (\n [self.PhiXX, self.PhiXX],\n [self.PhiNN * np.inf, self.PhiNN],\n ),\n (\n [self.PhiXX * np.inf, self.PhiXX],\n [self.PhiNN * np.inf, self.PhiNN],\n ),\n ]:\n with tc.assert_raises(AssertionError):\n get_beamformer(*args)",
"def test_8():\n answer_pdb_str = \"\"\"\nATOM 1 N ARG A 1 26.061 12.824 1.988 1.00 0.00 N\nATOM 2 CA ARG A 1 27.253 12.525 2.773 1.00 0.00 C\nATOM 3 C ARG A 1 28.520 12.882 2.003 1.00 0.00 C\nATOM 4 O ARG A 1 28.853 12.243 1.005 1.00 0.00 O\nATOM 5 CB ARG A 1 27.280 11.041 3.156 1.00 10.00 C\nATOM 6 CG ARG A 1 26.107 10.591 4.022 1.00 10.00 C\nATOM 7 CD ARG A 1 26.118 11.230 5.409 1.00 10.00 C\nATOM 8 NE ARG A 1 27.283 10.828 6.201 1.00 10.00 N\nATOM 9 CZ ARG A 1 27.735 11.441 7.298 1.00 10.00 C\nATOM 10 NH1 ARG A 1 27.146 12.525 7.803 1.00 10.00 N\nATOM 11 NH2 ARG A 1 28.808 10.956 7.908 1.00 10.00 N\nATOM 12 N ALA A 2 29.223 13.907 2.474 1.00 0.00 N\nATOM 13 CA ALA A 2 30.455 14.351 1.832 1.00 0.00 C\nATOM 14 C ALA A 2 31.652 14.171 2.758 1.00 0.00 C\nATOM 15 O ALA A 2 31.775 14.859 3.772 1.00 0.00 O\nATOM 16 CB ALA A 2 30.331 15.807 1.408 1.00 0.00 C\nATOM 17 N HIS A 3 32.534 13.242 2.403 1.00 0.00 N\nATOM 18 CA HIS A 3 33.724 12.970 3.202 1.00 0.00 C\nATOM 19 C HIS A 3 34.993 13.295 2.422 1.00 0.00 C\nATOM 20 O HIS A 3 35.327 12.618 1.450 1.00 0.00 O\nATOM 21 CB HIS A 3 33.744 11.503 3.640 1.00 0.00 C\nATOM 22 CG HIS A 3 32.618 11.130 4.554 1.00 0.00 C\nATOM 23 ND1 HIS A 3 32.586 11.494 5.882 1.00 0.00 N\nATOM 24 CD2 HIS A 3 31.485 10.424 4.330 1.00 0.00 C\nATOM 25 CE1 HIS A 3 31.481 11.029 6.437 1.00 0.00 C\nATOM 26 NE2 HIS A 3 30.795 10.375 5.517 1.00 0.00 N\nATOM 27 N ALA A 4 35.698 14.335 2.856 1.00 0.00 N\nATOM 28 CA ALA A 4 36.932 14.752 2.201 1.00 0.00 C\nATOM 29 C ALA A 4 38.127 14.604 3.136 1.00 0.00 C\nATOM 30 O ALA A 4 38.248 15.329 4.124 1.00 0.00 O\nATOM 31 CB ALA A 4 36.812 16.192 1.723 1.00 0.00 C\nATOM 32 N ASP A 5 39.007 13.660 2.818 1.00 0.00 N\nATOM 33 CA ASP A 5 40.194 13.415 3.630 1.00 0.00 C\nATOM 34 C ASP A 5 41.467 13.708 2.841 1.00 0.00 C\nATOM 35 O ASP A 5 41.801 12.995 1.896 1.00 0.00 O\nATOM 36 CB ASP A 5 40.211 11.966 4.122 1.00 0.00 C\nATOM 37 CG ASP A 5 41.346 11.691 5.089 1.00 0.00 C\nATOM 38 OD1 ASP A 5 41.256 12.134 6.254 1.00 0.00 O\nATOM 39 OD2 ASP A 5 42.327 11.032 4.685 1.00 0.00 O\nATOM 40 N ALA A 6 42.172 14.763 3.238 1.00 0.00 N\nATOM 41 CA ALA A 6 43.409 15.152 2.570 1.00 0.00 C\nATOM 42 C ALA A 6 44.601 15.036 3.514 1.00 0.00 C\nATOM 43 O ALA A 6 44.722 15.797 4.474 1.00 0.00 O\nATOM 44 CB ALA A 6 43.294 16.573 2.039 1.00 0.00 C\nATOM 45 N GLU A 7 45.480 14.079 3.234 1.00 0.00 N\nATOM 46 CA GLU A 7 46.665 13.862 4.057 1.00 0.00 C\nATOM 47 C GLU A 7 47.940 14.122 3.261 1.00 0.00 C\nATOM 48 O GLU A 7 48.275 13.373 2.344 1.00 0.00 O\nATOM 49 CB GLU A 7 46.677 12.432 4.604 1.00 0.00 C\nATOM 50 CG GLU A 7 45.565 12.140 5.599 1.00 0.00 C\nATOM 51 CD GLU A 7 45.595 10.711 6.103 1.00 0.00 C\nATOM 52 OE1 GLU A 7 46.403 9.912 5.585 1.00 0.00 O\nATOM 53 OE2 GLU A 7 44.809 10.384 7.019 1.00 0.00 O\nATOM 54 N ALA A 8 48.647 15.189 3.620 1.00 0.00 N\nATOM 55 CA ALA A 8 49.886 15.550 2.941 1.00 0.00 C\nATOM 56 C ALA A 8 51.076 15.468 3.892 1.00 0.00 C\nATOM 57 O ALA A 8 51.196 16.264 4.823 1.00 0.00 O\nATOM 58 CB ALA A 8 49.776 16.951 2.356 1.00 0.00 C\nATOM 59 N ALA A 10 55.122 15.615 4.002 1.00 0.00 N\nATOM 60 CA ALA A 10 56.363 15.948 3.313 1.00 0.00 C\nATOM 61 C ALA A 10 57.551 15.898 4.269 1.00 0.00 C\nATOM 62 O ALA A 10 57.671 16.728 5.170 1.00 0.00 O\nATOM 63 CB ALA A 10 56.258 17.326 2.676 1.00 0.00 C\nATOM 64 N ASN A 11 58.427 14.919 4.065 1.00 0.00 N\nATOM 65 CA ASN A 11 59.606 14.759 4.908 1.00 0.00 C\nATOM 66 C ASN A 11 60.886 14.953 4.102 1.00 0.00 C\nATOM 67 O ASN A 11 61.222 14.136 3.244 1.00 0.00 O\nATOM 68 CB ASN A 11 59.609 13.379 5.562 
1.00 0.00 C\nATOM 69 CG ASN A 11 58.532 13.236 6.620 1.00 0.00 C\nATOM 70 OD1 ASN A 11 58.296 14.149 7.410 1.00 0.00 O\nATOM 71 ND2 ASN A 11 57.872 12.083 6.640 1.00 0.00 N\nATOM 72 N ALA A 12 61.597 16.041 4.383 1.00 0.00 N\nATOM 73 CA ALA A 12 62.841 16.345 3.686 1.00 0.00 C\nATOM 74 C ALA A 12 64.025 16.328 4.646 1.00 0.00 C\nATOM 75 O ALA A 12 64.145 17.191 5.515 1.00 0.00 O\nATOM 76 CB ALA A 12 62.740 17.698 2.997 1.00 0.00 C\nATOM 77 N GLN A 13 64.899 15.340 4.481 1.00 0.00 N\nATOM 78 CA GLN A 13 66.076 15.209 5.332 1.00 0.00 C\nATOM 79 C GLN A 13 67.359 15.370 4.522 1.00 0.00 C\nATOM 80 O GLN A 13 67.695 14.521 3.697 1.00 0.00 O\nATOM 81 CB GLN A 13 66.071 13.849 6.037 1.00 0.00 C\nATOM 82 CG GLN A 13 67.212 13.651 7.023 1.00 0.00 C\nATOM 83 CD GLN A 13 67.140 12.317 7.739 1.00 0.00 C\nATOM 84 OE1 GLN A 13 66.251 11.506 7.477 1.00 0.00 O\nATOM 85 NE2 GLN A 13 68.078 12.082 8.650 1.00 0.00 N\nATOM 86 N ALA A 14 68.071 16.466 4.765 1.00 0.00 N\nATOM 87 CA ALA A 14 69.318 16.740 4.059 1.00 0.00 C\nATOM 88 C ALA A 14 70.500 16.757 5.022 1.00 0.00 C\nATOM 89 O ALA A 14 70.620 17.652 5.859 1.00 0.00 O\nATOM 90 CB ALA A 14 69.222 18.067 3.320 1.00 0.00 C\nATOM 91 N LEU A 15 71.372 15.761 4.897 1.00 0.00 N\nATOM 92 CA LEU A 15 72.547 15.660 5.755 1.00 0.00 C\nATOM 93 C LEU A 15 73.832 15.788 4.943 1.00 0.00 C\nATOM 94 O LEU A 15 74.168 14.907 4.151 1.00 0.00 O\nATOM 95 CB LEU A 15 72.541 14.325 6.508 1.00 0.00 C\nATOM 96 CG LEU A 15 71.415 14.114 7.526 1.00 0.00 C\nATOM 97 CD1 LEU A 15 71.462 12.699 8.081 1.00 0.00 C\nATOM 98 CD2 LEU A 15 71.487 15.136 8.654 1.00 0.00 C\nATOM 99 N ALA A 16 74.546 16.890 5.146 1.00 0.00 N\nATOM 100 CA ALA A 16 75.795 17.135 4.434 1.00 0.00 C\nATOM 101 C ALA A 16 76.975 17.185 5.399 1.00 0.00 C\nATOM 102 O ALA A 16 77.095 18.110 6.202 1.00 0.00 O\nATOM 103 CB ALA A 16 75.704 18.434 3.646 1.00 0.00 C\nATOM 104 N PHE A 17 77.845 16.184 5.313 1.00 0.00 N\nATOM 105 CA PHE A 17 79.018 16.111 6.177 1.00 0.00 C\nATOM 106 C PHE A 17 80.304 16.206 5.364 1.00 0.00 C\nATOM 107 O PHE A 17 80.640 15.296 4.606 1.00 0.00 O\nATOM 108 CB PHE A 17 79.005 14.807 6.980 1.00 0.00 C\nATOM 109 CG PHE A 17 77.889 14.721 7.981 1.00 0.00 C\nATOM 110 CD1 PHE A 17 77.992 15.351 9.210 1.00 0.00 C\nATOM 111 CD2 PHE A 17 76.735 14.009 7.693 1.00 0.00 C\nATOM 112 CE1 PHE A 17 76.966 15.272 10.133 1.00 0.00 C\nATOM 113 CE2 PHE A 17 75.706 13.927 8.612 1.00 0.00 C\nATOM 114 CZ PHE A 17 75.822 14.560 9.833 1.00 0.00 C\nATOM 115 N ALA A 18 81.021 17.314 5.528 1.00 0.00 N\nATOM 116 CA ALA A 18 82.272 17.529 4.810 1.00 0.00 C\nATOM 117 C ALA A 18 83.450 17.613 5.775 1.00 0.00 C\nATOM 118 O ALA A 18 83.570 18.567 6.543 1.00 0.00 O\nATOM 119 CB ALA A 18 82.186 18.797 3.973 1.00 0.00 C\nATOM 120 N TYR A 19 84.318 16.606 5.729 1.00 0.00 N\nATOM 121 CA TYR A 19 85.488 16.564 6.598 1.00 0.00 C\nATOM 122 C TYR A 19 86.777 16.625 5.785 1.00 0.00 C\nATOM 123 O TYR A 19 87.113 15.687 5.063 1.00 0.00 O\nATOM 124 CB TYR A 19 85.471 15.292 7.450 1.00 0.00 C\nATOM 125 CG TYR A 19 84.363 15.258 8.479 1.00 0.00 C\nATOM 126 CD1 TYR A 19 83.662 16.411 8.812 1.00 0.00 C\nATOM 127 CD2 TYR A 19 84.016 14.074 9.116 1.00 0.00 C\nATOM 128 CE1 TYR A 19 82.648 16.386 9.751 1.00 0.00 C\nATOM 129 CE2 TYR A 19 83.002 14.039 10.057 1.00 0.00 C\nATOM 130 CZ TYR A 19 82.323 15.197 10.369 1.00 0.00 C\nATOM 131 OH TYR A 19 81.313 15.166 11.305 1.00 0.00 O\nATOM 132 N ALA A 20 87.496 17.737 5.909 1.00 0.00 N\nATOM 133 CA ALA A 20 88.749 17.922 5.187 1.00 0.00 C\nATOM 134 C ALA A 20 89.925 18.039 6.151 1.00 0.00 C\nATOM 135 O ALA A 
20 90.046 19.021 6.883 1.00 0.00 O\nATOM 136 CB ALA A 20 88.668 19.159 4.303 1.00 0.00 C\nATOM 137 N VAL A 21 90.791 17.030 6.145 1.00 0.00 N\nATOM 138 CA VAL A 21 91.959 17.017 7.018 1.00 0.00 C\nATOM 139 C VAL A 21 93.250 17.045 6.207 1.00 0.00 C\nATOM 140 O VAL A 21 93.585 16.079 5.521 1.00 0.00 O\nATOM 141 CB VAL A 21 91.967 15.769 7.925 1.00 0.00 C\nATOM 142 CG1 VAL A 21 93.241 15.722 8.760 1.00 0.00 C\nATOM 143 CG2 VAL A 21 90.735 15.749 8.820 1.00 0.00 C\nATOM 144 N ALA A 22 93.971 18.159 6.291 1.00 0.00 N\nATOM 145 CA ALA A 22 95.226 18.315 5.565 1.00 0.00 C\nATOM 146 C ALA A 22 96.400 18.465 6.527 1.00 0.00 C\nATOM 147 O ALA A 22 96.521 19.473 7.222 1.00 0.00 O\nATOM 148 CB ALA A 22 95.150 19.517 4.636 1.00 0.00 C\nTER\nATOM 149 N ARG B 1 27.961 0.504 1.988 1.00 0.00 N\nATOM 150 CA ARG B 1 29.153 0.205 2.773 1.00 0.00 C\nATOM 151 C ARG B 1 30.420 0.562 2.003 1.00 0.00 C\nATOM 152 O ARG B 1 30.753 -0.077 1.005 1.00 0.00 O\nATOM 153 CB ARG B 1 29.180 -1.279 3.156 1.00 10.00 C\nATOM 154 CG ARG B 1 28.007 -1.729 4.022 1.00 10.00 C\nATOM 155 CD ARG B 1 28.018 -1.090 5.409 1.00 10.00 C\nATOM 156 NE ARG B 1 29.183 -1.492 6.201 1.00 10.00 N\nATOM 157 CZ ARG B 1 29.635 -0.879 7.298 1.00 10.00 C\nATOM 158 NH1 ARG B 1 30.708 -1.364 7.908 1.00 10.00 N\nATOM 159 NH2 ARG B 1 29.046 0.205 7.803 1.00 10.00 N\nATOM 160 N ALA B 2 31.123 1.587 2.474 1.00 0.00 N\nATOM 161 CA ALA B 2 32.355 2.031 1.832 1.00 0.00 C\nATOM 162 C ALA B 2 33.552 1.851 2.758 1.00 0.00 C\nATOM 163 O ALA B 2 33.675 2.539 3.772 1.00 0.00 O\nATOM 164 CB ALA B 2 32.231 3.487 1.408 1.00 0.00 C\nATOM 165 N HIS B 3 34.434 0.922 2.403 1.00 0.00 N\nATOM 166 CA HIS B 3 35.624 0.650 3.202 1.00 0.00 C\nATOM 167 C HIS B 3 36.893 0.975 2.422 1.00 0.00 C\nATOM 168 O HIS B 3 37.227 0.298 1.450 1.00 0.00 O\nATOM 169 CB HIS B 3 35.644 -0.817 3.640 1.00 0.00 C\nATOM 170 CG HIS B 3 34.518 -1.190 4.554 1.00 0.00 C\nATOM 171 ND1 HIS B 3 34.311 -0.928 5.866 1.00 0.00 C\nATOM 172 CD2 HIS B 3 33.431 -1.925 4.134 1.00 0.00 N\nATOM 173 CE1 HIS B 3 33.113 -1.504 6.211 1.00 0.00 N\nATOM 174 NE2 HIS B 3 32.603 -2.100 5.148 1.00 0.00 C\nATOM 175 N ALA B 4 37.598 2.015 2.856 1.00 0.00 N\nATOM 176 CA ALA B 4 38.832 2.432 2.201 1.00 0.00 C\nATOM 177 C ALA B 4 40.027 2.284 3.136 1.00 0.00 C\nATOM 178 O ALA B 4 40.148 3.009 4.124 1.00 0.00 O\nATOM 179 CB ALA B 4 38.712 3.872 1.723 1.00 0.00 C\nATOM 180 N ASP B 5 40.907 1.340 2.818 1.00 0.00 N\nATOM 181 CA ASP B 5 42.094 1.095 3.630 1.00 0.00 C\nATOM 182 C ASP B 5 43.367 1.388 2.841 1.00 0.00 C\nATOM 183 O ASP B 5 43.701 0.675 1.896 1.00 0.00 O\nATOM 184 CB ASP B 5 42.111 -0.354 4.122 1.00 0.00 C\nATOM 185 CG ASP B 5 43.246 -0.629 5.089 1.00 0.00 C\nATOM 186 OD1 ASP B 5 43.158 -0.186 6.253 1.00 0.00 O\nATOM 187 OD2 ASP B 5 44.227 -1.288 4.683 1.00 0.00 O\nATOM 188 N ALA B 6 44.072 2.443 3.238 1.00 0.00 N\nATOM 189 CA ALA B 6 45.309 2.832 2.570 1.00 0.00 C\nATOM 190 C ALA B 6 46.501 2.716 3.514 1.00 0.00 C\nATOM 191 O ALA B 6 46.622 3.477 4.474 1.00 0.00 O\nATOM 192 CB ALA B 6 45.194 4.253 2.039 1.00 0.00 C\nATOM 193 N GLU B 7 47.380 1.759 3.234 1.00 0.00 N\nATOM 194 CA GLU B 7 48.565 1.542 4.057 1.00 0.00 C\nATOM 195 C GLU B 7 49.840 1.802 3.261 1.00 0.00 C\nATOM 196 O GLU B 7 50.175 1.053 2.344 1.00 0.00 O\nATOM 197 CB GLU B 7 48.577 0.112 4.604 1.00 0.00 C\nATOM 198 CG GLU B 7 47.465 -0.180 5.599 1.00 0.00 C\nATOM 199 CD GLU B 7 47.495 -1.609 6.103 1.00 0.00 C\nATOM 200 OE1 GLU B 7 48.305 -2.409 5.584 1.00 0.00 O\nATOM 201 OE2 GLU B 7 46.711 -1.936 7.018 1.00 0.00 O\nATOM 202 N ALA B 8 50.547 2.869 3.620 1.00 
0.00 N\nATOM 203 CA ALA B 8 51.786 3.230 2.941 1.00 0.00 C\nATOM 204 C ALA B 8 52.976 3.148 3.892 1.00 0.00 C\nATOM 205 O ALA B 8 53.096 3.944 4.823 1.00 0.00 O\nATOM 206 CB ALA B 8 51.676 4.631 2.356 1.00 0.00 C\nATOM 207 N ALA B 10 57.022 3.295 4.002 1.00 0.00 N\nATOM 208 CA ALA B 10 58.263 3.628 3.313 1.00 0.00 C\nATOM 209 C ALA B 10 59.451 3.578 4.269 1.00 0.00 C\nATOM 210 O ALA B 10 59.571 4.408 5.170 1.00 0.00 O\nATOM 211 CB ALA B 10 58.158 5.006 2.676 1.00 0.00 C\nATOM 212 N ASN B 11 60.327 2.599 4.065 1.00 0.00 N\nATOM 213 CA ASN B 11 61.506 2.439 4.908 1.00 0.00 C\nATOM 214 C ASN B 11 62.786 2.633 4.102 1.00 0.00 C\nATOM 215 O ASN B 11 63.122 1.816 3.244 1.00 0.00 O\nATOM 216 CB ASN B 11 61.509 1.059 5.562 1.00 0.00 C\nATOM 217 CG ASN B 11 60.432 0.916 6.620 1.00 0.00 C\nATOM 218 OD1 ASN B 11 60.252 1.957 7.425 1.00 0.00 N\nATOM 219 ND2 ASN B 11 59.769 -0.116 6.713 1.00 0.00 O\nATOM 220 N ALA B 12 63.497 3.721 4.383 1.00 0.00 N\nATOM 221 CA ALA B 12 64.741 4.025 3.686 1.00 0.00 C\nATOM 222 C ALA B 12 65.925 4.008 4.646 1.00 0.00 C\nATOM 223 O ALA B 12 66.045 4.871 5.515 1.00 0.00 O\nATOM 224 CB ALA B 12 64.640 5.378 2.997 1.00 0.00 C\nATOM 225 N GLN B 13 66.799 3.020 4.481 1.00 0.00 N\nATOM 226 CA GLN B 13 67.976 2.889 5.332 1.00 0.00 C\nATOM 227 C GLN B 13 69.259 3.050 4.522 1.00 0.00 C\nATOM 228 O GLN B 13 69.595 2.201 3.697 1.00 0.00 O\nATOM 229 CB GLN B 13 67.971 1.529 6.037 1.00 0.00 C\nATOM 230 CG GLN B 13 69.112 1.331 7.023 1.00 0.00 C\nATOM 231 CD GLN B 13 69.040 -0.003 7.739 1.00 0.00 C\nATOM 232 OE1 GLN B 13 68.046 -0.811 7.388 1.00 0.00 N\nATOM 233 NE2 GLN B 13 69.869 -0.305 8.598 1.00 0.00 O\nATOM 234 N ALA B 14 69.971 4.146 4.765 1.00 0.00 N\nATOM 235 CA ALA B 14 71.218 4.420 4.059 1.00 0.00 C\nATOM 236 C ALA B 14 72.400 4.437 5.022 1.00 0.00 C\nATOM 237 O ALA B 14 72.520 5.332 5.859 1.00 0.00 O\nATOM 238 CB ALA B 14 71.122 5.747 3.320 1.00 0.00 C\nATOM 239 N LEU B 15 73.272 3.441 4.897 1.00 0.00 N\nATOM 240 CA LEU B 15 74.447 3.340 5.755 1.00 0.00 C\nATOM 241 C LEU B 15 75.732 3.468 4.943 1.00 0.00 C\nATOM 242 O LEU B 15 76.068 2.587 4.151 1.00 0.00 O\nATOM 243 CB LEU B 15 74.441 2.005 6.508 1.00 0.00 C\nATOM 244 CG LEU B 15 73.315 1.794 7.526 1.00 0.00 C\nATOM 245 CD1 LEU B 15 72.426 0.619 7.136 1.00 0.00 C\nATOM 246 CD2 LEU B 15 72.491 3.063 7.674 1.00 0.00 C\nATOM 247 N ALA B 16 76.446 4.570 5.146 1.00 0.00 N\nATOM 248 CA ALA B 16 77.695 4.815 4.434 1.00 0.00 C\nATOM 249 C ALA B 16 78.875 4.865 5.399 1.00 0.00 C\nATOM 250 O ALA B 16 78.995 5.790 6.202 1.00 0.00 O\nATOM 251 CB ALA B 16 77.604 6.114 3.646 1.00 0.00 C\nATOM 252 N PHE B 17 79.745 3.864 5.313 1.00 0.00 N\nATOM 253 CA PHE B 17 80.918 3.791 6.177 1.00 0.00 C\nATOM 254 C PHE B 17 82.204 3.886 5.364 1.00 0.00 C\nATOM 255 O PHE B 17 82.540 2.976 4.606 1.00 0.00 O\nATOM 256 CB PHE B 17 80.905 2.487 6.980 1.00 0.00 C\nATOM 257 CG PHE B 17 79.789 2.401 7.981 1.00 0.00 C\nATOM 258 CD1 PHE B 17 79.893 3.032 9.211 1.00 0.00 C\nATOM 259 CD2 PHE B 17 78.636 1.690 7.694 1.00 0.00 C\nATOM 260 CE1 PHE B 17 78.868 2.956 10.134 1.00 0.00 C\nATOM 261 CE2 PHE B 17 77.607 1.611 8.614 1.00 0.00 C\nATOM 262 CZ PHE B 17 77.724 2.244 9.835 1.00 0.00 C\nATOM 263 N ALA B 18 82.921 4.994 5.528 1.00 0.00 N\nATOM 264 CA ALA B 18 84.172 5.209 4.810 1.00 0.00 C\nATOM 265 C ALA B 18 85.350 5.293 5.775 1.00 0.00 C\nATOM 266 O ALA B 18 85.470 6.247 6.543 1.00 0.00 O\nATOM 267 CB ALA B 18 84.086 6.477 3.973 1.00 0.00 C\nATOM 268 N TYR B 19 86.218 4.286 5.729 1.00 0.00 N\nATOM 269 CA TYR B 19 87.388 4.244 6.598 1.00 0.00 C\nATOM 270 C TYR 
B 19 88.677 4.305 5.785 1.00 0.00 C\nATOM 271 O TYR B 19 89.013 3.367 5.063 1.00 0.00 O\nATOM 272 CB TYR B 19 87.371 2.972 7.450 1.00 0.00 C\nATOM 273 CG TYR B 19 86.263 2.938 8.479 1.00 0.00 C\nATOM 274 CD1 TYR B 19 85.564 4.090 8.814 1.00 0.00 C\nATOM 275 CD2 TYR B 19 85.918 1.753 9.118 1.00 0.00 C\nATOM 276 CE1 TYR B 19 84.550 4.063 9.756 1.00 0.00 C\nATOM 277 CE2 TYR B 19 84.907 1.716 10.059 1.00 0.00 C\nATOM 278 CZ TYR B 19 84.228 2.874 10.374 1.00 0.00 C\nATOM 279 OH TYR B 19 83.220 2.843 11.312 1.00 0.00 O\nATOM 280 N ALA B 20 89.396 5.417 5.909 1.00 0.00 N\nATOM 281 CA ALA B 20 90.649 5.602 5.187 1.00 0.00 C\nATOM 282 C ALA B 20 91.825 5.719 6.151 1.00 0.00 C\nATOM 283 O ALA B 20 91.946 6.701 6.883 1.00 0.00 O\nATOM 284 CB ALA B 20 90.568 6.839 4.303 1.00 0.00 C\nATOM 285 N VAL B 21 92.691 4.710 6.145 1.00 0.00 N\nATOM 286 CA VAL B 21 93.859 4.697 7.018 1.00 0.00 C\nATOM 287 C VAL B 21 95.150 4.725 6.207 1.00 0.00 C\nATOM 288 O VAL B 21 95.485 3.759 5.521 1.00 0.00 O\nATOM 289 CB VAL B 21 93.867 3.449 7.925 1.00 0.00 C\nATOM 290 CG1 VAL B 21 95.105 2.602 7.660 1.00 0.00 C\nATOM 291 CG2 VAL B 21 92.599 2.630 7.720 1.00 0.00 C\nATOM 292 N ALA B 22 95.871 5.839 6.291 1.00 0.00 N\nATOM 293 CA ALA B 22 97.126 5.995 5.565 1.00 0.00 C\nATOM 294 C ALA B 22 98.300 6.145 6.527 1.00 0.00 C\nATOM 295 O ALA B 22 98.421 7.153 7.222 1.00 0.00 O\nATOM 296 CB ALA B 22 97.050 7.197 4.636 1.00 0.00 C\nTER\n \"\"\"\n h = iotbx.pdb.input(source_info=None, lines=test_pdb_5).construct_hierarchy()\n anwer_h = iotbx.pdb.input(source_info=None,\n lines=answer_pdb_str).construct_hierarchy()\n h.write_pdb_file(\"test_8_before.pdb\")\n\n ncs_inp = iotbx.ncs.input(\n hierarchy=h,\n params=ncs_pars.ncs_search)\n ncs_groups = ncs_inp.get_ncs_restraints_group_list()\n\n nu.flip_atoms_in_ncs_groups(h, ncs_groups)\n h.write_pdb_file(\"test_8_result.pdb\")\n rmsd_smart = calculate_rmsd_smart(anwer_h, h)\n print(rmsd_smart)\n assert rmsd_smart < 0.01",
"def davidson_guess(mult_by_A,N,neig,Adiag=None):\n Mmax = min(N,2000)\n tol = 1e-6\n\n #Adiagcheck = np.zeros(N,np.complex128)\n #for i in range(N):\n # test = np.zeros(N,np.complex128)\n # test[i] = 1.0\n # Adiagcheck[i] = mult_by_A(test)[i]\n #print \"Analytical Adiag == numerical Adiag?\", np.allclose(Adiag,Adiagcheck)\n\n if Adiag is None:\n Adiag = np.zeros(N,np.complex128)\n for i in range(N):\n test = np.zeros(N,np.complex128)\n test[i] = 1.0\n Adiag[i] = mult_by_A(test)[i]\n\n xi = np.zeros(N,np.complex128)\n\n evals = np.zeros(neig,np.complex128)\n evecs = np.zeros((N,neig),np.complex128)\n\n Mtot = 0\n for guess in range(neig):\n print(\"Working on guess =\", guess+1, \"/\", neig)\n for M in range(1,Mmax+1):\n if M == 1:\n # Unit vector 'target' as the guess\n b = np.zeros((N,1))\n b[guess,0] = 1.0\n Ab = np.zeros((N,1),np.complex128)\n Ab[:,0] = mult_by_A(b[:,0])\n else:\n Ab = np.column_stack( (Ab,mult_by_A(b[:,M-1])) )\n\n Atilde = np.dot(b.conj().transpose(),Ab)\n lamda, alpha = diagonalize_asymm(Atilde)\n\n overlap_guess_j_max = -99\n target = 0\n for j, overlap_guess_j in enumerate(alpha[0,:]):\n if overlap_guess_j > overlap_guess_j_max:\n overlap_guess_j_max = overlap_guess_j\n target = j\n\n lamda_k = lamda[target]\n alpha_k = alpha[:,target]\n\n if M == Mmax:\n print(\" -- M reached Mmax\")\n break\n\n q = np.dot( Ab-lamda_k*b, alpha_k )\n if np.linalg.norm(q) < tol:\n evals[guess] = lamda_k\n evecs[:,guess] = np.dot(b,alpha_k)\n Mtot += M\n print(\" -- Converged in\", M, \"iterations\")\n break\n\n for i in range(N):\n eps = 0.\n if np.allclose(lamda_k,Adiag[i]):\n eps = 1e-8\n xi[i] = q[i]/(lamda_k-Adiag[i]+eps)\n\n # orthonormalize xi wrt b\n bxi,R = np.linalg.qr(np.column_stack((b,xi)))\n # append orthonormalized xi to b\n b = np.column_stack((b,bxi[:,-1]))\n\n if M > 1 and M == Mmax:\n print(\"WARNING: Davidson algorithm reached max basis size \"\n \"M = %d without converging.\"%(M))\n\n return evals, evecs, Mtot",
"def V_lennard_jones(atoms):\n \n Vw = 0 # this is the variable we will store the sum of all the energies in\n N = len(atoms)\n for i in range(N):\n for j in range(i+1, N):\n r = norm(atoms.coords[i] - atoms.coords[j]) # distance from atom i to atom j\n \n Vw += DW*((R0/r)**12 -2*(R0/r)**6) # the Lennard-Jones interaction!\n \n return Vw",
"def test_RelaxedNeighborhood(self):\n self.setup()\n self.graph = 'relaxed beta skeleton'\n graph_rep = nglpy.Graph(self.points, self.graph, self.max_neighbors,\n self.beta)\n expected_graph = {0: (1, 3), 1: (0, 2, 4), 2: (1, 3), 3: (0, 2),\n 4: (1,)}\n\n for i in range(len(self.points)):\n expected = list(expected_graph[i])\n actual = sorted(graph_rep.neighbors(i))\n msg = '\\nNode {} Connectivity:'.format(i)\n msg += '\\n\\texpected: {}\\n\\tactual: {} '.format(expected, actual)\n self.assertEqual(expected, actual, msg)\n\n self.assertEqual(graph_rep.neighbors(), expected_graph)",
"def _compute_bare_spectrum_constant(self) -> List[Tuple[ndarray, ndarray]]:\n eigendata = []\n for subsys in self._hilbertspace:\n if subsys not in self.subsys_update_list:\n evals_count = subsys.truncated_dim\n eigendata.append(subsys.eigensys(evals_count=evals_count))\n else:\n eigendata.append(None) # type: ignore\n return eigendata",
"def get_framework_neighbours(atom, useH=True):\n neighbourlist = []\n for atom2 in atom.partner[:5]:\n #if not 'H(' in atom2.name and np.linalg.norm(atom.cart-atom2.cart)<=1.6:\n if np.linalg.norm(atom.cart - atom2.cart) <= float(covalence_radius[atom.element]) + float(\n covalence_radius[atom2.element]) + .1:\n if not 'H' == atom2.element or useH:\n neighbourlist.append(atom2)\n return neighbourlist",
"def test_eigen_multiple_neighborhoods(self):\n # vectorized version\n t0 = time.time()\n extract_vect = EigenValueVectorizeFeatureExtractor()\n eigvals_vect = extract_vect.extract(self.point_cloud, self.neigh, None, None, None)\n print('Timing Vectorize : {}'.format((time.time() - t0)))\n eigvals_vect = np.vstack(eigvals_vect[:3]).T\n\n # serial version\n eigvals = []\n t0 = time.time()\n for n in self.neigh:\n extract = EigenValueSerial()\n eigvals.append(extract.extract(self.point_cloud, n, None, None, None))\n print('Timing Serial : {}'.format((time.time() - t0)))\n eigvals = np.array(eigvals)\n\n np.testing.assert_allclose(eigvals_vect, eigvals)",
"def getBondVectors(struct,tol,prec): \n \n \n binary_matrix= getDistMat(struct,tol)\n bond_dir = {}\n distance_matrix = struct.distance_matrix\n lattice = np.array(struct.lattice.as_dict()['matrix'])\n iterations = list(itertools.product([1,0,-1],repeat=3))\n # Loop over list of atoms\n for i in range(len(binary_matrix)):\n for j in range(i+1,len(binary_matrix)):\n # Proceed if the entries are listed as \"bonded\" \n if binary_matrix[i][j]==1: \n s1 = struct.species[i]\n s2 = struct.species[j]\n # Organize dictionary so it is always in order of increasing\n # atomic number\n if s1.number>s2.number:\n s1 = struct.species[j]\n s2 = struct.species[i] \n if s1 not in bond_dir:\n bond_dir[s1]={}\n if s2 not in bond_dir[s1]:\n bond_dir[s1][s2]=[]\n valid_vs = []\n \n # Get the vector between atomic positions\n \n bond_vector = np.array(struct.sites[j].coords-\n struct.sites[i].coords) \n \n # The positions of the atoms may not be in the right locations\n # to be the minimum distance from each other. As a result,\n # a translation is applied to the resulting \"bond vector\" \n # (alternatively, one of the atoms is translated)\n for shift in iterations:\n bondShift = bond_vector + np.dot(lattice.T,shift)\n if abs(distance_matrix[i][j]-magni(bondShift))<=prec:\n valid_vs.append(bondShift)\n break\n # See if the vector is already present in the collection of \n # vectors. If so, add the coordinates to the entry. Else,\n # create a new entry for the direction of the bond.\n for v in valid_vs:\n if np.any([magni(v-x[0])<=prec for x in bond_dir[s1][s2]]):\n for k in range(len(bond_dir[s1][s2])):\n if magni(v-bond_dir[s1][s2][k][0])<=prec:\n bond_dir[s1][s2][k][1].append([i,j])\n break\n \n else:\n bond_dir[s1][s2].append([v,[[i,j]]])\n return(bond_dir)",
"def create_preference_weighted_epsilon_neighbourhood(self):\n \n A = self.pref_weighted_similarity_measures # distances matrix\n A[A>self.epsilon] = 0 # set distances greater than epsilon to 0\n A.eliminate_zeros() # then remove these entries from matrix\n # For each entry in data get neighbor indices with preference weighted distance less than epsilon\n weighted_eps_neighbh = np.split(A.indices, A.indptr)[1:-1] \n\n return weighted_eps_neighbh",
"def epsilonEffectiveVectorialOtherOneNormalWall(ListOfEpsilons):\n epsilon2= 0.9\n ListOFEffectiveEpsilons= []\n for anyEpsilon in ListOfEpsilons:\n result=1/(1/anyEpsilon+1/epsilon2-1)\n ListOFEffectiveEpsilons.append(result)\n return ListOFEffectiveEpsilons",
"def get_closest_neighbours(atomlist, neighbours=2):\n print('atomlist', atomlist)\n neighbourlist = []\n for atom in atomlist:\n listline = [atom[0][0]]\n dists = []\n distsc = []\n for partner in atomlist:\n dists.append(np.linalg.norm(atom[1] - partner[1]))\n distsc.append(np.linalg.norm(atom[1] - partner[1]))\n dists.remove(min(dists))\n for _ in range(neighbours):\n if min(dists) < 2.5:\n listline.append(atomlist[distsc.index(min(dists))][0][0])\n dists.remove(min(dists))\n #listline.append(atomlist[distsc.index(min(dists))][0][0])\n neighbourlist.append(listline)\n return neighbourlist",
"def create_epsilon_neighbourhoods(self):\n self.neigbors_clf = NearestNeighbors(radius=self.epsilon, algorithm='ball_tree')\n self.neigbors_clf.fit(self.data)\n _, neigh_idx = self.neigbors_clf.radius_neighbors(self.data)\n return neigh_idx",
"def check_relevant_forms(L):\n L2 = list()\n for F in L.values():\n # needed_ev=(\n S = F.shim_corr\n # print \"S=\",S\n ok_ev = 0\n for g in S:\n if g.atkin_lehner_eigenvalue() == -1:\n ok_ev = ok_ev + 1\n if ok_ev > 0:\n print(\"Number of ok forms on \", F.space.WR.N, \" :\", ok_ev)\n F.list_coefficents('D', fd=True, neg=False, nmin=0, nmax=1000, latex=False, nd=50, prime=True)\n L2.append(F)\n return L2",
"def get_contour(atom_list):\n initial = [atom for atom in atom_list if ((0 < len(identify_bonds(atom, atom_list)) < 3) and (check_connected(atom, identify_bonds(atom, atom_list)) == False))]\n \n extra_1 = []\n for atom in atom_list:\n neighbours = [bond[0] for bond in identify_bonds(atom, atom_list)]\n for i in neighbours:\n neighbours2 = [bond[0] for bond in identify_bonds(i, atom_list)]\n for j in neighbours2:\n if j in initial:\n extra_1.append(atom)\n\n extra_2 = []\n for atom in atom_list:\n neighbours = [bond[0] for bond in identify_bonds(atom, atom_list)]\n check = 0\n for i in neighbours:\n if i in initial:\n check += 1\n if ((check == 2) and (atom not in initial)):\n extra_2.append(atom) \n return (initial + extra_1 + extra_2)",
"def potentialSolver3(self, w, max_it, tol):\n\n dx2 = 1.0/(self.dh[0]*self.dh[0]); # dx^2\n dy2 = 1.0/(self.dh[1]*self.dh[1]); # dy^2\n dz2 = 1.0/(self.dh[2]*self.dh[2]); # dz^2\n \n L2 = 0.0 # norm\n \n converged = False\n \n # Step 1: create *integer* array the same size as u \n x = np.zeros_like(self.phi,dtype=np.int)\n\n # Step 2: populate all non-boundary cells with running numbers from 1 to (n-2)^2\n x[1:-1,1:-1,1:-1] = np.arange(1,(self.ni-2)*(self.nj-2)*(self.nk-2)+1).reshape(self.ni-2,self.nj-2,self.nk-2)\n\n # Step 3: get indices of even (red) and odd (black) points\n ir, jr, kr = np.where((x>0) & (x%2 == 0)) # indices of red pts = indices of even numbers\n ib, jb, kb = np.where((x>0) & (x%2 == 1)) # indices of black pts = indices of odd numbers\n\n\n \n # solve potential\n for it in np.arange(1,max_it+1):\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\" \n \"\"\"\n #standard internal open node\n phi_new = (self.rho[i][j][k]/self.EPS_0 +\\\n dx2*(self.phi[i-1][j][k] + self.phi[i+1][j][k]) +\\\n dy2*(self.phi[i][j-1][k] + self.phi[i][j+1][k]) +\\\n dz2*(self.phi[i][j][k-1] + self.phi[i][j][k+1]))/(2*dx2+2*dy2+2*dz2)\n \n # sucessive over relaxation \n self.phi[i,j,k] = self.phi[i,j,k] + 1.4*(phi_new - self.phi[i][j][k])\n \n \"\"\" \n \n \"\"\"\n #standard internal open node\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n (self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1])+\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1])+\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk]))/(2*dx2+2*dy2+2*dz2)\n \n \"\"\"\n \"\"\"\n # sucessive over relaxation \n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] +\\\n 1.8*(self.phi_new[1:self.ni-1,1:self.nj-1,1:self.nk-1] - \\\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1])\n \"\"\"\n \n # Red point update\n self.phi[ir,jr,kr] = (1-w)*self.phi[ir,jr,kr] + (1.0/6.0)*w*(self.phi[ir+1,jr,kr] + self.phi[ir-1,jr,kr] +\\\n self.phi[ir,jr+1,kr] + self.phi[ir,jr-1,kr] +\\\n self.phi[ir,jr,kr+1] + self.phi[ir,jr,kr-1] +\\\n (self.rho[ir,jr,kr]/self.EPS_0)*(self.dh[0]*self.dh[1]))\n\n # Black point update\n self.phi[ib,jb,kb] = (1-w)*self.phi[ib,jb,kb] + (1.0/6.0)*w*(self.phi[ib+1,jb,kb] + self.phi[ib-1,jb,kb] +\\\n self.phi[ib,jb+1,kb] + self.phi[ib,jb-1,kb] +\\\n self.phi[ib,jb,kb+1] + self.phi[ib,jb,kb-1] +\\\n (self.rho[ib,jb,kb]/self.EPS_0)*(self.dh[0]*self.dh[1]))\n\n #check for convergence*/\n if it%25==0:\n sum = 0;\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\"\n\n self.R[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n -self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1]*(2*dx2+2*dy2+2*dz2) +\\\n self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1]) +\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1]) +\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk])\n\n sum = np.sum(self.R**2)\n\n L2 = np.sqrt(sum/(self.ni*self.nj*self.nk));\n #print(\"iter: \"+str(it)+\", L2 = \"+str(L2))\n if (L2<tol):\n converged = True\n break\n\n if 
(converged==False):\n print(\"Gauss-Seidel failed to converge, L2 = \"+str(L2))\n \n return converged",
"def potentialSolver3(self, w, max_it, tol):\n\n dx2 = 1.0/(self.dh[0]*self.dh[0]); # dx^2\n dy2 = 1.0/(self.dh[1]*self.dh[1]); # dy^2\n dz2 = 1.0/(self.dh[2]*self.dh[2]); # dz^2\n \n L2 = 0.0 # norm\n \n converged = False\n \n # Step 1: create *integer* array the same size as u \n x = np.zeros_like(self.phi,dtype=np.int)\n\n # Step 2: populate all non-boundary cells with running numbers from 1 to (n-2)^2\n x[1:-1,1:-1,1:-1] = np.arange(1,(self.ni-2)*(self.nj-2)*(self.nk-2)+1).reshape(self.ni-2,self.nj-2,self.nk-2)\n\n # Step 3: get indices of even (red) and odd (black) points\n ir, jr, kr = np.where((x>0) & (x%2 == 0)) # indices of red pts = indices of even numbers\n ib, jb, kb = np.where((x>0) & (x%2 == 1)) # indices of black pts = indices of odd numbers\n\n\n \n # solve potential\n for it in np.arange(1,max_it+1):\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\" \n \"\"\"\n #standard internal open node\n phi_new = (self.rho[i][j][k]/self.EPS_0 +\\\n dx2*(self.phi[i-1][j][k] + self.phi[i+1][j][k]) +\\\n dy2*(self.phi[i][j-1][k] + self.phi[i][j+1][k]) +\\\n dz2*(self.phi[i][j][k-1] + self.phi[i][j][k+1]))/(2*dx2+2*dy2+2*dz2)\n \n # sucessive over relaxation \n self.phi[i,j,k] = self.phi[i,j,k] + 1.4*(phi_new - self.phi[i][j][k])\n \n \"\"\" \n \n \"\"\"\n #standard internal open node\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n (self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1])+\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1])+\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk]))/(2*dx2+2*dy2+2*dz2)\n \n \"\"\"\n \"\"\"\n # sucessive over relaxation \n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] +\\\n 1.8*(self.phi_new[1:self.ni-1,1:self.nj-1,1:self.nk-1] - \\\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1])\n \"\"\"\n \n # Red point update\n self.phi[ir,jr,kr] = (1-w)*self.phi[ir,jr,kr] + (1.0/6.0)*w*(self.phi[ir+1,jr,kr] + self.phi[ir-1,jr,kr] +\\\n self.phi[ir,jr+1,kr] + self.phi[ir,jr-1,kr] +\\\n self.phi[ir,jr,kr+1] + self.phi[ir,jr,kr-1] +\\\n (self.rho[ir,jr,kr]/self.EPS_0)*(self.dh[0]*self.dh[1]))\n\n # Black point update\n self.phi[ib,jb,kb] = (1-w)*self.phi[ib,jb,kb] + (1.0/6.0)*w*(self.phi[ib+1,jb,kb] + self.phi[ib-1,jb,kb] +\\\n self.phi[ib,jb+1,kb] + self.phi[ib,jb-1,kb] +\\\n self.phi[ib,jb,kb+1] + self.phi[ib,jb,kb-1] +\\\n (self.rho[ib,jb,kb]/self.EPS_0)*(self.dh[0]*self.dh[1]))\n\n #check for convergence*/\n if it%25==0:\n sum = 0;\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\"\n\n self.R[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n -self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1]*(2*dx2+2*dy2+2*dz2) +\\\n self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1]) +\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1]) +\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk])\n\n sum = np.sum(self.R**2)\n\n L2 = np.sqrt(sum/(self.ni*self.nj*self.nk));\n #print(\"iter: \"+str(it)+\", L2 = \"+str(L2))\n if (L2<tol):\n converged = True\n break\n\n if 
(converged==False):\n print(\"Gauss-Seidel failed to converge, L2 = \"+str(L2))\n \n return converged",
"def test_difficulties_eps_multi(self):\n well_w = self.get_w_well_behaviour()\n\n def get_beamformer(A, B):\n return get_mvdr_vector_souden(\n A, B,\n return_ref_channel=True\n )\n\n for args in [\n (\n [self.PhiXX * 0, self.PhiXX],\n [self.PhiNN, self.PhiNN],\n ),\n (\n [self.PhiXX, self.PhiXX],\n [self.PhiNN * 0, self.PhiNN],\n ),\n (\n [self.PhiXX * 0, self.PhiXX],\n [self.PhiNN * 0, self.PhiNN],\n ),\n ]:\n w, ref_channel = get_beamformer(*args)\n assert ref_channel == 2, ref_channel\n np.testing.assert_allclose(\n w,\n np.array([[0., 0., 0.], well_w])\n )\n\n for args in [\n (\n [self.PhiXX * np.inf, self.PhiXX],\n [self.PhiNN, self.PhiNN],\n ),\n (\n [self.PhiXX, self.PhiXX],\n [self.PhiNN * np.inf, self.PhiNN],\n ),\n (\n [self.PhiXX * np.inf, self.PhiXX],\n [self.PhiNN * np.inf, self.PhiNN],\n ),\n ]:\n with tc.assert_raises(AssertionError):\n get_beamformer(*args)",
"def peridym_get_neighbor_data(mesh, horizon, omega_fun, struct_grd=False):\n\n if(struct_grd):\n cell_cent = structured_cell_centroids(mesh)\n cell_vol = structured_cell_volumes(mesh)\n else:\n cell_cent = get_cell_centroids(mesh)\n cell_vol = get_cell_volumes(mesh)\n\n tree = QuadTree()\n extents = get_domain_bounding_box(mesh)\n tree.put(extents, horizon)\n linear_tree = tree.get_linear_tree()\n nbr_lst, nbr_beta_lst = tree_nbr_search(tree.get_linear_tree(), cell_cent, horizon)\n\n start = tm.default_timer()\n print(\"computing the remaining peridynamic neighbor data for the mesh with horizon: %4.2f\" %horizon)\n\n mw = peridym_compute_weighted_volume(mesh, nbr_lst, nbr_beta_lst, horizon, omega_fun, struct_grd)\n\n nbr_bnd_vector_lst = []\n nbr_bnd_len_lst = []\n nbr_infl_fld_lst = []\n\n for i in range(len(cell_cent)):\n curr_node_coord = cell_cent[i]\n \n #declare empty lists for current node neighbor\n #attributes like neighbor bond vector, bond len,\n #and influence field \n #refer ch5 algo1 of handbook of peridynamic modelling\n #by silling etal \n\n curr_nbr_lst = nbr_lst[i] \n curr_nbr_bnd_vct = cell_cent[curr_nbr_lst] - curr_node_coord\n curr_nbr_bnd_len = la.norm(curr_nbr_bnd_vct, 2, axis=1)\n\n nbr_bnd_vector_lst.append(curr_nbr_bnd_vct)\n nbr_bnd_len_lst.append(curr_nbr_bnd_len)\n\n print(\"time taken for computation of remaining neighbor data for the given mesh is %4.3f seconds\"%(tm.default_timer()-start))\n \n return nbr_lst, nbr_beta_lst, nbr_bnd_vector_lst, nbr_bnd_len_lst, mw",
"def get_influence_atoms(atomlist):\n enviromentlist = []\n trunclist = []\n neighbourlist = get_closest_neighbours(atomlist, 4)\n for neighbours in neighbourlist:\n if neighbours[0][0] == \"H\":\n neighbours = neighbours[:2]\n if neighbours[0][0] == \"O\":\n neighbours = neighbours[:3]\n trunclist.append(neighbours)\n for atom in trunclist:\n newatom = []\n for atom1partner in atom[1:]:\n for partner in trunclist:\n if partner[0] == atom1partner:\n counter = 0\n\n for atomi in partner:\n if atomi[0] == 'H':\n counter += 1\n\n if counter < 2 or (partner[0] in atom and atom[0][0] == 'H'):\n newatom += atom + partner[1:]\n\n newatom = make_list_unique(newatom)\n newatom.sort()\n enviromentlist.append(newatom)\n return enviromentlist",
"def assemble_Poisson_6th_order_FD_solver_matrices(Nx, BC):\n\n Poisson_6th_order_FD_solver_matrices = {}\n\n # Nx is the number of active nodes in configuration\n if BC['phi']['x']['type'] == 'PBC':\n\n # assemble D, a matrix of difference coefficients on phi\n D = np.zeros([Nx,Nx])\n for i in range(Nx):\n if i == 0: # first row\n D[i,i] = -2\n D[i,i+1] = 1\n D[i,-1] = 1\n\n elif i == Nx - 1: # last row\n D[i,i] = -2\n D[i,i-1] = 1\n D[i,0] = 1\n else: # interior rows\n D[i,i-1] = 1\n D[i,i] = -2\n D[i,i+1] = 1\n\n # assemble B, a matrix of difference coefficients on the total density\n B = np.zeros([Nx, Nx])\n for i in range(Nx):\n if i == 0: # first row\n B[i,-2] = -1/240.\n B[i,-1] = 1/10.\n B[i,i] = 97/120.\n B[i,i+1] = 1/10.\n B[i,i+2] = -1/240.\n\n elif i == 1: # second row\n B[i,-1] = -1/240.\n B[i,i-1] = 1/10.\n B[i,i] = 97/120.\n B[i,i+1] = 1/10.\n B[i,i+2] = -1/240.\n\n elif 1 < i < (Nx - 2): # 2 <= row <= third before last\n B[i,i-2] = -1/240.\n B[i,i-1] = 1/10.\n B[i,i] = 97/120.\n B[i,i+1] = 1/10.\n B[i,i+2] = -1/240.\n\n elif i == (Nx - 2): # second before last row\n B[i,i-2] = -1/240.\n B[i,i-1] = 1/10.\n B[i,i] = 97/120.\n B[i,i+1] = 1/10.\n B[i,0] = -1/240.\n\n elif i == (Nx - 1): # last row\n B[i,i-2] = -1/240.\n B[i,i-1] = 1/10.\n B[i,i] = 97/120.\n B[i,0] = 1/10.\n B[i,1] = -1/240.\n\n\n elif BC['phi']['x']['type'] == 'LDBC_UDBC':\n\n # assemble D, a matrix of difference coefficients on phi\n D = np.zeros([Nx,Nx])\n for i in range(Nx):\n if i == 0 or i == Nx - 1: # last row\n D[i,i] = 1\n else: # interior rows\n D[i,i-1] = 1\n D[i,i] = -2\n D[i,i+1] = 1\n\n\n # assemble B, a matrix of difference coefficients on the total density\n B = np.zeros([Nx, Nx])\n for i in range(Nx):\n\n # redundant, included for transparency\n if i == 0 or i == Nx - 1:\n B[i,i] = 0\n\n elif i == 1:\n B[i,i-1] = 3/40.\n B[i,i] = 209/240.\n B[i,i+1] = 1/60.\n B[i,i+2] = 7/120.\n B[i,i+3] = -1/40.\n B[i,i+4] = 1/240.\n\n elif i == Nx-1:\n B[i,i] = 0\n\n elif 1 < i < Nx-2:\n B[i,i-2] = -1/240.\n B[i,i-1] = 1/10.\n B[i,i] = 97/120.\n B[i,i+1] = 1/10.\n B[i,i+2] = -1/240.\n\n elif i == Nx-2:\n B[i,i-4] = 1/240.\n B[i,i-3] = -1/40.\n B[i,i-2] = 7/120.\n B[i,i-1] = 1/60.\n B[i,i] = 209/240.\n B[i,i+1] = 3/40.\n\n elif BC['phi']['x']['type'] == 'LNBC_UDBC':\n\n # assemble D, a matrix of difference coefficients on phi\n D = np.zeros((Nx,Nx))\n\n # LNBC row\n D[0,0] = -97/10.\n D[0,1] = 16.\n D[0,2] = -10\n D[0,3] = 5.\n D[0,4] = -3/2.\n D[0,5] = 1/5.\n\n # UDBC row\n D[-1,-1] = 1.\n\n # Poisson's equation rows\n for i in range(1,Nx-1):\n D[i,i-1] = 1\n D[i,i] = -2\n D[i,i+1] = 1\n\n # assemble B, a matrix of difference coefficients on the total density\n B = np.zeros((Nx,Nx))\n for i in range(B.shape[0]):\n if i == 0:\n B[i,i] = 317 / 240.\n B[i,i+1] = -133/120.\n B[i,i+2] = 187 / 120.\n B[i,i+3] = -23 / 20.\n B[i,i+4] = 109 / 240.\n B[i,i+5] = -3/40.\n\n elif i == 1:\n\n B[i, i-1] = 3 / 40.\n B[i, i] = 209 / 240.\n B[i,i+1] = 1 / 60.\n B[i,i+2] = 7 / 120.\n B[i,i+3] = -1 / 40.\n B[i,i+4] = 1 / 240.\n\n elif 2 <= i <= Nx-3:\n\n B[i,i-2] = -1/240.\n B[i,i-1] = 1/10.\n B[i,i] = 97/120.\n B[i,i+1] = 1/10.\n B[i,i+2] = -1/240.\n\n elif i == Nx-2:\n\n B[i,i+1] = 3 / 40.\n B[i,i] = 209 / 240.\n B[i,i-1] = 1 / 60.\n B[i,i-2] = 7 / 120.\n B[i,i-3] = -1 / 40.\n B[i,i-4] = 1 / 240.\n\n # else i == Nx-1: row of zeros\n\n elif BC['phi']['x']['type'] == 'LDBC_UNBC':\n\n # assemble D, a matrix of difference coefficients on phi\n D = np.zeros((Nx,Nx))\n\n # UDBC row\n D[0,0] = 1.\n\n # LNBC row\n D[-1,-1] = -97/10.\n 
D[-1,-2] = 16.\n D[-1,-3] = -10\n D[-1,-4] = 5.\n D[-1,-5] = -3/2.\n D[-1,-6] = 1/5.\n\n # Poisson's equation rows\n for i in range(1,Nx-1):\n D[i,i-1] = 1\n D[i,i] = -2\n D[i,i+1] = 1\n\n\n # assemble B, a matrix of difference coefficients on the total density\n B = np.zeros((Nx,Nx))\n for i in range(B.shape[0]):\n # i == 0 row contains all zeros\n\n if i == 1:\n\n B[i, i-1] = 3 / 40.\n B[i, i] = 209 / 240.\n B[i,i+1] = 1 / 60.\n B[i,i+2] = 7 / 120.\n B[i,i+3] = -1 / 40.\n B[i,i+4] = 1 / 240.\n\n elif 2 <= i <= Nx-3:\n\n B[i,i-2] = -1/240.\n B[i,i-1] = 1/10.\n B[i,i] = 97/120.\n B[i,i+1] = 1/10.\n B[i,i+2] = -1/240.\n\n elif i == Nx-2:\n\n B[i,i+1] = 3 / 40.\n B[i,i] = 209 / 240.\n B[i,i-1] = 1 / 60.\n B[i,i-2] = 7 / 120.\n B[i,i-3] = -1 / 40.\n B[i,i-4] = 1 / 240.\n\n if i == Nx-1:\n B[i,i-5] = -3/40.\n B[i,i-4] = 109 / 240.\n B[i,i-3] = -23 / 20.\n B[i,i-2] = 187 / 120.\n B[i,i-1] = -133/120.\n B[i,i] = 317 / 240.\n\n elif BC['phi']['x']['type'] == 'LDBC_LNBC':\n\n # assemble D, a matrix of difference coefficients on phi\n D = np.zeros((Nx,Nx))\n\n # LDBC row, (row 0)\n D[0,0] = 1.\n\n # LNBC row, (row 1)\n D[1,0] = -97/10.\n D[1,1] = 16.\n D[1,2] = -10\n D[1,3] = 5.\n D[1,4] = -3/2.\n D[1,5] = 1/5.\n\n # Poisson's equation rows\n for i in range(2,Nx):\n D[i,i-2] = 1\n D[i,i-1] = -2\n D[i,i] = 1\n\n # assemble B, a matrix of difference coefficients on the total density\n B = np.zeros((Nx,Nx))\n for i in range(1,B.shape[0]):\n # if i == 0: row of zeros, density is not involved (corresponds to DBC)\n\n if i == 1:\n B[i,i-1] = 317 / 240.\n B[i,i] = -133/120.\n B[i,i+1] = 187 / 120.\n B[i,i+2] = -23 / 20.\n B[i,i+3] = 109 / 240.\n B[i,i+4] = -3/40.\n\n if i == 2:\n B[i, i-2] = 3 / 40.\n B[i, i-1] = 209 / 240.\n B[i,i] = 1 / 60.\n B[i,i+1] = 7 / 120.\n B[i,i+2] = -1 / 40.\n B[i,i+3] = 1 / 240.\n\n elif 3 <= i <= Nx-2:\n B[i,i-3] = -1/240.\n B[i,i-2] = 1/10.\n B[i,i-1] = 97/120.\n B[i,i] = 1/10.\n B[i,i+1] = -1/240.\n\n elif i == Nx-1:\n B[i,i-5] = 1/240.\n B[i,i-4] = -1/40.\n B[i,i-3] = 7/120.\n B[i,i-2] = 1/60.\n B[i,i-1] = 209/240.\n B[i,i] = 3/40.\n\n elif BC['phi']['x']['type'] == 'UDBC_UNBC':\n\n # assemble D, a matrix of difference coefficients on phi\n D = np.zeros((Nx,Nx))\n\n # LDBC row, (row Nx-1)\n D[-1,-1] = 1.\n\n # LNBC row, (row Nx-2)\n D[-2,-1] = -97/10.\n D[-2,-2] = 16.\n D[-2,-3] = -10\n D[-2,-4] = 5.\n D[-2,-5] = -3/2.\n D[-2,-6] = 1/5.\n\n # Poisson's equation rows\n for i in range(Nx-2):\n D[i,i] = 1\n D[i,i+1] = -2\n D[i,i+2] = 1\n\n\n # assemble B, a matrix of difference coefficients on the total density\n B = np.zeros((Nx,Nx))\n for i in range(B.shape[0]):\n if i == 0:\n B[i,i] = 3/40.\n B[i,i+1] = 209/240.\n B[i,i+2] = 1/60.\n B[i,i+3] = 7/120.\n B[i,i+4] = -1/40.\n B[i,i+5] = 1/240.\n\n if 1 <= i < Nx-3:\n B[i,i-1] = -1/240.\n B[i,i] = 1/10.\n B[i,i+1] = 97/120.\n B[i,i+2] = 1/10.\n B[i,i+3] = -1/240.\n\n elif i == Nx-3:\n B[i,i-3] = 1/240.\n B[i,i-2] = -1/40.\n B[i,i-1] = 7/120.\n B[i,i] = 1/60.\n B[i,i+1] = 209/240.\n B[i,i+2] = 3/40.\n\n elif i == Nx-2:\n B[i,i+1] = 317 / 240.\n B[i,i] = -133/120.\n B[i,i-1] = 187 / 120.\n B[i,i-2] = -23 / 20.\n B[i,i-3] = 109 / 240.\n B[i,i-4] = -3/40.\n\n # else i == Nx - 1: row of zeros, density is not involved (corresponds to DBC)\n\n Poisson_6th_order_FD_solver_matrices['D'] = D\n Poisson_6th_order_FD_solver_matrices['B'] = B\n\n return Poisson_6th_order_FD_solver_matrices",
"def hr_egn(A, B, R, x0):\n # A - Adjacency matrix, np.ndarray (N,N)\n # B - A 2D or 3D matrix with all payoff matrices, np.ndarray (S,S,N)\n # R - Relationship or preference matrix, np.ndarray (N,N)\n # x0 - Initial state of our system, np.ndarray (N,S), must be double\n\n # Number of players\n N = A[:, 0].size\n # Number of strategies\n S = x0[0, :].size\n # Degree and degree of preferences\n d = np.zeros([N, 2])\n d[:, 0] = np.dot(A, np.ones(N))\n\n for v in range(N):\n d[v, 1] = np.dot(np.ceil(np.abs(R[v, :])), A[v, :])\n\n # Player v neighborhood\n k = np.zeros([N, S], dtype='double')\n for v in range(N):\n for u in range(N):\n k[v, :] = np.add(k[v, :], np.multiply(A[v, u], x0[u, :]))\n # Weights the neighborhood\n k[v, :] = np.multiply(np.divide(1, d[v, 0]), k[v, :])\n\n # This variable is the increments that x0 receives, the derivative\n x = np.zeros([N, S], dtype='double')\n # This is the unit vector with 1 in some entry\n es = np.zeros(S, dtype='int')\n\n # Phi and gamma\n p = 0\n g = 0\n\n # Auxiliary variables for better clarity\n aux1 = 0\n aux2 = 0\n\n # Here is the derivative calculation\n # We first test if all payoffs are the same so we do less comparisons\n if B.ndim == 2:\n for v in range(N):\n for s in range(S):\n # Set es value\n es[s] = 1\n for u in range(N):\n if v == u:\n # Same payoff personal equation\n # First we will do the dot products\n # e_s*B*k_v\n aux1 = np.dot(es, np.dot(B, k[v, :]))\n # x_v*B*k_v\n aux2 = np.dot(x0[v, :], np.dot(B, k[v, :]))\n # Finally we subtract them to multiply by r_vv\n p = np.multiply(R[v, u], np.subtract(aux1, aux2))\n elif A[v, u] != 0:\n # Same payoff social equation\n # x_u*B*e_s\n aux1 = np.dot(x0[u, :], np.dot(B, es))\n # x_u*B*x_v\n aux2 = np.dot(x0[u, :], np.dot(B, x0[v, :]))\n # Subtract then multiply\n aux1 = np.subtract(aux1, aux2)\n aux2 = np.multiply(R[v, u], A[v, u])\n g = np.add(g, np.multiply(aux2, aux1))\n # Weights the social part\n if d[v, 1] != 0:\n g = np.multiply(np.divide(1, d[v, 1]), g)\n # Estimates the derivative\n x[v, s] = np.multiply(x0[v, s], np.add(p, g))\n # Prepare variables to next iteration\n p = 0\n g = 0\n es[s] = 0\n else:\n for v in range(N):\n for s in range(S):\n # Same thing as before, but now with individual payoffs\n es[s] = 1\n for u in range(N):\n if v == u:\n # Individual payoffs personal equation\n # e_s*B_v*k_v\n aux1 = np.dot(es, np.dot(B[:, :, v], k[v, :]))\n # x_u*B_v*k_v\n aux2 = np.dot(x0[v, :], np.dot(B[:, :, v], k[v, :]))\n p = np.multiply(R[v, u], np.subtract(aux1, aux2))\n elif A[v, u] != 0:\n # Individual payoffs social equation\n # x_u*B_u*e_s\n aux1 = np.dot(x0[u, :], np.dot(B[:, :, u], es))\n # x_u*B_u*x_v\n aux2 = np.dot(x0[u, :], np.dot(B[:, :, u], x0[v, :]))\n # Subtract then multiply\n aux1 = np.subtract(aux1, aux2)\n aux2 = np.multiply(R[v, u], A[v, u])\n g = np.add(g, np.multiply(aux2, aux1))\n # Weights the social part\n if d[v, 1] != 0:\n g = np.multiply(np.divide(1, d[v, 1]), g)\n # Estimates the derivative\n x[v, s] = np.multiply(x0[v, s], np.add(p, g))\n # Prepare variables to next iteration\n p = 0\n g = 0\n es[s] = 0\n return x",
"def potentialSolver4(self, w, max_it, tol):\n\n dx2 = 1.0/(self.dh[0]*self.dh[0]); # dx^2\n dy2 = 1.0/(self.dh[1]*self.dh[1]); # dy^2\n dz2 = 1.0/(self.dh[2]*self.dh[2]); # dz^2\n \n L2 = 0.0 # norm\n \n converged = False\n \n # Step 1: create *integer* array the same size as u \n x = np.zeros_like(self.phi,dtype=np.int)\n\n # Step 2: populate all non-boundary cells with running numbers from 1 to (n-2)^2\n x[1:-1,1:-1,1:-1] = np.arange(1,(self.ni-2)*(self.nj-2)*(self.nk-2)+1).reshape(self.ni-2,self.nj-2,self.nk-2)\n\n # Step 3: get indices of even (red) and odd (black) points\n ir, jr, kr = np.where((x>0) & (x%2 == 0)) # indices of red pts = indices of even numbers\n ib, jb, kb = np.where((x>0) & (x%2 == 1)) # indices of black pts = indices of odd numbers\n\n\n \n # solve potential\n for it in np.arange(1,max_it+1):\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\" \n \"\"\"\n #standard internal open node\n phi_new = (self.rho[i][j][k]/self.EPS_0 +\\\n dx2*(self.phi[i-1][j][k] + self.phi[i+1][j][k]) +\\\n dy2*(self.phi[i][j-1][k] + self.phi[i][j+1][k]) +\\\n dz2*(self.phi[i][j][k-1] + self.phi[i][j][k+1]))/(2*dx2+2*dy2+2*dz2)\n \n # sucessive over relaxation \n self.phi[i,j,k] = self.phi[i,j,k] + 1.4*(phi_new - self.phi[i][j][k])\n \n \"\"\" \n \n \"\"\"\n #standard internal open node\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n (self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1])+\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1])+\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk]))/(2*dx2+2*dy2+2*dz2)\n \n \"\"\"\n \"\"\"\n # sucessive over relaxation \n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] +\\\n 1.8*(self.phi_new[1:self.ni-1,1:self.nj-1,1:self.nk-1] - \\\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1])\n \"\"\"\n \n # Red point update\n self.phi[ir,jr,kr] = (1-w)*self.phi[ir,jr,kr] + (1.0/6.0)*w*(self.phi[ir+1,jr,kr] + self.phi[ir-1,jr,kr] +\\\n self.phi[ir,jr+1,kr] + self.phi[ir,jr-1,kr] +\\\n self.phi[ir,jr,kr+1] + self.phi[ir,jr,kr-1] +\\\n (self.rho[ir,jr,kr]/self.EPS_0)*(self.dh[0]*self.dh[1]))\n\n # Black point update\n self.phi[ib,jb,kb] = (1-w)*self.phi[ib,jb,kb] + (1.0/6.0)*w*(self.phi[ib+1,jb,kb] + self.phi[ib-1,jb,kb] +\\\n self.phi[ib,jb+1,kb] + self.phi[ib,jb-1,kb] +\\\n self.phi[ib,jb,kb+1] + self.phi[ib,jb,kb-1] +\\\n (self.rho[ib,jb,kb]/self.EPS_0)*(self.dh[0]*self.dh[1]))\n\n #check for convergence*/\n if it%25==0:\n sum = 0;\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\"\n\n self.R[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n -self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1]*(2*dx2+2*dy2+2*dz2) +\\\n self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1]) +\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1]) +\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk])\n\n sum = np.sum(self.R**2)\n\n L2 = np.sqrt(sum/(self.ni*self.nj*self.nk));\n #print(\"iter: \"+str(it)+\", L2 = \"+str(L2))\n if (L2<tol):\n converged = True\n break\n\n if 
(converged==False):\n print(\"Gauss-Seidel failed to converge, L2 = \"+str(L2))\n \n return converged",
"def mvee(atoms, tol = 0.00001):\n points_asarray = np.array([atom.coordinates for atom in atoms])\n points = np.asmatrix(points_asarray)\n N, d = points.shape\n Q = np.column_stack((points, np.ones(N))).T\n err = tol+1.0\n u = np.ones(N)/N\n try:\n while err > tol:\n # assert u.sum() == 1 # invariant\n X = Q * np.diag(u) * Q.T\n M = np.diag(Q.T * la.inv(X) * Q)\n jdx = np.argmax(M)\n step_size = (M[jdx]-d-1.0)/((d+1)*(M[jdx]-1.0))\n new_u = (1-step_size)*u\n new_u[jdx] += step_size\n err = la.norm(new_u-u)\n u = new_u\n c = u*points\n A = la.inv(points.T*np.diag(u)*points - c.T*c)/d \n except: # For singular matrix errors i.e. motif is ellipse rather than ellipsoid\n centroid = np.average(points_asarray,axis=0)\n plane = Plane(atoms)\n normal = np.array([plane.a,plane.b,plane.c])\n norm_mag = np.sqrt(np.dot(normal,normal))\n for i, norm in enumerate(normal):\n normal[i] = norm * 1 / norm_mag\n centroid = np.average(points,axis=0).reshape(-1,3)\n p1 = centroid + normal*0.00001\n p2 = centroid - normal*0.00001\n points_asarray = np.concatenate([points_asarray,p1,p2],axis=0)\n points = np.asmatrix(points_asarray)\n N, d = points.shape\n Q = np.column_stack((points, np.ones(N))).T\n err = tol+1.0\n u = np.ones(N)/N\n while err > tol:\n # assert u.sum() == 1 # invariant\n X = Q * np.diag(u) * Q.T\n M = np.diag(Q.T * la.inv(X) * Q)\n jdx = np.argmax(M)\n step_size = (M[jdx]-d-1.0)/((d+1)*(M[jdx]-1.0))\n new_u = (1-step_size)*u\n new_u[jdx] += step_size\n err = la.norm(new_u-u)\n u = new_u\n c = u*points\n A = la.inv(points.T*np.diag(u)*points - c.T*c)/d \n \n return np.asarray(A), np.squeeze(np.asarray(c))",
"def _solve_implicit(self, initial_conditions):\n coeff = self.a ** 2 * self.tau / self.h ** 2\n l_and_u = (1, 1)\n ab = np.empty((3, self.n_x))\n # main diagonal\n ab[1] = 1 + 2.0 * coeff\n # upper and lower diagonals\n ab[0] = ab[2] = -coeff\n\n # left bc\n if self.left_bc_type == \"DIRICHLET\":\n ab[0][1] = 0 # upper diagonal\n ab[1][0] = 1 # main diagonal\n elif self.left_bc_type == \"NEUMANN\":\n ab[0][1] = 1 # upper diagonal\n ab[1][0] = -1 # main diagonal\n\n # right bc\n if self.right_bc_type == \"DIRICHLET\":\n ab[1][-1] = 1 # main diagonal\n ab[2][-2] = 0 # lower diagonal\n elif self.right_bc_type == \"NEUMANN\":\n ab[1][-1] = 1 # main diagonal\n ab[2][-2] = -1 # lower diagonal\n\n current_solution = initial_conditions\n solutions = []\n\n for t in self.t_grid:\n b = current_solution + self.tau * self.rhs(self.x_grid, t)\n # left bc\n if self.left_bc_type == \"DIRICHLET\":\n b[0] = self.left_bc(t)\n elif self.left_bc_type == \"NEUMANN\":\n b[0] = self.h * self.left_bc(t)\n # right bc\n if self.right_bc_type == \"DIRICHLET\":\n b[-1] = self.right_bc(t)\n elif self.right_bc_type == \"NEUMANN\":\n b[-1] = self.h * self.right_bc(t)\n\n next_solution = solve_banded(l_and_u, ab, b)\n if self.mode == \"VISUALIZATION\":\n solutions.append((t, next_solution.copy()))\n current_solution = next_solution\n if self.mode == \"TEST\":\n # print(\"Result: \", current_solution.tolist())\n # print(\"Right answer: \", self.anl_solution.tolist())\n self._norma(current_solution)\n elif self.mode == \"VISUALIZATION\":\n return solutions",
"def get_framework_neighbors(atom, useH=True):\n neighborlist = []\n for atom2 in atom.partner[:5]:\n if np.linalg.norm(atom.cart - atom2.cart) <= float(covalence_radius[atom.element]) + float(\n covalence_radius[atom2.element]) + .1:\n if not 'H' == atom2.element or useH:\n neighborlist.append(atom2)\n return neighborlist",
"def _compute_bare_spectrum_constant(self):\n eigendata = []\n for subsys in self._hilbertspace:\n if subsys not in self.subsys_update_list:\n evals_count = subsys.truncated_dim\n eigendata.append(subsys.eigensys(evals_count=evals_count))\n else:\n eigendata.append(None)\n return eigendata"
] | [
"0.5617916",
"0.540578",
"0.53865945",
"0.5325545",
"0.52052456",
"0.51788265",
"0.51686805",
"0.5112846",
"0.508617",
"0.5080375",
"0.5077638",
"0.50694567",
"0.50590456",
"0.50240624",
"0.50136775",
"0.49985772",
"0.49803355",
"0.49557477",
"0.49493426",
"0.49493426",
"0.49472663",
"0.49452952",
"0.49380517",
"0.49371994",
"0.49327734",
"0.492903",
"0.48954266",
"0.48831236",
"0.48799583",
"0.48715338"
] | 0.6525174 | 0 |
Return number of CMAPs. Return 0 if CMAP does not exist | def getNumMaps(self):
try:
return self._numCMAP
except AttributeError:
pass
flag = 'CMAP_COUNT'
if flag not in self._raw_data and self.chamber:
flag = 'CHARMM_CMAP_COUNT'
if flag in self._raw_data:
self._numCMAP = int(self._raw_data[flag][1])
return self._numCMAP
return 0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def map_count(filename):\n f = open(filename, \"r+\")\n buf = mmap.mmap(f.fileno(), 0)\n lines = 0\n readline = buf.readline\n while readline():\n lines += 1\n return lines",
"def len():\n if not CpuMap.arr:\n CpuMap.arr = CpuMap._cpus()\n return len(CpuMap.arr)",
"def __len__(self):\n return len(self._maps)",
"def get_number_of_items(self):\n return len(self.__item_map)",
"def __len__(self):\n count = 0\n for recovery_set in self.recovery_sets.values():\n count += len(recovery_set.packets)\n return count",
"def count(self):\n return self.connection.llen(self.key)",
"def count(self):\n # TODO not implemented yet\n return 0",
"def count(self, conn, key):\n return conn.llen(key)",
"def count(self):\n return len([i for i in self.iteritems()])",
"def count(self):\n return self.connection._llen(self.key)",
"def count(self):\n return len(self.wallpapers)",
"def get_count():\n _check_init()\n return _pypm.CountDevices()",
"def get_num_cams(self, data):\n cams = set()\n for items in data:\n camid = items[2]\n cams.add(camid)\n return len(cams)",
"def get_counts(filename, alphabet, kmin, kmax):\n # get the list of kmers to count with length between kmin and kmax\n kmers_list = get_all_possible_kmers(alphabet, kmin, kmax)\n # initialyze the counter with all possible kmer with length\n # between kmin and kmax with zero counts\n counter = Counter(dict([(km, 0) for km in kmers_list]))\n # open and read in the kmers/string in the file\n with gzip.open(filename, 'rt') as fh:\n # iterates through the strings\n for line in fh:\n # make the adjustments int the strings\n kmer = line.replace('\\n', '')\n # check if kmer/string is in the counter\n if kmer in counter:\n # if kmer is in add 1 other wise keep the zero count\n counter[kmer] += 1\n return counter",
"def numnems(self):\n count = 0\n for o in self._objs.values():\n count += len(o.netifs())\n return count",
"def get_num_countries(self):\n return len(self.countries)",
"def get_unread_count(imap):\n status, messages = imap.select('Inbox')\n status, response = imap.uid('search', None, 'UNSEEN')\n unread_msg_nums = response[0].split()\n return len(unread_msg_nums)",
"def buses_count(self):\n\n count = 0\n for line in self.__bus_dict.values():\n # for item in buses:\n count += len(line)\n return count",
"def _get_count(results):\n return len(results)",
"def size(self):\n\t\treturn len(self.cache)",
"def count_maps_for_lpar(mappings, lpar_id):\n return len([1 for amap in mappings\n if amap.server_adapter.lpar_id == lpar_id])",
"def __len__(self) -> int:\n return len(self.mapping)",
"def count(self):\n return self.db.zcard(REDIS_KEY)",
"def get_types_count():\n return len(type_dict.keys())",
"def get_number_of_locations():\n count = 0\n tree = ET.parse('./devset_topics.xml')\n root = tree.getroot()\n for item in root.findall('./topic'):\n count = count + 1\n return count",
"def count(self):\n return self.size()",
"def get_num_classes(self):\n return len(self.class_map_dict)",
"def count(self):\n return self.database.zcard(self.key)",
"def count(cls, client) :\n\t\ttry :\n\t\t\tobj = bfdsession()\n\t\t\toption_ = options()\n\t\t\toption_.count = True\n\t\t\tresponse = obj.get_resources(client, option_)\n\t\t\tif response :\n\t\t\t\treturn response[0].__dict__['___count']\n\t\t\treturn 0\n\t\texcept Exception as e :\n\t\t\traise e",
"def __len__(self):\n return len(self._mapping)"
] | [
"0.6600952",
"0.6299485",
"0.6144771",
"0.6009182",
"0.6002554",
"0.5962962",
"0.59225476",
"0.5918922",
"0.59180444",
"0.5889421",
"0.5880783",
"0.58007944",
"0.5787575",
"0.5780228",
"0.57729685",
"0.5754686",
"0.57487535",
"0.5747835",
"0.5746384",
"0.5744093",
"0.5739804",
"0.5720929",
"0.5716793",
"0.57011396",
"0.5700248",
"0.5698187",
"0.5694827",
"0.56860274",
"0.56624156",
"0.5659987"
] | 0.75202054 | 0 |
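A minimal usage sketch for the document code in the row above (the parser method that reads the CHARMM_CMAP_COUNT flag and falls back to 0). The `prmtop` variable is an assumed instance of that parser class, used for illustration only and not part of the dataset:

# Hypothetical caller; `prmtop` is an assumed parser instance.
num_maps = prmtop.getNumMaps()
if num_maps > 0:
    print('prmtop defines %d CMAP correction maps' % num_maps)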
Return CMAP resolution info. Return 0 if CMAP does not exist | def getCMAPResolutions(self):
try:
return self._cmapResolution
except AttributeError:
pass
flag = 'CMAP_RESOLUTION'
if flag not in self._raw_data and self.chamber:
flag = 'CHARMM_CMAP_RESOLUTION'
if flag in self._raw_data:
self._cmapResolution=self._raw_data[flag]
return self._cmapResolution
return 0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getNumMaps(self):\n try:\n return self._numCMAP\n except AttributeError:\n pass\n flag = 'CMAP_COUNT'\n if flag not in self._raw_data and self.chamber:\n flag = 'CHARMM_CMAP_COUNT'\n if flag in self._raw_data:\n self._numCMAP = int(self._raw_data[flag][1])\n return self._numCMAP\n return 0",
"def get_map(self, frames, resolution=None):\n self.lock.acquire()\n req = GetOccupancyMapRequest()\n req.frames = frames\n req.resolution = -1 if resolution is None else resolution\n try:\n resp = self.get_map_client.call(req)\n except rospy.ServiceException as e:\n logerror(\"Failed to call get_map service {}\".format(e))\n raise\n\n x0 = resp.occ.info.origin.position.x\n y0 = resp.occ.info.origin.position.y\n width = resp.occ.info.width\n height = resp.occ.info.height\n resolution = resp.occ.info.resolution\n occ_arr = np.array(resp.occ.data).reshape(height, width)\n occ_arr[occ_arr < 0] = 50\n occ_arr = np.clip(occ_arr / 100.0, 0.0, 1.0)\n self.lock.release()\n return x0, y0, resolution, occ_arr",
"def scanResolution(self):\n return self._getAttribute(Attribute.scanResolution)",
"def Resolution(self):\n\t\treturn self._get_attribute('resolution')",
"def get_resolution(filename):\n cmd = ('ffprobe -v 0 -of flat=s=_ -select_streams v:0 -show_entries '\n 'stream=height,width ' + filename).split()\n pid = subprocess.run(cmd, stdout=subprocess.PIPE,\n universal_newlines=True)\n if pid.returncode != 0:\n return None\n\n resolution_exp = pid.stdout\n width = int(resolution_exp.split('width=')[1].split('\\n')[0])\n height = int(resolution_exp.split('height=')[1].split('\\n')[0])\n return (width, height)",
"def get_calibration_info():\n mjpeg_info_dict = redis_tools.get_dict(db,'mjpeg_info_dict')\n calibration_info = mct_introspection.get_homography_calibration_info()\n for camera in mjpeg_info_dict:\n if not camera in calibration_info:\n calibration_info[camera] = {'modified': ''}\n return calibration_info",
"def resolution(self):\n return self._resolution",
"def getResolution(self):\n # load it each time, since this setting is not limited to a single user\n projectSettingsDB = self.loadProjectSettings()\n try:\n resolution = projectSettingsDB[\"Resolution\"]\n return resolution\n except KeyError:\n msg = \"Database Error while reading projectSettings.json\"\n logger.error(msg)\n return None",
"def get_resolution_component(details):\n\n try:\n res = details['model_component'][realm]['description'].split('; ')[part].split(' ')[res_loc]\n try:\n _ = int(res)\n except:\n res = 0\n except:\n res = 0\n\n return res",
"def resolution(self) -> int:\n return self._resolution",
"def resolution(self) -> int:\n return self.options.resolution",
"def getResolution(self):\n return self.resolution",
"def get_resolution(path_name, file_name):\n with open(path_name + 'resolution.txt') as file, \\\n mmap.mmap(file.fileno(), 0, access=mmap.ACCESS_READ) as s:\n file.seek(0, s.find(file_name.encode()))\n line = file.readline().split(' ')\n return [line[1], line[2].rstrip('\\n')]",
"def get_resolution(self):\n return self.__resolution",
"def get_resolution(self, curvename):\n\n if curvename == 'flank':\n return self.points_flank\n elif curvename == 'fillet':\n return self.points_fillet\n elif curvename == 'tip':\n return self.points_tip\n elif curvename == 'root':\n return self.points_root\n elif curvename == 'shaft':\n return self.points_shaft\n elif curvename == 'width':\n return self.points_width",
"def get_campaign_resolution(self):\n campaign_resolutions = dict(CAMPAIGN_RESOLUTION_CHOICES)\n return campaign_resolutions.get(self.campaign_resolution, \"N/A\")",
"def _resolution(self):\n _, xres, _, _, _, yres = self.geotransform\n return xres, yres",
"def resolution(self):\n return next(iter(self.resolutions()), None)",
"def get_current_resolution(self):\n return self.display_info[\"width\"], self.display_info[\"height\"]",
"def _proc_info(self):\n ret = cext.proc_info(self.pid)\n assert len(ret) == len(pinfo_map)\n return ret",
"def get_info(file_path):\n \n with h5py.File(file_path, 'r') as f:\n max_zoom = f.attrs.get('max-zoom')\n \n if max_zoom is None:\n logger.info('no zoom found')\n raise ValueError(\n 'The `max_zoom` attribute is missing.'\n )\n \n c = cooler.Cooler(f[\"0\"])\n \n (chroms, chrom_sizes, chrom_cum_lengths) = get_chromosome_names_cumul_lengths(c)\n \n total_length = int(chrom_cum_lengths[-1])\n max_zoom = f.attrs['max-zoom']\n bin_size = int(f[str(max_zoom)].attrs['bin-size'])\n \n max_width = bin_size * TILE_SIZE * 2**max_zoom\n \n info = {\n 'min_pos': [0.0, 0.0],\n 'max_pos': [total_length, total_length],\n 'max_zoom': max_zoom,\n 'max_width': max_width,\n 'bins_per_dimension': TILE_SIZE,\n }\n \n return info",
"def get():\n\n l2ca_info = caps.l2ca_info()\n\n res = {\n 'cache_size': l2ca_info['cache_size'],\n 'cw_size': l2ca_info['cache_way_size'],\n 'cw_num': l2ca_info['cache_ways_num'],\n 'clos_num': l2ca_info['clos_num'],\n 'cdp_supported': l2ca_info['cdp_supported'],\n 'cdp_enabled': l2ca_info['cdp_enabled']\n }\n return res, 200",
"def tileset_info(chromsizes, resolution):\n min_tile_cover = np.ceil(sum(chromsizes) / TILE_SIZE)\n step_max_zoom = int(np.floor(np.log2(resolution)))\n max_zoom = int(np.ceil(np.log2(min_tile_cover)))\n tileset_info = {\n \"min_pos\": [0],\n \"max_pos\": [TILE_SIZE * 2 ** max_zoom],\n \"max_width\": TILE_SIZE * 2 ** max_zoom,\n \"tile_size\": TILE_SIZE,\n \"max_zoom\": max_zoom - step_max_zoom,\n }\n return tileset_info",
"def resolution(self):\n return {'x': self.width, 'y': self.height}",
"def get_meminfo():\n\n mem_info = {}\n re_keyval = re.compile(r'^\\s*(\\S+)\\s*[=:]\\s*(\\d+)')\n try:\n with open(MEMINFO, 'r') as mem_file:\n for line in mem_file:\n match = re_keyval.search(line)\n if match:\n keyfile = match.group(1)\n val = match.group(2)\n mem_info[keyfile] = int(val)\n except IOError as err:\n LOG.error('%s: Cannot read meminfo, error=%s',\n 'platform memory usage', err)\n return mem_info\n\n return mem_info",
"def getResolution(self):\n return self._lowLevelGetDeviceResolution()",
"def findMap(obc, nWin):\n try:\n return config.mapLoockUp[(obc,nWin)]\n except KeyError:\n return None",
"def get():\n\n l3ca_info = caps.l3ca_info()\n\n res = {\n 'cache_size': l3ca_info['cache_size'],\n 'cw_size': l3ca_info['cache_way_size'],\n 'cw_num': l3ca_info['cache_ways_num'],\n 'clos_num': l3ca_info['clos_num'],\n 'cdp_supported': l3ca_info['cdp_supported'],\n 'cdp_enabled': l3ca_info['cdp_enabled']\n }\n return res, 200",
"def getMappedInfo(self):\n \n return self.mapped_info",
"def check_for_map(self):\n try:\n map = self.global_dict['fsp_out_map']\n except AttributeError:\n out_msg = \"Not computing nearest distance. Need to have loaded an ESDF\"\n print(out_msg)\n return None\n\n if map is None:\n out_msg = \"Not computing nearest distance. Need to have loaded an ESDF\"\n print(out_msg)\n return None\n\n return map"
] | [
"0.64817363",
"0.5936191",
"0.5852588",
"0.56650823",
"0.5629899",
"0.55542696",
"0.5476778",
"0.54461724",
"0.54426503",
"0.5433428",
"0.54026765",
"0.5396244",
"0.5388371",
"0.53813756",
"0.5373326",
"0.53725666",
"0.53344905",
"0.5324073",
"0.5314475",
"0.5301652",
"0.5289623",
"0.5287089",
"0.5268739",
"0.51964575",
"0.50974804",
"0.50946146",
"0.50851715",
"0.50793684",
"0.5004785",
"0.49412316"
] | 0.75949377 | 0 |
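The resolutions returned by the document in the row above come straight from the prmtop CMAP_RESOLUTION (or CHARMM_CMAP_RESOLUTION) section, so they are raw string fields, and the method returns 0 when the section is absent. A minimal consumer sketch under those assumptions; `prmtop` is an illustrative instance name, not from the dataset:

raw = prmtop.getCMAPResolutions()
resolutions = [int(r) for r in raw] if raw else []  # cast raw string fields; handle the 0 fallback
grid_points = [r * r for r in resolutions]          # each CMAP energy grid is resolution x resolution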
Return CMAP type, list of first four atoms, and list of second four atoms | def getCMAPDihedrals(self):
try:
return self._cmapList
except AttributeError:
pass
flag = 'CMAP_INDEX'
if flag not in self._raw_data and self.chamber:
flag = 'CHARMM_CMAP_INDEX'
cmapPointers = self._raw_data[flag]
self._cmapList=[]
forceConstConversionFactor = (units.kilocalorie_per_mole).conversion_factor_to(units.kilojoule_per_mole)
for ii in range(0,len(cmapPointers),6):
if any([int(cmapPointers[ii+jj])<0 for jj in range(5)]):
raise ValueError("Found negative cmap atom pointers %s"
% ((cmapPointers[ii],
cmapPointers[ii+1],
cmapPointers[ii+2],
cmapPointers[ii+3],
cmapPointers[ii+4]),))
iType=int(cmapPointers[ii+5])-1
self._cmapList.append((int(iType),
int(cmapPointers[ii])-1,
int(cmapPointers[ii+1])-1,
int(cmapPointers[ii+2])-1,
int(cmapPointers[ii+3])-1,
int(cmapPointers[ii+1])-1,
int(cmapPointers[ii+2])-1,
int(cmapPointers[ii+3])-1,
int(cmapPointers[ii+4])-1))
return self._cmapList | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_carboxyl_map(atom_list):\n carboxyl_map = [[atom_list[x], atom_list[x+1], atom_list[x+2], atom_list[x+3]] for x in range(len(atom_list)-3) if ((atom_list[x].residue_name == atom_list[x+1].residue_name == atom_list[x+2].residue_name == atom_list[x+3].residue_name == \"C1A\") and (atom_list[x].residue_number == atom_list[x+1].residue_number == atom_list[x+2].residue_number == atom_list[x+3].residue_number) and (atom_list[x].atom_name != \"CY\" != atom_list[x+1].atom_name != atom_list[x+2].atom_name != \"CY\" != atom_list[x+3].atom_name ))]\n return carboxyl_map",
"def get_map_central(atom_list):\n central_map = [atom for atom in atom_list if ((len(identify_bonds(atom, atom_list)) == 3) and (check_connected(atom, identify_bonds(atom, atom_list)) == False))]\n return central_map",
"def get_info(atom):\n return [atom.GetIdx(), atom.GetNeighbors()[0].GetIdx()]",
"def smi2patt(smi):\n # By default, RDKit won't recognize \"C=[NH]=C\",\n # setting sanitize=F recitify this\n _m = Chem.MolFromSmiles(smi) #, sanitize=False)\n zs = []; cns = []; repls = []; atypes = []\n for ai in _m.GetAtoms():\n zi = ai.GetAtomicNum()\n cni = ai.GetTotalDegree()\n # here we use '<' & '>' instead of '[' & ']'\n # is due to the fact that we need to sequentially\n # replace the content within [] by `repl\n repls.append( '<#%d;X%d>'%(zi,cni) )\n atypes.append( '%02d;X%d'%(zi,cni) )\n zs.append( zi )\n cns.append( cni )\n zs = np.array(zs,dtype=int)\n na = len(zs)\n assert np.all(zs>1), '#ERROR: found H?'\n for bi in _m.GetBonds():\n bi.SetBondType(bo2bt['1.0'])\n # The line below is necessary! If not, we may end up with\n # smarts like '[#6]12:[#6]:[#6]:[#6]=1:[#6]:[#6]:2', originating\n # from input SMILES 'C12C=CC=1C=C2'\n bi.SetIsAromatic(False)\n sma = Chem.MolToSmarts(_m)\n #print ' repls = ', repls\n for i in range(na):\n sma = re.sub('\\[.*?\\]', repls[i], sma, count=1)\n #print ' sma = ', sma\n patts = [ '<', '>', '-'] #'-\\[', '\\]-' ]\n repls = [ '[', ']', '~'] #'~[', ']~' ]\n n = len(patts)\n for i in range(n):\n sma = re.sub(patts[i], repls[i], sma)\n return atypes, sma",
"def get_epoxy_map(atom_list):\n epoxy_map = [[atom_list[x]] for x in range(len(atom_list)) if ((atom_list[x].residue_name == \"E1A\") and (atom_list[x].atom_name != \"CY\"))]\n return epoxy_map",
"def convert_atoms_to_pdb_molecules(atoms: t.List[Atom]) -> t.List[str]:\r\n # 1) GROUP ATOMS BT MOLECULES\r\n molecules = defaultdict(list)\r\n for a in atoms:\r\n molecules[a.resi].append(a)\r\n\r\n # 2) CONSTUCT PDB BLOCKS\r\n #ref: https://www.cgl.ucsf.edu/chimera/docs/UsersGuide/tutorials/pdbintro.html\r\n pdb_format = \"ATOM {:>5d} {:<2}{:1}{:>3} {:1}{:>3d}{:1} {:>7.3f}{:>7.3f}{:>7.3f}{:>5}{:>6}{:<3}{:>2} {:>2d}\"\r\n dummy_occupancy= dummy_bfactor= dummy_charge = 0.0\r\n dummy_alt_location= dummy_chain= dummy_insertion_code= dummy_segment = \"\"\r\n\r\n pdb_molecules: t.List[str] = []\r\n for m_ID in sorted(molecules):\r\n m = molecules[m_ID]\r\n atoms_as_lines: t.List[str] = []\r\n for a in sorted(m, key= lambda x: x.id):\r\n atoms_as_lines.append(pdb_format.format(int(a.id), a.name, dummy_alt_location, a.resn, dummy_chain, int(a.resi), dummy_insertion_code, a.x, a.y, a.z, dummy_occupancy, dummy_bfactor, dummy_segment, a.elem, int(dummy_charge)))\r\n\r\n # Sort by Id: => convert str up do first space to int\r\n #atoms_as_lines = sorted(atoms_as_lines, key=lambda x: int(x[:x.index('\\t')]))\r\n molecule_as_str = \"TITLE \"+a.resn+\"\\n\"+'\\n'.join(atoms_as_lines) + '\\nEND'\r\n # molecule_as_str = molecule_as_str.replace('\\t',' ')\r\n pdb_molecules.append(molecule_as_str)\r\n\r\n print(pdb_molecules[-1])\r\n\r\n return pdb_molecules",
"def read_pdb(self, pdb):\n pdb_a = {}\n for line in pdb:\n at = re.compile(\"(ATOM|HETATM)\")\n if at.match(line):\n nm = re.sub(r'\\s', '', line[6:12])\n aname = re.sub(r'\\s', '', line[12:17])\n ri_c = re.sub(r'\\s', '', line[20:27])\n x = re.sub(r'\\s', '', line[30:38])\n y = re.sub(r'\\s', '', line[38:46])\n z = re.sub(r'\\s', '', line[46:55])\n if ri_c and aname and x and y and z:\n pdb_a[int(nm)] = [aname, Vector(float(x), float(y), float(z)), ri_c]\n return [pdb_a, nm]",
"def get_species_list() -> list:\n c2h2_xyz = {'symbols': ('C', 'C', 'H', 'H'), 'isotopes': (12, 12, 1, 1),\n 'coords': ((0.0, 0.0, 0.0), (0.0, 0.0, 1.203142), (0.0, -0.0, 2.265747), (-0.0, -0.0, -1.062605))}\n ch4_xyz = {'symbols': ('C', 'H', 'H', 'H', 'H'), 'isotopes': (12, 1, 1, 1, 1),\n 'coords': ((0.0, 0.0, 0.0), (0.0, 0.0, 1.08744517), (1.02525314, 0.0, -0.36248173),\n (-0.51262658, 0.88789525, -0.36248173), (-0.51262658, -0.88789525, -0.36248173))}\n co2_xyz = {'symbols': ('C', 'O', 'O'), 'isotopes': (12, 16, 16),\n 'coords': ((0.0, 0.0, 0.0), (0.0, 0.0, 1.1594846), (0.0, 0.0, -1.1594846))}\n co_xyz = {'symbols': ('O', 'C'), 'isotopes': (16, 12), 'coords': ((0.0, 0.0, 0.0), (0.0, 0.0, 1.12960815))}\n f2_xyz = {'symbols': ('F', 'F'), 'isotopes': (19, 19), 'coords': ((0.0, 0.0, 0.0), (0.0, 0.0, 1.3952041))}\n ch2o_xyz = {'symbols': ('O', 'C', 'H', 'H'), 'isotopes': (16, 12, 1, 1),\n 'coords': ((0.0, 0.0, 0.674622), (0.0, 0.0, -0.529707),\n (0.0, 0.935488, -1.109367), (0.0, -0.935488, -1.109367))}\n h2o_xyz = {'symbols': ('O', 'H', 'H'), 'isotopes': (16, 1, 1),\n 'coords': ((0.0, 0.0, 0.0), (0.0, 0.0, 0.95691441), (0.92636305, 0.0, -0.23986808))}\n h2_xyz = {'symbols': ('H', 'H'), 'isotopes': (1, 1), 'coords': ((0.0, 0.0, 0.0), (0.0, 0.0, 0.74187646))}\n hcn_xyz = {'symbols': ('C', 'N', 'H'), 'isotopes': (12, 14, 1),\n 'coords': ((0.0, 0.0, -0.500365), (0.0, 0.0, 0.65264), (0.0, 0.0, -1.566291))}\n hf_xyz = {'symbols': ('F', 'H'), 'isotopes': (19, 1), 'coords': ((0.0, 0.0, 0.0), (0.0, 0.0, 0.91538107))}\n n2o_xyz = {'symbols': ('N', 'N', 'O'), 'isotopes': (14, 14, 16),\n 'coords': ((0.0, 0.0, 0.0), (0.0, 0.0, 1.12056262), (0.0, 0.0, 2.30761092))}\n n2_xyz = {'symbols': ('N', 'N'), 'isotopes': (14, 14), 'coords': ((0.0, 0.0, 0.0), (0.0, 0.0, 1.09710935))}\n nh3_xyz = {'symbols': ('N', 'H', 'H', 'H'), 'isotopes': (14, 1, 1, 1),\n 'coords': ((0.0, 0.0, 0.11289), (0.0, 0.938024, -0.263409),\n (0.812353, -0.469012, -0.263409), (-0.812353, -0.469012, -0.263409))}\n oh_xyz = {'symbols': ('O', 'H'), 'isotopes': (16, 1), 'coords': ((0.0, 0.0, 0.0), (0.0, 0.0, 0.967))}\n cl2_xyz = {'symbols': ('Cl', 'Cl'), 'isotopes': (35, 35), 'coords': ((0.0, 0.0, 0.0), (0.0, 0.0, 1.1))}\n\n c2h2 = ARCSpecies(label='C2H2', smiles='C#C', multiplicity=1, charge=0)\n c2h2.initial_xyz = c2h2_xyz\n\n ch4 = ARCSpecies(label='CH4', smiles='C', multiplicity=1, charge=0)\n ch4.initial_xyz = ch4_xyz\n\n co2 = ARCSpecies(label='CO2', smiles='O=C=O', multiplicity=1, charge=0)\n co2.initial_xyz = co2_xyz\n\n co = ARCSpecies(label='CO', smiles='[C-]#[O+]', multiplicity=1, charge=0)\n co.initial_xyz = co_xyz\n\n f2 = ARCSpecies(label='F2', smiles='[F][F]', multiplicity=1, charge=0)\n f2.initial_xyz = f2_xyz\n\n ch2o = ARCSpecies(label='CH2O', smiles='C=O', multiplicity=1, charge=0)\n ch2o.initial_xyz = ch2o_xyz\n\n h2o = ARCSpecies(label='H2O', smiles='O', multiplicity=1, charge=0)\n h2o.initial_xyz = h2o_xyz\n\n h2 = ARCSpecies(label='H2', smiles='[H][H]', multiplicity=1, charge=0)\n h2.initial_xyz = h2_xyz\n\n hcn = ARCSpecies(label='HCN', smiles='C#N', multiplicity=1, charge=0)\n hcn.initial_xyz = hcn_xyz\n\n hf = ARCSpecies(label='HF', smiles='F', multiplicity=1, charge=0)\n hf.initial_xyz = hf_xyz\n\n n2o = ARCSpecies(label='N2O', smiles='[N-]=[N+]=O', multiplicity=1, charge=0)\n n2o.initial_xyz = n2o_xyz\n\n n2 = ARCSpecies(label='N2', smiles='N#N', multiplicity=1, charge=0)\n n2.initial_xyz = n2_xyz\n\n nh3 = ARCSpecies(label='NH3', smiles='N', multiplicity=1, charge=0)\n nh3.initial_xyz = nh3_xyz\n\n oh = 
ARCSpecies(label='OH', smiles='[OH]', multiplicity=2, charge=0)\n oh.initial_xyz = oh_xyz\n\n cl2 = ARCSpecies(label='Cl2', smiles='[Cl][Cl]', multiplicity=1, charge=0)\n cl2.initial_xyz = cl2_xyz\n\n species_list = [c2h2, ch4, co2, co, f2, ch2o, h2o, h2, hcn, hf, n2o, n2, nh3, oh, cl2]\n\n return species_list",
"def getAtoms(self):\n atomNameList = self.getFlagData('ATOM_NAME')\n atomTypeNameList = self.getFlagData('AMBER_ATOM_TYPE')\n self._atomTypeNameList = atomTypeNameList\n massList = self.getFlagData('MASS')\n chargeList = self.getFlagData('CHARGE')\n resIds = self.getFlagData('RESIDUE_POINTER') + [0]\n #uniqAtomTypeId = self.getFlagData('ATOM_TYPE_INDEX') # for LJ\n balanceChargeList = self.balanceCharges(chargeList)\n coords = self.getCoords()\n ACOEFs, BCOEFs = self.getABCOEFs()\n\n atoms = []\n atomTypes = []\n tmpList = [] # a list with unique atom types\n totalCharge = 0.0\n countRes = 0\n id = 0\n for atomName in atomNameList:\n atomTypeName = atomTypeNameList[id]\n if id + 1 == resIds[countRes]:\n resid = countRes #self.residueLabel[countRes]\n countRes += 1\n mass = massList[id]\n charge = balanceChargeList[id]\n totalCharge += charge\n coord = coords[id]\n ACOEF = ACOEFs[id]\n BCOEF = BCOEFs[id]\n atomType = AtomType(atomTypeName, mass, ACOEF, BCOEF)\n if atomTypeName not in tmpList:\n tmpList.append(atomTypeName)\n atomTypes.append(atomType)\n atom = Atom(atomName, atomType, id + 1, resid, mass, charge, coord)\n atoms.append(atom)\n id += 1\n\n if atomTypeName[0].islower():\n self.atomTypeSystem = 'gaff'\n else:\n self.atomTypeSystem = 'amber'\n\n self.printDebug('Balanced TotalCharge %13.10f' % float(totalCharge / qConv))\n self.totalCharge = int(totalCharge)\n\n self.atoms = atoms\n self.atomTypes = atomTypes\n\n self.pbc = None\n if len(coords) == len(atoms) + 2:\n self.pbc = [coords[-2], coords[-1]]\n self.printDebug(\"PBC = '%s\" % self.pbc)\n self.printDebug(\"getAtoms done\")",
"def print_pairing_info(melon_types):\n\n # Fill in the rest",
"def setAtomType4Gromacs(self):\n atNames = [at.atomTypeName for at in self.atomTypes]\n #print atNames\n delAtomTypes = []\n modAtomTypes = []\n atomTypesGromacs = []\n dictAtomTypes = {}\n for at in self.atomTypes:\n atName = at.atomTypeName\n dictAtomTypes[atName] = at\n if atName.islower() and atName.upper() in atNames:\n #print atName, atName.upper()\n atUpper = self.atomTypes[atNames.index(atName.upper())]\n #print at.atomTypeName,at.mass, at.ACOEF, at.BCOEF\n #print atUpper.atomTypeName, atUpper.mass, atUpper.ACOEF, atUpper.BCOEF\n if at.ACOEF is atUpper.ACOEF and at.BCOEF is at.BCOEF:\n delAtomTypes.append(atName)\n else:\n newAtName = atName+'_'\n modAtomTypes.append(atName)\n atomType = AtomType(newAtName, at.mass, at.ACOEF, at.BCOEF)\n atomTypesGromacs.append(atomType)\n dictAtomTypes[newAtName] = atomType\n else:\n atomTypesGromacs.append(at)\n\n atomsGromacs = []\n for a in self.atoms:\n atName = a.atomType.atomTypeName\n if atName in delAtomTypes:\n atom = Atom(a.atomName, dictAtomTypes[atName.upper()], a.id, \\\n a.resid, a.mass, a.charge, a.coords)\n atomsGromacs.append(atom)\n elif atName in modAtomTypes:\n atom = Atom(a.atomName, dictAtomTypes[atName + '_'], a.id, \\\n a.resid, a.mass, a.charge, a.coords)\n atomsGromacs.append(atom)\n else:\n atomsGromacs.append(a)\n\n self.atomTypesGromacs = atomTypesGromacs\n self.atomsGromacs = atomsGromacs\n #print [i.atomTypeName for i in atomTypesGromacs]\n #print modAtomTypes\n #print delAtomTypes",
"def get_map_edge(atom_list):\n edge_map = [atom for atom in atom_list if ((0 < len(identify_bonds(atom, atom_list)) < 3) and (check_connected(atom, identify_bonds(atom, atom_list)) == False))]\n return edge_map",
"def get_atoms_list(chain):\n type_chain = check_type(chain)\n if type_chain == \"protein\":\n atom_id = \"CA\"\n elif type_chain == \"nucleic_acid\":\n atom_id = \"P\"\n atoms = chain.get_atoms()\n atoms_list = []\n for atom in atoms:\n if atom.id == atom_id:\n atoms_list.append(atom)\n return atoms_list",
"def process_atoms(self, molecule_info):\n # Set atoms from molecule information.\n atoms = [x.split()[3] for x in molecule_info]\n atom_ids = [re.search(r'[\\D_]', x).group(0) for x in atoms]\n\n return atoms, atom_ids",
"def get_atoms_list(mmtf_dict):\n\n return [{\n \"x\": x, \"y\": y, \"z\": z, \"alt_loc\": a or None, \"bvalue\": b, \"occupancy\": o,\n \"id\": i, \"is_hetatm\": False\n } for x, y, z, a, b, i, o in zip(\n mmtf_dict[\"xCoordList\"], mmtf_dict[\"yCoordList\"], mmtf_dict[\"zCoordList\"],\n mmtf_dict[\"altLocList\"], mmtf_dict[\"bFactorList\"], mmtf_dict[\"atomIdList\"],\n mmtf_dict[\"occupancyList\"]\n )]",
"def atoms(self):\n return self.qc_mol.atoms + self.br_mol.atoms + self.pc_mol.atoms",
"def contact_types(dist,seq,nframes):\n\tseq = np.array(seq) ; dist = np.array(dist) ; inds = []\n\tut = np.triu_indices(dist.shape[0]) ; dist[ut] = dist.T[ut] # symmetrize matrix. diagonal already 0.\n\trtype1 = [hydrophobic, hydrophilic, negcharge, poscharge, negcharge, poscharge, methionine, aromatic, aromatic]\n\trtype2 = [hydrophobic, hydrophilic, negcharge, poscharge, poscharge, negcharge, aromatic, aromatic, methionine]\n\tlabels = ['phob-phob', 'phil-phil', '- -', '+ +', '- +', '+ -', 'met-aro', 'aro-aro', 'aro-met']\n\t#rtype1 = [negcharge, poscharge, methionine, aromatic]\n\t#rtype2 = [poscharge, negcharge, aromatic, methionine]\n\t#labels = [ '- +', '+ -', 'met-aro', 'aro-met']\n\t# iterate through residue type\n\tfor r1,r2,l in zip(rtype1,rtype2,labels):\n\t\t# extract indices of each atom for each type. Complicated because 'flatten' doesnt seem to work\n\t\tind1 = [item for sublist in [np.where(seq == a)[0] for a in r1] for item in sublist]\n\t\tind2 = [item for sublist in [np.where(seq == a)[0] for a in r2] for item in sublist]\n\t\tprint l, np.sum(dist[ind1].T[ind2]), len(ind1)*len(ind2), np.sum(dist[ind1].T[ind2])/(len(ind1)*len(ind2))\n\t\t#print ind1, ind2, \"\\n\", \"\\n\"\n\treturn None",
"def get_map_anywhere(atom_list):\n anywhere_map = [atom for atom in atom_list if (check_connected(atom, identify_bonds(atom, atom_list)) == False)]\n return anywhere_map",
"def changetype(atms,t1,t2, nt2):\n\n # find total number of atoms\n ntotal = len(atms)\n t1ids = []\n nt1 = 0;\n \n # find total number of t1 atoms and their ids\n for ia in range(ntotal):\n if atms[ia][3] == t1:\n t1ids.append(ia)\n nt1 +=1\n \n nt2 = int(nt2)\n if nt2 < 1:\n print \"$$$ ERROR: not enough atoms of type ## \" + str(t1) + \" ## \" + str(nt1) + \" are available to alloy $$$\"\n return\n\n np.random.shuffle(t1ids)\n rnd = t1ids[:nt2]\n for zombie in rnd:\n atms[zombie][3] = t2\n\n return (atms,rnd)",
"def extract_imeis(cls, imei_tac_map):\n imeis = []\n for tac in imei_tac_map.keys():\n tac_imeis = imei_tac_map.get(tac)\n if tac_imeis:\n imeis = imeis + tac_imeis\n return imeis",
"def get_atypes(self):\n self.atypes = []\n self.hybs = []\n #self.zs = []\n for ai in self.m.GetAtoms():\n hybi = str( ai.GetHybridization() )\n self.hybs.append( hybi )\n zi = ai.GetAtomicNum()\n #self.zs.append( zi )\n si = ai.GetSymbol()\n if hybi == 'SP2':\n ar = ai.GetIsAromatic()\n ar_suffix = '_R' if ar else '_2'\n ap = si + ar_suffix # atomic_pattern\n elif hybi == 'SP3':\n if zi == 16 and ai.GetExplicitValence() == 6:\n ap = si + 'o3'\n elif zi in [9, 17, 35, 53]:\n ap = si\n else:\n ap = si + '_3'\n elif hybi == 'SP':\n ap = si + '_1'\n elif hybi in ['S', ]: #'UNSPECIFIED']:\n ap = si\n else:\n print((' unknown atom type: `%s`'%hybi))\n raise\n self.atypes.append( ap )",
"def get_molecule_dict(chemfile):\n molecule_dict={}\n with open(chemfile,'r') as f:\n for line in f:\n line=line.strip().split('\\t')\n ikey=line[0]\n smi=line[1]\n mol = Chem.MolFromSmiles(smi)\n if not mol:\n raise ValueError(\"Could not generate Mol from SMILES string:\", smi)\n #Chem.SanitizeMol(mol)\n\n atoms={} #atom_idx -> atom features\n bonds={} #bond_idx -> bond features\n atoms2bond={} #(atom_idx1,atom_idx2) -> bond_idx\n \n nodes_by_degree = {d: [] for d in degrees}\n for atom in mol.GetAtoms():\n atom_feature = atom_features(atom)\n atom_id = smi+str(atom.GetIdx())\n atoms[atom.GetIdx()]=atom_feature \n atom_neighbors={aid: [] for aid in atoms.keys()} #atom_idx -> neighbor atom idxs\n bond_neighbors={aid: [] for aid in atoms.keys()} #atom_idx -> neighbor bond idxs\n\n for bond in mol.GetBonds():\n src_atom_idx = bond.GetBeginAtom().GetIdx()\n tgt_atom_idx = bond.GetEndAtom().GetIdx()\n bond_idx = bond.GetIdx()\n bond_neighbors[src_atom_idx].append(bond_idx)\n bond_neighbors[tgt_atom_idx].append(bond_idx)\n bond_feature = bond_features(bond)\n bonds[bond.GetIdx()] = bond_feature\n atom_neighbors[src_atom_idx].append(tgt_atom_idx)\n atom_neighbors[tgt_atom_idx].append(src_atom_idx)\n atoms2bond[(src_atom_idx,tgt_atom_idx)]=bond_idx\n atoms2bond[(tgt_atom_idx,src_atom_idx)]=bond_idx\n \n atoms_by_degree={d: [] for d in degrees}\n bonds_by_degree={d: [] for d in degrees}\n for aid in atom_neighbors:\n neighbor_atoms = atom_neighbors[aid]\n d = len(neighbor_atoms) #degree of the atom\n atoms_by_degree[d].append(aid) #current atom is degree=d\n neighbor_bonds=[]\n for neighbor in neighbor_atoms:\n bond_idx=atoms2bond[(aid,neighbor)]\n neighbor_bonds.append(bond_idx)\n bonds_by_degree[d].append(neighbor_bonds)\n\n neighbor_by_degree = []\n for degree in degrees:\n neighbor_by_degree.append({\n 'atom': atoms_by_degree[degree],\n 'bond': bonds_by_degree[degree]\n })\n \n molecule_dict[ikey]={'smiles':str(smi),\n 'neighbor_by_degree':neighbor_by_degree,\n 'atoms':atoms,'bonds':bonds,\n 'atom_neighbor':atom_neighbors,\n 'bond_neighbor':bond_neighbors}\n return molecule_dict",
"def extract_mol_info(molecule_etree):\n smiles = extract_smiles(molecule_etree)\n alpha = extract_and_check_alpha(molecule_etree)\n beta = extract_and_check_beta(molecule_etree)\n return smiles, alpha, beta",
"def test_parses_map_3(self):\n p = GPBEC()\n p.parse(\"GPBEC,220516,5130.02,N,00046.34,W,213.8,T,218.0,M,0004.6,N,EGLM,X*11\")\n\n self.assertEquals(\"GPBEC\", p.sen_type)\n self.assertEquals(\"220516\", p.timestamp)\n self.assertEquals(\"5130.02\", p.waypoint_lat)\n self.assertEquals(\"N\", p.waypoint_lat_dir)\n self.assertEquals(\"00046.34\", p.waypoint_lon)\n self.assertEquals(\"W\", p.waypoint_lon_dir)\n self.assertEquals(\"213.8\", p.bearing_true)\n self.assertEquals(\"T\", p.bearing_true_sym)\n self.assertEquals(\"218.0\", p.bearing_mag)\n self.assertEquals(\"M\", p.bearing_mag_sym)\n self.assertEquals(\"0004.6\", p.nautical_miles)\n self.assertEquals(\"N\", p.nautical_miles_sym)\n self.assertEquals(\"EGLM\", p.waypoint_id)\n self.assertEquals(\"X\", p.faa_mode)\n self.assertEquals(\"11\", p.checksum)",
"def atoms(self, resnum, chain_id, icode=' ', alt=' ', model_num = 0):\n return [atm for atm in self.residue(resnum, chain_id, icode, alt, model_num)]",
"def get_contact_atoms(self,cutoff=8.5,chain1='A',chain2='B',\n extend_to_residue=False,only_backbone_atoms=False,\n excludeH=False,return_only_backbone_atoms=False,return_contact_pairs=False):\n\n # xyz of the chains\n xyz1 = np.array(self.get('x,y,z',chainID=chain1))\n xyz2 = np.array(self.get('x,y,z',chainID=chain2))\n\n # index of b\n index2 = self.get('rowID',chainID=chain2)\n\n # resName of the chains\n resName1 = np.array(self.get('resName',chainID=chain1))\n #resName2 = np.array(self.get('resName',chainID=chain2))\n\n # atomnames of the chains\n atName1 = np.array(self.get('name',chainID=chain1))\n atName2 = np.array(self.get('name',chainID=chain2))\n\n\n # loop through the first chain\n # TO DO : loop through the smallest chain instead ...\n index_contact_1,index_contact_2 = [],[]\n index_contact_pairs = {}\n\n for i,x0 in enumerate(xyz1):\n\n # compute the contact atoms\n contacts = np.where(np.sqrt(np.sum((xyz2-x0)**2,1)) <= cutoff )[0]\n\n # exclude the H if required\n if excludeH and atName1[i][0] == 'H':\n continue\n\n if len(contacts)>0 and any([not only_backbone_atoms, atName1[i] in self.backbone_type]):\n\n # the contact atoms\n index_contact_1 += [i]\n index_contact_2 += [index2[k] for k in contacts if ( any( [atName2[k] in self.backbone_type, not only_backbone_atoms]) and not (excludeH and atName2[k][0]=='H') ) ]\n\n # the pairs\n pairs = [index2[k] for k in contacts if any( [atName2[k] in self.backbone_type, not only_backbone_atoms] ) and not (excludeH and atName2[k][0]=='H') ]\n if len(pairs) > 0:\n index_contact_pairs[i] = pairs\n\n # get uniques\n index_contact_1 = sorted(set(index_contact_1))\n index_contact_2 = sorted(set(index_contact_2))\n\n # if no atoms were found\n if len(index_contact_1)==0:\n print('Warning : No contact atoms detected in pdb2sql')\n\n # extend the list to entire residue\n if extend_to_residue:\n index_contact_1,index_contact_2 = self._extend_contact_to_residue(index_contact_1,index_contact_2,only_backbone_atoms)\n\n\n # filter only the backbone atoms\n if return_only_backbone_atoms and not only_backbone_atoms:\n\n # get all the names\n # there are better ways to do that !\n atNames = np.array(self.get('name'))\n\n # change the index_contacts\n index_contact_1 = [ ind for ind in index_contact_1 if atNames[ind] in self.backbone_type ]\n index_contact_2 = [ ind for ind in index_contact_2 if atNames[ind] in self.backbone_type ]\n\n # change the contact pairs\n tmp_dict = {}\n for ind1,ind2_list in index_contact_pairs.items():\n\n if atNames[ind1] in self.backbone_type:\n tmp_dict[ind1] = [ind2 for ind2 in ind2_list if atNames[ind2] in self.backbone_type]\n\n index_contact_pairs = tmp_dict\n\n # not sure that's the best way of dealing with that\n if return_contact_pairs:\n return index_contact_pairs\n else:\n return index_contact_1,index_contact_2",
"def getAtomTypes(self):\n return self._raw_data['AMBER_ATOM_TYPE']",
"def get_binding_motifs(seq):\n out = {'type_1': [], 'type_2a': [], 'type_2b': []}\n for i in range(len(seq) - 9 + 1):\n kmer = seq[i:i + 9]\n out['type_1'].append(kmer[3:8])\n for i in range(len(seq) - 15 + 1):\n kmer = seq[i:i + 15]\n tail = kmer[5] + kmer[7] + kmer[9] + kmer[10]\n out['type_2a'].append(kmer[4] + tail)\n out['type_2b'].append(kmer[2] + tail)\n counted = {k: countit(v) for k, v in out.items()}\n return counted",
"def getTypes():\n\t\n\ttranslationTable = []\n\tfor x in typePrimitive:\n\t\ttranslationTable.extend(x[0])\n\t\n\tid = 0\n\ttypes = []\n\tmax = 0\n\tfor x in typePrimitive:\n\t\t\n\t\tbinds = []\n\t\tfor y in x[2]:\n\t\t\tbinds.append(translationTable.index(y))\n\t\t\n\t\tif (x[4] != False) and (x[4] > max):\n\t\t\tmax = x[4]\n\t\t\t\n\t\t\n\t\ttypes.append({'name':x[0],'nSites':x[1],'binds':binds,'sym':x[3],'id':id,'max':x[4]})\n\t\tid+=1\n\t\n\treturn (max,types)",
"def contact_maps(coors):\n\timport MDAnalysis.analysis.distances as mdad\n\treturn mdad.distance_array(coors, coors)"
] | [
"0.6316456",
"0.5423948",
"0.54129374",
"0.5391436",
"0.53679574",
"0.52848315",
"0.5265358",
"0.5238747",
"0.5222712",
"0.52078694",
"0.51621914",
"0.5157384",
"0.5152001",
"0.51297927",
"0.51236004",
"0.5117469",
"0.5099584",
"0.5091966",
"0.5074277",
"0.5069184",
"0.5064806",
"0.5032349",
"0.5030685",
"0.50303215",
"0.5011387",
"0.50003725",
"0.498693",
"0.49800622",
"0.49762195",
"0.49695787"
] | 0.5589476 | 1 |
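Each tuple produced by the document in the row above is (type, i, j, k, l, j, k, l, m): two overlapping four-atom torsions that share atoms j, k, l. A sketch of how such entries could feed OpenMM's CMAPTorsionForce.addTorsion, assuming `prmtop` and an already-constructed `force` object (both names are illustrative, not from the dataset):

for (map_index, a1, a2, a3, a4, b1, b2, b3, b4) in prmtop.getCMAPDihedrals():
    # first four atoms define the phi-like torsion, last four the psi-like torsion
    force.addTorsion(map_index, a1, a2, a3, a4, b1, b2, b3, b4)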
Return list of atom pairs, chargeProduct, rMin and epsilon for each 14 interaction | def get14Interactions(self):
dihedralPointers = self._raw_data["DIHEDRALS_INC_HYDROGEN"] \
+self._raw_data["DIHEDRALS_WITHOUT_HYDROGEN"]
returnList=[]
charges=self.getCharges()
length_conv = units.angstrom.conversion_factor_to(units.nanometers)
ene_conv = units.kilocalories_per_mole.conversion_factor_to(
units.kilojoules_per_mole)
if self.chamber:
parm_acoef = [float(x) for x in self._raw_data['LENNARD_JONES_14_ACOEF']]
parm_bcoef = [float(x) for x in self._raw_data['LENNARD_JONES_14_BCOEF']]
else:
parm_acoef = [float(x) for x in self._raw_data['LENNARD_JONES_ACOEF']]
parm_bcoef = [float(x) for x in self._raw_data['LENNARD_JONES_BCOEF']]
nbidx = [int(x) for x in self._raw_data['NONBONDED_PARM_INDEX']]
numTypes = self.getNumTypes()
atomTypeIndexes=self._getAtomTypeIndexes()
for ii in range(0, len(dihedralPointers), 5):
if int(dihedralPointers[ii+2])>0 and int(dihedralPointers[ii+3])>0:
iAtom = int(dihedralPointers[ii])//3
lAtom = int(dihedralPointers[ii+3])//3
iidx = int(dihedralPointers[ii+4]) - 1
chargeProd = charges[iAtom]*charges[lAtom]
typ1 = atomTypeIndexes[iAtom] - 1
typ2 = atomTypeIndexes[lAtom] - 1
idx = nbidx[numTypes*typ1+typ2] - 1
if idx < 0: continue
a = parm_acoef[idx]
b = parm_bcoef[idx]
try:
epsilon = b * b / (4 * a) * ene_conv
rMin = (2 * a / b) ** (1/6.0) * length_conv
except ZeroDivisionError:
rMin = 1
epsilon = 0
try:
iScee = float(self._raw_data['SCEE_SCALE_FACTOR'][iidx])
except KeyError:
iScee = 1.0 if self.chamber else 1.2
try:
iScnb = float(self._raw_data['SCNB_SCALE_FACTOR'][iidx])
except KeyError:
iScnb = 1.0 if self.chamber else 2.0
returnList.append((iAtom, lAtom, chargeProd, rMin, epsilon, iScee, iScnb))
return returnList | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_charged_pairs(self):\n charges = [ ai.GetFormalCharge() for ai in self.m.GetAtoms() ]\n # search for the pairs of atoms with smarts like '[N+](=O)[O-]'\n patt = '[+1]~[-1]'\n q = Chem.MolFromSmarts(patt)\n cpairs = np.array( self.m.GetSubstructMatches(q) ).astype(np.int)\n self.charges = charges\n self.cpairs = cpairs",
"def energy(self):\n sum_energy = 0.0\n for i in range(0,self.natoms-1):\n for j in range(i+1,self.natoms):\n rij = (self.atoms[i].xyz - self.atoms[j].xyz)\n rij = rij - self.pbc_correction(rij)\n mag_rij = la.norm(rij)\n sum_energy = sum_energy + self.pair_energy(self.epsilon, self.sigma, mag_rij) \n return sum_energy",
"def getAtoms(self):\n atomNameList = self.getFlagData('ATOM_NAME')\n atomTypeNameList = self.getFlagData('AMBER_ATOM_TYPE')\n self._atomTypeNameList = atomTypeNameList\n massList = self.getFlagData('MASS')\n chargeList = self.getFlagData('CHARGE')\n resIds = self.getFlagData('RESIDUE_POINTER') + [0]\n #uniqAtomTypeId = self.getFlagData('ATOM_TYPE_INDEX') # for LJ\n balanceChargeList = self.balanceCharges(chargeList)\n coords = self.getCoords()\n ACOEFs, BCOEFs = self.getABCOEFs()\n\n atoms = []\n atomTypes = []\n tmpList = [] # a list with unique atom types\n totalCharge = 0.0\n countRes = 0\n id = 0\n for atomName in atomNameList:\n atomTypeName = atomTypeNameList[id]\n if id + 1 == resIds[countRes]:\n resid = countRes #self.residueLabel[countRes]\n countRes += 1\n mass = massList[id]\n charge = balanceChargeList[id]\n totalCharge += charge\n coord = coords[id]\n ACOEF = ACOEFs[id]\n BCOEF = BCOEFs[id]\n atomType = AtomType(atomTypeName, mass, ACOEF, BCOEF)\n if atomTypeName not in tmpList:\n tmpList.append(atomTypeName)\n atomTypes.append(atomType)\n atom = Atom(atomName, atomType, id + 1, resid, mass, charge, coord)\n atoms.append(atom)\n id += 1\n\n if atomTypeName[0].islower():\n self.atomTypeSystem = 'gaff'\n else:\n self.atomTypeSystem = 'amber'\n\n self.printDebug('Balanced TotalCharge %13.10f' % float(totalCharge / qConv))\n self.totalCharge = int(totalCharge)\n\n self.atoms = atoms\n self.atomTypes = atomTypes\n\n self.pbc = None\n if len(coords) == len(atoms) + 2:\n self.pbc = [coords[-2], coords[-1]]\n self.printDebug(\"PBC = '%s\" % self.pbc)\n self.printDebug(\"getAtoms done\")",
"def compute_atom_pair_energy(pdb_filename, ligand_params, interface_cutoff = 21.0):\n if type(ligand_params) is str:\n ligand_params = [ligand_params]\n ligand_params = Vector1([str(ligand_params)])\n \n pose = Pose()\n res_set = pose.conformation().modifiable_residue_type_set_for_conf()\n res_set.read_files_for_base_residue_types( ligand_params )\n \n pose.conformation().reset_residue_type_set_for_conf( res_set )\n pose_from_file(pose, str(pdb_filename))\n scorefxn = create_score_function('ref2015')\n pose_score = scorefxn(pose)\n \n #detect interface\n fold_tree = pose.fold_tree()\n for jump in range(1, pose.num_jump()+1):\n name = pose.residue(fold_tree.downstream_jump_residue(jump)).name()\n if name == 'WER':\n break\n interface = Interface(jump)\n interface.distance(interface_cutoff)\n interface.calculate(pose)\n \n energies = []\n en = defaultdict(lambda:np.zeros((1,4)))\n keys = []\n for rnum1 in range(1, pose.total_residue() + 1):\n if interface.is_interface(rnum1):\n r1 = pose.residue(rnum1)\n for a1 in range(1, len(r1.atoms()) + 1):\n seq1 = pose.pdb_info().pose2pdb(rnum1).strip().replace(' ','-')\n at1 = r1.atom_name(a1).strip()\n key1 = seq1 + '-' + at1\n for rnum2 in range(rnum1+1, pose.total_residue() + 1):\n if interface.is_interface(rnum2):\n r2 = pose.residue(rnum2)\n for a2 in range(1, len(r2.atoms())+1):\n seq2 = pose.pdb_info().pose2pdb(rnum2).strip().replace(' ','-')\n at2 = r2.atom_name(a2).strip()\n key2 = seq2 + '-' + at2\n ee = etable_atom_pair_energies(r1, a1, r2, a2, scorefxn)\n if all(e == 0.0 for e in ee):\n continue\n en[key1] += np.array(ee)\n en[key2] += np.array(ee)\n energy_matrix = np.array([v for v in en.values()])\n return list(en.keys()), energy_matrix",
"def get_representatives( self, t) :\n if t == 1 : return [(1,0,0,1)]\n \n rep_list = []\n \n for x,y in P1List(t):\n ## we calculate a pair c,d satisfying a minimality condition\n ## to make later multiplications cheaper\n (_, d, c) = Integer(x)._xgcd(Integer(y), minimal=True)\n rep_list.append((x,y,-c,d))\n \n return rep_list",
"def atoms(self):\n print('processing [ atoms ]')\n x = \"\"\"[ atoms ]\n; nr type resnr residu atom cgnr charge mass\n\"\"\"\n qtot = 0.0\n fmt = '{nr:6d} {type} 1 {residu} {atom} 1 {charge:14.11f} {mass:7.4f} ; qtot {qtot}\\n'\n for i, node in enumerate(self):\n vals = {}\n vals['nr'] = 1+i\n vals['type'] = node.atom.name\n vals['residu'] = self.molname\n vals['atom'] = node.atom.name\n vals['charge'] = node.properties['charge']\n qtot += node.properties['charge']\n vals['qtot'] = qtot\n vals['mass'] = node.properties['mass']\n x += fmt.format(**vals)\n return x + '\\n'",
"def sexp(p,n,res_field=RR):\n assert n>=2, \"Carleman matrix must at least be of size 2 to retrieve the coefficients. But given was \" + repr(n)\n CM = p.carleman_matrix(n)\n ev = [ r[0] for r in CM.charpoly().roots(QQbar) ]\n assert len(ev) == n, \"Carleman matrix must have exactly \" + repr(n) + \"eigenvalues, but has \" + repr(len(ev))\n\n #We want to compute:\n #sum over k: evk^t*(CM-ev1*I)*(CM-ev2*I)*. omit k * (CM-evn*I)/(evk-ev1)*.. omit k *(evk-evn)\n Char = [0]*n\n for k in range(n):\n #here is possibility for improvement of precision\n #to separate the fractional from the root parts\n #expanding the product\n Char[k] = CM - ev[k]*identity_matrix(n)\n\n #we want to have the first row of the product of the matrices\n #thatswhy we mulitply in front with:\n prod = vector(QQbar,[0,1]+[0]*(n-2))\n prodwo = [0]*n\n for k in range(n):\n prodwo[k]=prod #these are the first terms until k-1\n\n #no need to continue\n if k == n-1:\n break\n\n #and we add the terms starting with k+1\n for i in range(k+1,n):\n prodwo[k] = prodwo[k] * Char[i]\n\n prod = prod * Char[k]\n\n sprodwo = [0]*n\n for k in range(n):\n if k==0:\n sprodwo[k] = ev[k] - ev[1]\n start = 2\n else:\n sprodwo[k] = ev[k] - ev[0]\n start = 1\n\n for i in range(start,n):\n if i != k:\n sprodwo[k] = sprodwo[k] * (ev[k] - ev[i])\n\n for k in range(n):\n print ev[k]\n print prodwo[k][0]/sprodwo[k]\n print res_field\n #return lambda t: sum(res_field(ev[k])**t*res_field(prodwo[k][0]/sprodwo[k]) for k in range(n))\n return [ev,[prodwo[k][0]/sprodwo[k] for k in range(n)]]",
"def genPrimerPairs_5Ext(primer_length=20, anneal_length=10, GC_low=40, GC_high=60):\n\n print('Primers for 5\\' extension half-asstemers')\n\n forwTemplate5_3 = GenOligoGC(primer_length,GC_low, GC_high)\n \"\"\"re.match checks if the first 2 Nuc are GC in the forward and backwards direction\"\"\"\n while not (re.match(\"[GC]{2}\",str(forwTemplate5_3)) and\n re.match(\"[GC]{2}\", str(forwTemplate5_3[::-1])) and\n re.match(\"[GC]{2}\", str(forwTemplate5_3[10:12]))):\n\n forwTemplate5_3 = GenOligoGC(primer_length,GC_low, GC_high)\n\n forwTemp3_5 = forwTemplate5_3[::-1]\n forwPrimer5_3 = forwTemp3_5.complement()\n print(f\"Template Seq 3\\' - > 5\\': {forwTemp3_5}\")\n print(f\"ForwPrimer Seq 5\\' - > 3\\': {forwPrimer5_3}\")\n\n forwPrimer_f10 = forwPrimer5_3[:10]\n print(f\"First 10 Nucleotides of forward primer: {forwPrimer_f10}\")\n\n revPrimer_f10 = GenOligoGC(10,GC_low, GC_high)\n while not re.match(\"[GC]{2}\",str(revPrimer_f10)):\n revPrimer_f10 = GenOligoGC(10,GC_low, GC_high)\n\n revPrimer5_3 = revPrimer_f10 + forwPrimer_f10\n\n print(f\"RevPrimer Seq 5\\' - > 3\\': {revPrimer5_3}\")\n\n return forwPrimer5_3, revPrimer5_3",
"def elemental_descriptor(A1_ion, A2_ion, B_ion):\n ele_A1 = mg.Element(A1_ion)\n ele_A2 = mg.Element(A2_ion)\n ele_B = mg.Element(B_ion)\n ele_O = mg.Element('O') \n # A/B ion oxidation state \n common_oxidation_states_A1 = ele_A1.common_oxidation_states[0]\n common_oxidation_states_A2 = ele_A2.common_oxidation_states[0]\n common_oxidation_states_A = np.mean(common_oxidation_states_A1 + common_oxidation_states_A2)\n common_oxidation_states_B = ele_B.common_oxidation_states[0]\n # ionic radius property\n ionic_radius_A1 = float(str(ele_A1.average_ionic_radius)[:-4])\n ionic_radius_A2 = float(str(ele_A2.average_ionic_radius)[:-4])\n ionic_radius_A = (ionic_radius_A1+ ionic_radius_A2)/2\n ionic_radius_B = float(str(ele_B.average_ionic_radius)[:-4])\n ionic_radius_O = float(str(ele_O.average_ionic_radius)[:-4])\n # Tolerance factor \n TF = (ionic_radius_A + ionic_radius_O)/(np.sqrt(2)*(ionic_radius_B + ionic_radius_O))\n # Octahedral factor\n OF = ionic_radius_B/ionic_radius_O \n # ionic_radius ratios\n ionic_ration_AO = ionic_radius_A / ionic_radius_O\n ionic_ration_BO = ionic_radius_B / ionic_radius_O\n # averaged electronegativity for A and B atoms\n Pauling_electronegativity_A1 = ele_A1.X\n Pauling_electronegativity_A2 = ele_A2.X\n Pauling_electronegativity_A = (Pauling_electronegativity_A1 + Pauling_electronegativity_A2)/2\n Pauling_electronegativity_B = ele_B.X\n Pauling_electronegativity_O = ele_O.X\n # Difference in the electronegativity for A-O and B-O\n Diff_A_O = Pauling_electronegativity_A - Pauling_electronegativity_O\n Diff_B_O = Pauling_electronegativity_B - Pauling_electronegativity_O\n return [common_oxidation_states_A, common_oxidation_states_B, Pauling_electronegativity_A, Pauling_electronegativity_B, TF, OF, ionic_ration_AO, ionic_ration_BO, Diff_A_O, Diff_B_O]",
"def compute_hydration_energies(molecules, parameters):\n\n energies = dict() # energies[index] is the computed solvation energy of molecules[index]\n\n platform = openmm.Platform.getPlatformByName(\"Reference\")\n\n for molecule in molecules:\n # Create OpenMM System.\n system = openmm.System()\n for atom in molecule.GetAtoms():\n mass = OEGetDefaultMass(atom.GetAtomicNum())\n system.addParticle(mass * units.amu)\n\n # Add nonbonded term.\n # nonbonded_force = openmm.NonbondedSoftcoreForce()\n # nonbonded_force.setNonbondedMethod(openmm.NonbondedForce.NoCutoff)\n # for atom in molecule.GetAtoms():\n # charge = 0.0 * units.elementary_charge\n # sigma = 1.0 * units.angstrom\n # epsilon = 0.0 * units.kilocalories_per_mole\n # nonbonded_force.addParticle(charge, sigma, epsilon)\n # system.addForce(nonbonded_force)\n\n # Add GBVI term\n # gbvi_force = openmm.GBVISoftcoreForce()\n gbvi_force = openmm.GBVIForce() \n gbvi_force.setNonbondedMethod(openmm.GBVIForce.NoCutoff) # set no cutoff\n gbvi_force.setSoluteDielectric(1)\n gbvi_force.setSolventDielectric(78)\n\n # Use scaling method.\n # gbvi_force.setBornRadiusScalingMethod(openmm.GBVISoftcoreForce.QuinticSpline)\n # gbvi_force.setQuinticLowerLimitFactor(0.75)\n # gbvi_force.setQuinticUpperBornRadiusLimit(50.0*units.nanometers)\n\n # Build indexable list of atoms.\n atoms = [atom for atom in molecule.GetAtoms()] \n \n # Assign GB/VI parameters.\n for atom in molecule.GetAtoms(): \n atomtype = atom.GetStringData(\"gbvi_type\") # GBVI atomtype\n charge = atom.GetPartialCharge() * units.elementary_charge\n radius = parameters['%s_%s' % (atomtype, 'radius')] * units.angstroms\n gamma = parameters['%s_%s' % (atomtype, 'gamma')] * units.kilocalories_per_mole \n # gamma *= -1.0 # DEBUG\n lambda_ = 1.0 # fully interacting\n # gbvi_force.addParticle(charge, radius, gamma, lambda_) # for GBVISoftcoreForce\n gbvi_force.addParticle(charge, radius, gamma) # for GBVIForce\n\n # Add bonds.\n for bond in molecule.GetBonds():\n # Get atom indices.\n iatom = bond.GetBgnIdx()\n jatom = bond.GetEndIdx()\n # Get bond length.\n (xi, yi, zi) = molecule.GetCoords(atoms[iatom])\n (xj, yj, zj) = molecule.GetCoords(atoms[jatom])\n distance = math.sqrt((xi-xj)**2 + (yi-yj)**2 + (zi-zj)**2) * units.angstroms\n # Identify bonded atoms to GBVI.\n gbvi_force.addBond(iatom, jatom, distance)\n\n # Add the force to the system.\n system.addForce(gbvi_force)\n \n # Build coordinate array.\n natoms = len(atoms)\n coordinates = units.Quantity(numpy.zeros([natoms, 3]), units.angstroms)\n for (index,atom) in enumerate(atoms):\n (x,y,z) = molecule.GetCoords(atom)\n coordinates[index,:] = units.Quantity(numpy.array([x,y,z]),units.angstroms) \n \n # Create OpenMM Context.\n timestep = 1.0 * units.femtosecond # arbitrary\n integrator = openmm.VerletIntegrator(timestep)\n context = openmm.Context(system, integrator, platform)\n\n # Set the coordinates.\n context.setPositions(coordinates)\n \n # Get the energy\n state = context.getState(getEnergy=True)\n energies[molecule] = state.getPotentialEnergy()\n\n return energies",
"def geometric_descriptor(element_dict):\n # encode the orbital types\n category = {'s': 1, 'p': 2, 'd': 3, 'f': 4};\n # total number of atoms in a perovskite structure\n N = sum(element_dict.values())\n # obtain array of atomic properties for each element type\n atomic_number_list = []\n atomic_mass_list = []\n atomic_radius_list = []\n mendeleev_no_list = []\n common_oxidation_states_list = []\n Pauling_electronegativity_list = []\n row_list = []\n group_list = []\n block_list = []\n thermal_conductivity_list = []\n boiling_point_list = []\n melting_point_list = []\n average_ionic_radius_list = []\n molar_volume_list = []\n atomic_orbitals_list = []\n for item in element_dict:\n # extract atomic property from pymatgen\n ele = mg.Element(item)\n atomic_number = ele.Z\n atomic_mass = float(str(ele.atomic_mass)[:-4])\n atomic_radius = float(str(ele.atomic_radius)[:-4])\n mendeleev_no = ele.mendeleev_no\n common_oxidation_states = ele.common_oxidation_states[0]\n Pauling_electronegativity = ele.X\n row = ele.row\n group = ele.group\n block = ele.block\n thermal_conductivity = float(str(ele.thermal_conductivity)[:-12])\n boiling_point = float(str(ele.boiling_point)[: -2])\n melting_point = float(str(ele.melting_point)[: -2])\n average_ionic_radius = float(str(ele.average_ionic_radius)[:-4])\n molar_volume = float(str(ele.molar_volume)[: -5])\n if '6s' in ele.atomic_orbitals.keys():\n atomic_orbitals = ele.atomic_orbitals['6s']\n elif '4s' in ele.atomic_orbitals.keys():\n atomic_orbitals = ele.atomic_orbitals['4s']\n else:\n atomic_orbitals = ele.atomic_orbitals['2s']\n # calculate the array of atomic properties for all atoms \n atomic_number_list += [atomic_number]*element_dict[item]\n atomic_mass_list += [atomic_mass]*element_dict[item]\n atomic_radius_list += [atomic_radius]*element_dict[item]\n mendeleev_no_list += [mendeleev_no]*element_dict[item]\n common_oxidation_states_list += [common_oxidation_states]*element_dict[item]\n Pauling_electronegativity_list += [Pauling_electronegativity]*element_dict[item]\n row_list += [row]*element_dict[item]\n group_list += [group]*element_dict[item]\n block_list += [category[block]]*element_dict[item]\n thermal_conductivity_list += [thermal_conductivity]*element_dict[item]\n boiling_point_list += [boiling_point]*element_dict[item]\n melting_point_list += [melting_point]*element_dict[item]\n average_ionic_radius_list += [average_ionic_radius]*element_dict[item]\n molar_volume_list += [molar_volume]*element_dict[item]\n atomic_orbitals_list += [atomic_orbitals]*element_dict[item]\n return [generalized_mean(np.array(atomic_number_list), 1, N)] + [generalized_mean(np.array(atomic_radius_list), 1, N)] + [generalized_mean(np.array(mendeleev_no_list), 1, N)] + [generalized_mean(np.array(common_oxidation_states_list), 1, N)] + [generalized_mean(np.array(Pauling_electronegativity_list), 1, N)] + [generalized_mean(np.array(thermal_conductivity_list), 1, N)] + [generalized_mean(np.array(average_ionic_radius_list), 1, N)] + [generalized_mean(np.array(atomic_orbitals_list), 1, N)]",
"def calc_sim_collector(self, key, values):\r\n (rest1, rest2), common_ratings = key, values\r\n\t #your code here\r\n yield (rest1, rest2), (rho, n_common)",
"def specCoeffsInReaction(r):\n c = []\n for k in range(__data.nsp):\n nu = (__data.g.product_stoich_coeff(k,r) -\n __data.g.reactant_stoich_coeff(k,r))\n if (nu <> 0):\n c.append((__data.g.species_name(k),nu))\n return c",
"def lizardite_atom():\n\n rho = 2515.5\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 229.08; C[0,1] = 89.044; C[0,2] = 13.558; C[0,3] = -0.0001; C[0,4] = 4.6025; C[0,5] = 0.0001\n C[1,0] = C[0,1]; C[1,1] = 229.08; C[1,2] = 13.557; C[1,3] = -0.0001; C[1,4] = -4.6016; C[1,5] = 0.0001\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 45.838; C[2,3] = -0.0001; C[2,4] = 0.0015; C[2,5] = 0.0001\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 12.765; C[3,4] = -0.0001; C[3,5] = -4.4598\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 12.774; C[4,5] = 0.0001\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 70.0166\n\n return C, rho",
"def genPrimerPairs_3Ext(primer_length=20, anneal_length=10, GC_low=40, GC_high=60):\n\n print('Primers for 3\\' extension half-asstemers')\n\n\n forwTemplate5_3 = GenOligoGC(primer_length,GC_low, GC_high)\n \"\"\"re.match checks if the first 2 Nuc are GC in the forward and backwards direction\"\"\"\n while not (re.match(\"[GC]{2}\",str(forwTemplate5_3)) and\n re.match(\"[GC]{2}\", str(forwTemplate5_3[::-1])) and\n re.match(\"[GC]{2}\", str(forwTemplate5_3[8:10]))):\n\n forwTemplate5_3 = GenOligoGC(primer_length,GC_low, GC_high)\n\n forwTemp3_5 = forwTemplate5_3[::-1]\n forwPrimer5_3 = forwTemp3_5.complement()\n print(f\"Template Seq 3\\' - > 5\\': {forwTemp3_5}\")\n print(f\"ForwPrimer Seq 5\\' - > 3\\': {forwPrimer5_3}\")\n\n forwPrimer_L10 = forwPrimer5_3[10:]\n print(f\"Last 10 Nucleotides of forward primer: {forwPrimer_L10}\")\n\n revPrimer_L10 = GenOligoGC(10,GC_low, GC_high)\n while not re.match(\"[GC]{2}\",str(revPrimer_L10[::-1])):\n revPrimer_L10 = GenOligoGC(10,GC_low, GC_high)\n\n \"\"\"First 10 Nuc of rev primer must be identical to last 10 Nuc of forward Primer\"\"\"\n revPrimer5_3 = forwPrimer_L10 + revPrimer_L10\n\n print(f\"RevPrimer Seq 5\\' - > 3\\': {revPrimer5_3}\")\n\n return forwPrimer5_3, revPrimer5_3",
"def _aniso_forces_and_energies():\n # holds the forces, energies, and torques associated with an anisotropic\n # pair potential.\n FETtuple = namedtuple('FETtuple', [\n 'pair_potential', 'pair_potential_params', 'forces', 'energies',\n 'torques'\n ])\n\n path = Path(__file__).parent / \"aniso_forces_and_energies.json\"\n with path.open() as f:\n computations = json.load(f)\n fet_list = []\n for pot in computations:\n for i, params in enumerate(expand_dict(\n computations[pot][\"params\"])):\n fet_list.append(\n FETtuple(\n getattr(md.pair.aniso, pot),\n params,\n computations[pot][\"forces\"][i],\n computations[pot][\"energies\"][i],\n computations[pot][\"torques\"][i],\n ))\n return fet_list",
"def GetBonds(Bonds):\n b = sorted([(min(x), max(x)) for x in Bonds])\n Bonds13, Bonds14 = [], []\n for (a1,b1) in b:\n #check for bonds with a1 at the center of a 1-3 interaction,\n #letting b1 be the higher number of the two flanking\n clist = [b2 for (a2,b2) in b if a2 == a1 and b2 < b1] + \\\n [a2 for (a2,b2) in b if b2 == a1 and a2 < b1]\n Bonds13.extend([(min(c,b1), max(c,b1)) for c in clist])\n #check for bonds with b1 at the center of a 1-3 interaction,\n #letting a1 be the higher number of the two flanking\n clist = [b2 for (a2,b2) in b if a2 == b1 and b2 < a1] + \\\n [a2 for (a2,b2) in b if b2 == b1 and a2 < a1]\n Bonds13.extend([(min(c,a1), max(c,a1)) for c in clist])\n #find atoms connected to a1\n clist = [b2 for (a2,b2) in b if a1==a2 and not b1==b2] +\\\n [a2 for (a2,b2) in b if a1==b2 and not b1==a2]\n #find atoms connected to b1\n dlist = [a2 for (a2,b2) in b if b1==b2 and not a1==a2] +\\\n [b2 for (a2,b2) in b if b1==a2 and not a1==b2]\n Bonds14.extend([(min(c,d), max(c,d)) for c in clist for d in dlist])\n Bonds1213 = b + Bonds13\n #sort\n Bonds1213.sort()\n Bonds14.sort()\n #get unique values in case of loops\n Bonds1213 = [x for (i,x) in enumerate(Bonds1213) if i == 0 or x != Bonds1213[i-1]]\n Bonds14 = [x for (i,x) in enumerate(Bonds14) if i == 0 or x != Bonds14[i-1]]\n #convert to arrays \n Bonds1213 = array(Bonds1213, int)\n Bonds14 = array(Bonds14, int)\n return Bonds1213, Bonds14",
"def pdf_calculator_pair(initial_config_data,atom_type_1,atom_type_2):\n\tatom_1 = pd.DataFrame([])\n\tatom_2 = pd.DataFrame([])\n\t\n\tfor (index,atom) in initial_config_data.iterrows():\n\t\tif atom[\"atom_id\"] == atom_type_1:\n\t\t\tatom_1 = atom_1.append(atom)\n\t\tif atom[\"atom_id\"] == atom_type_2:\n\t\t\tatom_2 = atom_2.append(atom)\n\tfor (in_1, a1) in atom_1.iterrows():\n\t\tfor (in_2,a2) in atom_2.iterrows():\n\t\t\tat_1 = Atom.from_ds(a1)\n\t\t\tat_2 = Atom.from_ds(a2)\n\t\t\tAtom.distance_pbc(at_1, at_2)\n\t\t\t# to be continued using binning if necessary...",
"def _get_parameters(n, j, domain, g, ncap):\n alphas, betas = rc.recurrenceCoefficients(n - 2, lb=domain[0], rb=domain[1],\n j=j, g=g, ncap=ncap)\n omegas = g * np.array(alphas)\n ts = g * np.sqrt(np.array(betas)[1::])\n c0 = np.sqrt(betas[0])\n return omegas, ts, c0",
"def qssa(k11=5e-8, k=3.3e-10, k10=1e-6, mn=30):\n density = 1.42 * 1000 # the density of Delrin NC100, [g/dm3]\n cs = density / mn # reactant groups concentration, [mol/L]\n mass_loss = np.zeros_like(t)\n rs = np.zeros_like(t)\n acid_cons = np.array(c_water)\n for i in range(steps-1):\n # assume the reaction only happen with pH < 3 situation\n if ph[i] < 4:\n rs[i] = ((k10 * cs + k11) / (k11 + k + k10 * cs) - 1) * k10 * cs * acid_cons[i]\n acid_cons[i + 1] = acid_cons[i] - abs(rs[i] * t_step) + S_drop * abs(N1) / V_drop * t_step\n mass_loss[i+1] = mass_loss[i] + abs(rs[i] * t_step * mn)\n else:\n continue\n volume_loss = mass_loss /density\n pH_C = -np.log10(acid_cons) # the pH value with reaction consumption\n depth = volume_loss / S_rand # the penetration depth for single droplet, [dm]\n if t.shape[0] > 300000:\n # assume the HCl gas supplying will maintain until the pH is -1.\n try:\n place = np.where(acid_cons > 10)\n loc = int(place[0][0])\n except IndexError:\n loc = steps\n acid_cons[loc:] = acid_cons[loc-1]\n mass_loss[loc:] = mass_loss[loc-1]\n for i in range(loc-1, t.shape[0]-1):\n rs[i] = ((k10 * cs + k11) / (k11 + k + k10 * cs) - 1) * k10 * cs * acid_cons[i]\n acid_cons[i + 1] = acid_cons[i] - abs(rs[i] * t_step) # + S_drop * abs(N1) / V_drop * t_step\n mass_loss[i + 1] = mass_loss[i] + abs(rs[i] * t_step * mn)\n volume_loss = mass_loss / density\n pH_C = - np.log10(acid_cons) # the pH value with reaction consumption\n depth = volume_loss / S_rand # the penetration depth for single droplet\n rs[t.shape[0]-1] = rs[t.shape[0]-2]\n rs = abs(rs)\n return mass_loss, volume_loss, acid_cons, pH_C, depth, rs",
"def get_graph_charges_multiplicities(self, name: str, total_charge: int):\n import scine_readuct as readuct\n import scine_utilities as utils\n\n bond_orders = self.make_bond_orders_from_calc(self.systems, name)\n\n pbc_string = self.systems[name].settings.get(utils.settings_names.periodic_boundaries, \"\")\n masm_results = masm_helper.get_molecules_result(\n self.systems[name].structure,\n bond_orders,\n self.connectivity_settings,\n pbc_string,\n )\n\n split_structures = masm_results.component_map.apply(self.systems[name].structure)\n decision_lists = [masm_helper.get_decision_list_from_molecule(\n m, a) for m, a in zip(masm_results.molecules, split_structures)]\n\n # Get cbor graphs\n graphs = []\n for molecule in masm_results.molecules:\n graphs.append(masm_helper.get_cbor_graph_from_molecule(molecule))\n\n # Determine partial charges, charges per molecules and number of electrons per molecule\n bond_orders = self.make_bond_orders_from_calc(self.systems, name)\n partial_charges = self.systems[name].get_results().atomic_charges\n if partial_charges is None:\n self.systems, success = readuct.run_single_point_task(\n self.systems, [name], require_charges=True\n )\n self.throw_if_not_successful(\n success, self.systems, [name], [\"energy\", \"atomic_charges\"]\n )\n partial_charges = self.systems[name].get_results().atomic_charges\n self.systems[name].get_results().bond_orders = bond_orders\n\n charges, n_electrons, _ = self._integrate_charges(masm_results.component_map, partial_charges,\n split_structures, total_charge)\n\n # This assumes minimal multiplicity, product multiplicities are again checked later around this multiplicity\n multiplicities = [nel % 2 + 1 for nel in n_electrons]\n\n # Sort everything according to graphs and if these are equal according to charges and then multiplicities\n graphs, charges, multiplicities, decision_lists, structure_order = (\n list(start_val)\n for start_val in zip(*sorted(zip(\n graphs,\n charges,\n multiplicities,\n decision_lists,\n range(0, len(split_structures)))))\n )\n graph_string = \";\".join(graphs)\n\n ordered_structures = [split_structures[i] for i in structure_order]\n\n return ordered_structures, graph_string, charges, multiplicities, decision_lists",
"def get_imagecharge(latt_vec_array: tuple, charge: int, epsilon: float, cutoff: float, n: int =20, verbose=True, **kwargs) -> tuple:\n\n E1 = get_madelungenergy(latt_vec_array, charge=1e0, epsilon=1e0, cutoff=cutoff)\n E3 = -1.*thirdO(latt_vec_array, charge=1e0, n=n)\n\n if epsilon == 1e0:\n # epsilon==1e0, meaning vacuum \n print(\"epsilon=1e0, defect in vacuum. really!!\")\n return E1/epsilon\n else:\n scaled_E3 = E3*(1e0 - 1e0/epsilon)\n csh = E3/E1\n f = csh*(1e0 - 1e0/epsilon)\n \n E_ic = (E1 + scaled_E3) * charge * charge /epsilon\n \n if verbose == False:\n return E_ic\n else:\n return [\"{:0.3f}\".format(float(E1)), \"{:0.3f}\".format(float(E3)), \"{:0.3f}\".format(float(csh)) \\\n ,\"{:0.3f}\".format(float(f)), \"{:0.3f}\".format(float(E_ic))]",
"def test_get_atom_features(self):\n atom_features = np.array([[40, 41, 42, 43], [44, 45, 46, 47],\n [48, 49, 50, 51], [52, 53, 54, 55],\n [56, 57, 58, 59]])\n canon_adj_list = [[1, 2], [0, 3], [0, 3], [1, 2, 4], [3]]\n mol = ConvMol(atom_features, canon_adj_list)\n # atom 4 has 0 neighbors\n # atom 0 has 2 neighbors\n # atom 1 has 2 neighbors\n # atom 2 has 2 neighbors\n # atom 3 has 3 neighbors.\n # Verify that atom features have been sorted by atom degree.\n assert np.array_equal(\n mol.get_atom_features(),\n np.array([[56, 57, 58, 59], [40, 41, 42, 43], [44, 45, 46, 47],\n [48, 49, 50, 51], [52, 53, 54, 55]]))",
"def Mol_SO(Nat, multip, charge, sym, SO_3rdrow_mols_val): # number of atoms, multiplicity, charge, array of atoms in molecule, value of SO_3rdrow_mols (from orca.inp file)\n\n Mol_SO = 0\n \n # Special Case - Acetleyne - S\n if Nat == 4 and multip == 2 and charge == 1:\n countH_temp =0\n countC_temp =0\n for tmp in range(len(sym)):\n if sym[tmp] == \"H\":\n countH_temp= countH_temp +1\n if sym[tmp] == \"C\":\n countC_temp = countC_temp +1\n if countH_temp == 2 and countC_temp == 2:\n Mol_SO = -0.07 #-0.07d0\n \n with open(\"Thermochemistry.out\", \"a\") as ther_chem:\n ther_chem.write(\"DETECTED A C2H2+ SYSTEM: Using SO parameters for acetylene cation\\n\")\n ther_chem.write(\"Ref: JCP 114, 9287, 2001\\n\\n\")\n # Special Case - Acetleyne - E\n \n # For diatomics with multip = 2\n if Nat == 2 and multip == 2 :\n sort_sym = sorted(sym, reverse=True)\n if SO_3rdrow_mols_val == \"true\": # for 3rd_row elements\n \n if charge == 0:\n if sort_sym[0] == 'O' and sort_sym[1] == 'Br': Mol_SO=-2.20\n \n # COMMMENT: paper has it for cation, but it looks like it is for neutral\n if sort_sym[0] == 'Se' and sort_sym[1] == 'H': Mol_SO=-4.21\n \n if charge == +1: ### RECHECK what the values of charge is!!!!!!!!!!!!!!!!!!!!!IMPORTANT\n if sort_sym[0] == 'K' and sort_sym[1] == 'Br': Mol_SO=-2.99\n if sort_sym[0] == 'H' and sort_sym[1] == 'As': Mol_SO=-3.54\n if sort_sym[0] == 'H' and sort_sym[1] == 'Br': Mol_SO=-6.26\n if sort_sym[0] == 'F' and sort_sym[1] == 'Br': Mol_SO=-6.10\n if sort_sym[0] == 'Na' and sort_sym[1] == 'Br': Mol_SO=-3.93\n if sort_sym[0] == 'Br' and sort_sym[1] == 'Br': Mol_SO=-6.55\n \n else: # for non 3rd row elements, first and second rows\n if charge == 0:\n if sort_sym[0] == 'H' and sort_sym[1] == 'C': Mol_SO=-0.07\n if sort_sym[0] == 'O' and sort_sym[1] == 'H': Mol_SO=-0.30\n if sort_sym[0] == 'O' and sort_sym[1] == 'N': Mol_SO=-0.27\n if sort_sym[0] == 'O' and sort_sym[1] == 'Cl': Mol_SO=-0.61\n if sort_sym[0] == 'S' and sort_sym[1] == 'H': Mol_SO=-1.01\n if sort_sym[0] == 'P' and sort_sym[1] == 'O': Mol_SO=-0.53\n if sort_sym[0] == 'Si' and sort_sym[1] == 'H': Mol_SO=-0.34\n \n if charge == -1:\n if sort_sym[0] == 'N' and sort_sym[1] == 'H': Mol_SO=-0.12\n if sort_sym[0] == 'P' and sort_sym[1] == 'H': Mol_SO=-0.45\n if sort_sym[0] == 'O' and sort_sym[1] == 'O': Mol_SO=-0.34\n if sort_sym[0] == 'S' and sort_sym[1] == 'S': Mol_SO=-1.12\n \n if charge == +1:\n if sort_sym[0] == 'H' and sort_sym[1] == 'F': Mol_SO=-0.62\n if sort_sym[0] == 'P' and sort_sym[1] == 'H': Mol_SO=-0.67\n if sort_sym[0] == 'H' and sort_sym[1] == 'Cl': Mol_SO=-1.60\n if sort_sym[0] == 'N' and sort_sym[1] == 'N': Mol_SO=-0.17\n if sort_sym[0] == 'O' and sort_sym[1] == 'O': Mol_SO=-0.43\n if sort_sym[0] == 'P' and sort_sym[1] == 'P': Mol_SO=-0.57\n if sort_sym[0] == 'S' and sort_sym[1] == 'S': Mol_SO=-1.25\n if sort_sym[0] == 'Cl' and sort_sym[1] == 'Cl': Mol_SO=-1.77\n if sort_sym[0] == 'F' and sort_sym[1] == 'Cl': Mol_SO=-1.60\n \n return(Mol_SO)",
"def valence_terms(cls, topology):\n return [tuple(b.atoms) for b in topology.bonds]",
"def exo3_2(mu,x0,n,m):\r\n S = []\r\n valeur = x0\r\n for i in range(0,n+1):\r\n valeur = exo2_1(valeur,mu)\r\n S.append(valeur)\r\n for i in range(m,n):\r\n valeur = exo2_1(valeur,mu)\r\n S.append(valeur)\r\n return S",
"def E_step(X, pi, mu, sigma):\r\n N = X.shape[0] # number of objects\r\n C = pi.shape[0] # number of clusters\r\n d = X.shape[1] # dimension of each object\r\n gamma = np.zeros((N, C)) # distribution q(T)\r\n print(\"Hei\")\r\n ### YOUR CODE HERE\r\n for c in np.arange(0,C):\r\n for ix in np.arange(0,N):\r\n x = X[ix,:]\r\n xc = x - mu[c,:]\r\n sigmac = sigma[c,:,:]\r\n sigmacInv_xc = solve(a=sigmac, b= xc)\r\n exp_arg_c = -0.5*np.dot(xc , sigmacInv_xc)\r\n acc = 0.0\r\n for d in np.arange(0,C):\r\n xd = x - mu[d,:]\r\n sigmad = sigma[d,:,:]\r\n sigmadInv_xd = solve(a=sigmad, b= xd)\r\n exp_arg_d = -0.5*np.dot(xd, sigmadInv_xd)\r\n exp_diff = exp_arg_d - exp_arg_c\r\n acc = acc + (pi[d]/pi[c]) * np.sqrt(det(sigmad)/det(sigmac))*np.exp(exp_diff) \r\n gamma[ix,c] = 1/acc \r\n \r\n \r\n return gamma",
"def _build_pairs_for_eval(self):\n rec = list()\n for idx1 in range(len(self)):\n idx2, is_similar = self._get_sec_idx_and_is_similar(idx1)\n rec.append((idx2, is_similar))\n self._pairs_for_eval = rec",
"def Find_Lowest_Energy_Structure_Electrostatics(self):\n n_Na = self.structure.composition['Na']\n n_S = self.structure.composition['S']\n n_O = self.structure.composition['O']\n n_N = self.structure.composition['N']\n n_Fe = self.structure.composition['Fe']\n\n n_Fe_reduced = self.variable_magnetization_dict['Fe']['n_reduced']\n n_Fe_oxidized = n_Fe-n_Fe_reduced \n\n N_charge = ( 2.*n_O-6.*n_S-n_Na-3.*n_Fe_oxidized-2.*n_Fe_reduced )/n_N\n\n oxidation_states = {'Na':+1, 'Fe':+3, 'O':-2,'S':+6,'N':N_charge}\n Fe_2plus = pymatgen.Specie('Fe',oxidation_state=+2)\n\n structure_with_charges = self.structure.copy()\n structure_with_charges.add_oxidation_state_by_element(oxidation_states) \n\n # identify Fe sites\n list_Fe_indices = []\n for i,site in enumerate(structure_with_charges):\n if site.specie.symbol == 'Fe':\n list_Fe_indices.append(i)\n\n # Generate all possible permutation of sites and compute \n # Ewald energy\n ewald_model = EwaldElectrostaticModel(acc_factor=6)\n list_reduced_sets = []\n list_ewald_energy = []\n for reduced_set in itertools.combinations(list_Fe_indices,n_Fe_reduced):\n list_reduced_sets.append(reduced_set) \n\n struct = structure_with_charges.copy()\n for i in reduced_set:\n struct.replace(i, Fe_2plus)\n\n list_ewald_energy.append(ewald_model.get_energy(struct))\n\n if len(list_ewald_energy) == 0:\n # all sites are oxidized. No sorting involved \n list_reduced_site_indices = []\n list_oxidized_site_indices = list_Fe_indices\n else:\n # some reduction takes place. Identify best electrostatic choice\n\n imin = np.argmin(list_ewald_energy)\n\n list_reduced_site_indices = list_reduced_sets[imin] \n list_oxidized_site_indices = []\n for i in list_Fe_indices:\n if i not in list_reduced_site_indices: \n list_oxidized_site_indices.append(i) \n\n\n return list_reduced_site_indices, list_oxidized_site_indices",
"def exo6(mu,x0, n, m):\r\n liste=exo3_2(mu,x0,n,m)\r\n listem=[]\r\n listem1=[]\r\n \r\n for i in range(0,len(liste)-1):\r\n listem.append(liste[i])\r\n listem1.append(liste[i+1])\r\n \r\n listem.append(exo2_1(n,mu))\r\n listem1.append(exo2_1(n+1,mu))\r\n \r\n return listem,listem1 #\r"
] | [
"0.55804133",
"0.55528504",
"0.55493754",
"0.55289745",
"0.5501587",
"0.54684055",
"0.5379743",
"0.5365766",
"0.5360747",
"0.5359474",
"0.5317505",
"0.5289808",
"0.52889115",
"0.5276612",
"0.5248068",
"0.5214959",
"0.5205706",
"0.5186701",
"0.51694226",
"0.5148966",
"0.5148071",
"0.51289445",
"0.5123088",
"0.51210123",
"0.50955456",
"0.50812846",
"0.5063029",
"0.50594425",
"0.5054176",
"0.5050369"
] | 0.6232562 | 0 |
Return periodic boundary box beta angle and dimensions | def getBoxBetaAndDimensions(self):
beta=float(self._raw_data["BOX_DIMENSIONS"][0])
x=float(self._raw_data["BOX_DIMENSIONS"][1])
y=float(self._raw_data["BOX_DIMENSIONS"][2])
z=float(self._raw_data["BOX_DIMENSIONS"][3])
return (units.Quantity(beta, units.degree),
units.Quantity(x, units.angstrom),
units.Quantity(y, units.angstrom),
units.Quantity(z, units.angstrom)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def beta(self):\n eTheta = self.eTheta()\n cosOmg = np.cos(self.omega())\n return self.a1()/c.c*(1-eTheta**2)**0.5*cosOmg",
"def getBeta(self, alpha):\n return 2.0*(2.0-alpha) + -4.0*np.sqrt(1.0-alpha)",
"def Bimat(self):\n a, b, c, alpha, beta, gamma = self.lattice_parameters\n alpha = alpha * radians\n beta = beta * radians\n gamma = gamma * radians\n B23 = c*(np.cos(alpha)-np.cos(beta)*np.cos(gamma))/np.sin(gamma)\n B33 = np.sqrt(c**2-(c*np.cos(beta))**2-B23**2)\n return np.matrix(((a, b*np.cos(gamma), c*np.cos(beta)),\n (0, b*np.sin(gamma), B23),\n (0, 0, B33)))",
"def get_beta(self):\n\n return np.matmul(self.rotation_x, self.beta_z)",
"def beta_star(self):\n return self.reciprocal_lattice_parameters[4]",
"def get_lattice_spacing(beta):\n\n\tr0 = 0.5\n\tbeta_low = 5.7\n\tbeta_high = 6.57\n\n\tif np.any(beta < beta_low) or np.any(beta > beta_high):\n\t\traise Warning(\"Beta value of %f is outside of defined area [%f, %f].\"\n\t\t\t% (beta, beta_low, beta_high))\n\n\tdef _get_a(b):\n\t\t\"\"\"Gets the beta value without any error.\"\"\"\n\t\tbval = (b - 6.0)\n\t\t_a = np.exp(-1.6805 - 1.7139*bval + 0.8155*bval**2 - 0.6667*bval**3)*r0\n\t\treturn _a\n\n\ta = _get_a(beta)\n\ta_err_slope = ((0.6 - 0.3)/100.0)/(beta_high - beta_low) # err% / beta\n\ta_err_const = 0.3/100 - a_err_slope*beta_low\n\ta_err_percent = lambda _b: a_err_slope*_b + a_err_const\n\ta_err = a*a_err_percent(beta)\n\n\treturn a, a*a_err_percent(beta) # fermi",
"def get_bounds():\n bounds = [\n (0.1, 0.5), # Omega_m\n (0.05, 0.15) # beta\n ]\n return np.array(bounds)",
"def B(alpha: float, beta: float) -> float:\n return math.gamma(alpha) * math.gamma(beta) / math.gamma(alpha + beta)",
"def gabor_fn(sigma, theta, lambd, psi, gamma):\n\n sigma_x = sigma\n sigma_y = float(sigma) / gamma\n\n # Bounding box\n nstds = 3\n xmax = max(abs(nstds * sigma_x * np.cos(theta)),\n abs(nstds * sigma_y * np.sin(theta)))\n xmax = np.ceil(max(1, xmax))\n ymax = max(abs(nstds * sigma_x * np.sin(theta)),\n abs(nstds * sigma_y * np.cos(theta)))\n ymax = np.ceil(max(1, ymax))\n xmin = -xmax\n ymin = -ymax\n (y, x) = np.meshgrid(np.arange(ymin, ymax + 1), np.arange(xmin, xmax + 1))\n\n # Rotation\n x_theta = x * np.cos(theta) + y * np.sin(theta)\n y_theta = -x * np.sin(theta) + y * np.cos(theta)\n\n gb = np.exp(-.5 * (x_theta ** 2 / sigma_x ** 2 + y_theta ** 2 /\n sigma_y ** 2)) * np.cos(2 * np.pi / lambd * x_theta + psi)\n return gb",
"def B(alpha, beta):\n return math.gamma(apha) * math.gamma(beta) / math.gamma(alpha + beta)",
"def B(alpha, beta):\n return math.gamma(alpha) * math.gamma(beta) / math.gamma(alpha + beta)",
"def B(alpha, beta):\n return math.gamma(alpha) * math.gamma(beta) / math.gamma(alpha + beta)",
"def B(alpha, beta):\n return math.gamma(alpha) * math.gamma(beta) / math.gamma(alpha + beta)",
"def bounding_box(alpha):\n assert alpha.ndim == 2\n\n # Take the bounding box of the support, with a certain threshold.\n #print(\"Using alpha\", self.use_alpha, \"support\", self.support)\n supp_axs = [alpha.max(axis=1-i) for i in range(2)]\n\n th = 0.5 \n # Check first and last value of that threshold\n bb = [np.where(supp_axs[i] > th)[0][[0,-1]] for i in range(2)]\n\n # This bb looks like [(x0, x1), (y0, y1)], when we want it as (x0, y0, x1, y1)\n #psize = self.settings['subsample_size']\n #ret = (bb[0][0]/psize[0], bb[1][0]/psize[1], bb[0][1]/psize[0], bb[1][1]/psize[1])\n\n return (bb[0][0], bb[1][0], bb[0][1], bb[1][1])",
"def BetaVelocity(self):\n return np.linalg.norm(self.velocity) / const.speed_of_light",
"def get_box(ra0, ra1, dec0, dec1):\n\n box = np.array([[dec0, ra1], [dec1, ra0]]) * np.pi / 180\n\n return box",
"def _beta(self):\n return _handle_ab(self.solution, self.use_const)[1]",
"def betaT(self):\n if self.maTail > 1:\n return 0\n else:\n return sqrt(1 - self.maTail**2)",
"def test_beta_bounds(self):\n self.assertAlmostEqual(\n calculate_beta_bounds(10.0, 20.0, 40.0, 0.9)[0], 13.44239853)\n self.assertAlmostEqual(\n calculate_beta_bounds(10.0, 20.0, 40.0, 0.9)[1], 21.66666666)\n self.assertAlmostEqual(\n calculate_beta_bounds(10.0, 20.0, 40.0, 0.9)[2], 29.89093480)\n self.assertAlmostEqual(\n calculate_beta_bounds(10.0, 20.0, 40.0, 0.9)[3], 5.0)\n self.assertAlmostEqual(\n calculate_beta_bounds(10.0, 20.0, 40.0, 90.0)[0], 13.44239853)\n self.assertAlmostEqual(\n calculate_beta_bounds(10.0, 20.0, 40.0, 90.0)[1], 21.66666666)\n self.assertAlmostEqual(\n calculate_beta_bounds(10.0, 20.0, 40.0, 90.0)[2], 29.89093480)\n self.assertAlmostEqual(\n calculate_beta_bounds(10.0, 20.0, 40.0, 90.0)[3], 5.0)",
"def generate_pbc(self):\n s = \" - using 2D periodic boundaries -\"\n print_text(s, cls=self)\n\n xmin = MPI.min(mpi_comm_world(), self.mesh.coordinates()[:,0].min())\n xmax = MPI.max(mpi_comm_world(), self.mesh.coordinates()[:,0].max())\n ymin = MPI.min(mpi_comm_world(), self.mesh.coordinates()[:,1].min())\n ymax = MPI.max(mpi_comm_world(), self.mesh.coordinates()[:,1].max())\n \n self.use_periodic_boundaries = True\n \n class PeriodicBoundary(SubDomain):\n \n def inside(self, x, on_boundary):\n \"\"\"\n Return True if on left or bottom boundary AND NOT on one \n of the two corners (0, 1) and (1, 0).\n \"\"\"\n return bool((near(x[0], xmin) or near(x[1], ymin)) and \\\n (not ((near(x[0], xmin) and near(x[1], ymax)) \\\n or (near(x[0], xmax) and near(x[1], ymin)))) \\\n and on_boundary)\n\n def map(self, x, y):\n \"\"\"\n Remap the values on the top and right sides to the bottom and left\n sides.\n \"\"\"\n if near(x[0], xmax) and near(x[1], ymax):\n y[0] = x[0] - xmax\n y[1] = x[1] - ymax\n elif near(x[0], xmax):\n y[0] = x[0] - xmax\n y[1] = x[1]\n elif near(x[1], ymax):\n y[0] = x[0]\n y[1] = x[1] - ymax\n else:\n y[0] = x[0]\n y[1] = x[1]\n\n self.pBC = PeriodicBoundary()",
"def occupation_beta(self) -> np.ndarray:\n return np.asarray(self._occupation_beta)",
"def Bo_Bosol_calc(self):\n self.Bosol = (self.g*self.alpha * self.srflx ) \n #ZEROS FOR T3W APPLICATION\n self.Bo = np.zeros([self.b.shape[0]])",
"def getBeta(self):\n\t\treturn self.relativistic_beta",
"def get_alpha_beta_bounds(self,n=50):\n rho_temp = self.rho\n self.rho = self.rho_max\n beta = self.tau_plus(self.f1(self.rho_max),n)\n self.rho = self.rho_min\n alpha = self.tau(self.f0(self.rho_min),n)\n self.rho = rho_temp\n return alpha,beta",
"def beta_r(r):\n return 0.",
"def _get_alpha_beta(self):\n alpha = tf.nn.softplus(self.alpha_prime)\n beta = -alpha + tf.nn.softplus(self.beta_prime)\n return alpha, beta",
"def V_bonds(atoms):\n \n Vb = V_length(atoms) + V_angles(atoms)\n \n return Vb",
"def beta_r(r):\n return 1.",
"def get_bb_tpdm(self):\n _, dvecb = self.calculate_dvec_spin()\n beta_opdm = numpy.tensordot(dvecb, self.coeff.conj(), axes=2)\n nik_njl_bb = numpy.transpose(numpy.tensordot(dvecb.conj(),\n dvecb,\n axes=((2, 3), (2, 3))),\n axes=(1, 2, 0, 3))\n for ii in range(nik_njl_bb.shape[1]):\n nik_njl_bb[:, ii, ii, :] -= beta_opdm\n return beta_opdm, -nik_njl_bb",
"def get_beta_z(self):\n\n return np.matmul(self.sigma_zinv, self.sigma_zw)"
] | [
"0.65462947",
"0.6462211",
"0.6127845",
"0.6089902",
"0.6024211",
"0.59340787",
"0.5925929",
"0.5891913",
"0.5888504",
"0.58726263",
"0.58666295",
"0.58666295",
"0.58666295",
"0.58211255",
"0.580363",
"0.58013934",
"0.5800614",
"0.57164824",
"0.57111526",
"0.5701021",
"0.56932753",
"0.56694186",
"0.56573725",
"0.5650625",
"0.56212616",
"0.56159145",
"0.55661744",
"0.5542767",
"0.55416113",
"0.55326957"
] | 0.70399827 | 0 |
Create an OpenMM System from an Amber prmtop file. REQUIRED ARGUMENT topology (forcefield.Topology) The topology for the system that is about to be created ARGUMENTS (specify one or the other, but not both) prmtop_filename (String) name of Amber prmtop file (newstyle only) prmtop_loader (PrmtopLoader) the loaded prmtop file OPTIONAL ARGUMENTS | def readAmberSystem(topology, prmtop_filename=None, prmtop_loader=None, shake=None, gbmodel=None,
soluteDielectric=1.0, solventDielectric=78.5,
implicitSolventKappa=0.0*(1/units.nanometer), nonbondedCutoff=None,
nonbondedMethod='NoCutoff', scee=None, scnb=None, mm=None, verbose=False,
EwaldErrorTolerance=None, flexibleConstraints=True, rigidWater=True, elements=None,
gbsaModel='ACE'):
if prmtop_filename is None and prmtop_loader is None:
raise Exception("Must specify a filename or loader")
if prmtop_filename is not None and prmtop_loader is not None:
raise Exception("Cannot specify both a filename and a loader")
if prmtop_filename is not None:
# Load prmtop file.
if verbose: print("Reading prmtop file '%s'..." % prmtop_filename)
prmtop = PrmtopLoader(prmtop_filename)
else:
prmtop = prmtop_loader
if prmtop.getIfCap()>0:
raise Exception("CAP option not currently supported")
if prmtop.getIfPert()>0:
raise Exception("perturbation not currently supported")
if prmtop.has_scee_scnb and (scee is not None or scnb is not None):
warnings.warn("1-4 scaling parameters in topology file are being ignored. "
"This is not recommended unless you know what you are doing.")
if gbmodel is not None and gbsaModel not in ('ACE', None):
raise ValueError('gbsaModel must be ACE or None')
has_1264 = 'LENNARD_JONES_CCOEF' in prmtop._raw_data.keys()
if has_1264:
parm_ccoef = [float(x) for x in prmtop._raw_data['LENNARD_JONES_CCOEF']]
# Use pyopenmm implementation of OpenMM by default.
if mm is None:
mm = openmm
# Create OpenMM System.
if verbose: print("Creating OpenMM system...")
system = mm.System()
# Populate system with atomic masses.
if verbose: print("Adding particles...")
for mass in prmtop.getMasses():
system.addParticle(mass)
# Add constraints.
isWater = [prmtop.getResidueLabel(i) in ('WAT', 'HOH', 'TP4', 'TP5', 'T4E') for i in range(prmtop.getNumAtoms())]
isEP = [a.element is None for a in topology.atoms()]
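    # Atoms whose Topology element is None are extra points (virtual sites); their
    # positions are computed from parent atoms, so they are skipped when adding constraints.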
if shake in ('h-bonds', 'all-bonds', 'h-angles'):
for (iAtom, jAtom, k, rMin) in prmtop.getBondsWithH():
if not (isEP[iAtom] or isEP[jAtom]):
system.addConstraint(iAtom, jAtom, rMin)
if shake in ('all-bonds', 'h-angles'):
for (iAtom, jAtom, k, rMin) in prmtop.getBondsNoH():
if not (isEP[iAtom] or isEP[jAtom]):
system.addConstraint(iAtom, jAtom, rMin)
if rigidWater and shake is None:
for (iAtom, jAtom, k, rMin) in prmtop.getBondsWithH():
if isWater[iAtom] and isWater[jAtom] and not (isEP[iAtom] or isEP[jAtom]):
system.addConstraint(iAtom, jAtom, rMin)
# Add harmonic bonds.
if verbose: print("Adding bonds...")
force = mm.HarmonicBondForce()
if flexibleConstraints or (shake not in ('h-bonds', 'all-bonds', 'h-angles')):
for (iAtom, jAtom, k, rMin) in prmtop.getBondsWithH():
if flexibleConstraints or not (rigidWater and isWater[iAtom] and isWater[jAtom]):
force.addBond(iAtom, jAtom, rMin, 2*k)
if flexibleConstraints or (shake not in ('all-bonds', 'h-angles')):
for (iAtom, jAtom, k, rMin) in prmtop.getBondsNoH():
force.addBond(iAtom, jAtom, rMin, 2*k)
system.addForce(force)
# Add Urey-Bradley terms.
if len(prmtop.getUreyBradleys()) > 0:
if verbose: print("Adding Urey-Bradley terms...")
force = mm.HarmonicBondForce()
force.setName('UreyBradleyForce')
for (iAtom, jAtom, k, rMin) in prmtop.getUreyBradleys():
force.addBond(iAtom, jAtom, rMin, 2*k)
system.addForce(force)
# Add harmonic angles.
if verbose: print("Adding angles...")
force = mm.HarmonicAngleForce()
if shake == 'h-angles':
numConstrainedBonds = system.getNumConstraints()
        atomConstraints = [[] for _ in range(system.getNumParticles())]  # one independent list per atom
for i in range(numConstrainedBonds):
c = system.getConstraintParameters(i)
distance = c[2].value_in_unit(units.nanometer)
atomConstraints[c[0]].append((c[1], distance))
atomConstraints[c[1]].append((c[0], distance))
topatoms = list(topology.atoms())
for (iAtom, jAtom, kAtom, k, aMin) in prmtop.getAngles():
if shake == 'h-angles':
atomI = topatoms[iAtom]
atomJ = topatoms[jAtom]
atomK = topatoms[kAtom]
numH = ((atomI.element.atomic_number == 1) + (atomK.element.atomic_number == 1))
constrained = (numH == 2 or (numH == 1 and atomJ.element is elem.oxygen))
else:
constrained = False
if constrained:
# Find the two bonds that make this angle.
l1 = None
l2 = None
for bond in atomConstraints[jAtom]:
if bond[0] == iAtom:
l1 = bond[1]
elif bond[0] == kAtom:
l2 = bond[1]
# Compute the distance between atoms and add a constraint
length = sqrt(l1*l1 + l2*l2 - 2*l1*l2*cos(aMin))
system.addConstraint(iAtom, kAtom, length)
if flexibleConstraints or not constrained:
force.addAngle(iAtom, jAtom, kAtom, aMin, 2*k)
system.addForce(force)
# Add torsions.
if verbose: print("Adding torsions...")
force = mm.PeriodicTorsionForce()
for (iAtom, jAtom, kAtom, lAtom, forceConstant, phase, periodicity) in prmtop.getDihedrals():
force.addTorsion(iAtom, jAtom, kAtom, lAtom, periodicity, phase, forceConstant)
system.addForce(force)
# Add impropers.
if len(prmtop.getImpropers()) > 0:
if verbose: print("Adding impropers...")
force = mm.CustomTorsionForce(f'k*min(dtheta, 2*{pi}-dtheta)^2; dtheta = abs(theta-theta0)')
force.addPerTorsionParameter('k')
force.addPerTorsionParameter('theta0')
force.setName('ImproperTorsionForce')
for (iAtom, jAtom, kAtom, lAtom, forceConstant, phase) in prmtop.getImpropers():
force.addTorsion(iAtom, jAtom, kAtom, lAtom, (forceConstant, phase))
system.addForce(force)
# Add CMAP info.
## Get mapSize and Resolutions
numMap = prmtop.getNumMaps()
mapSize = prmtop.getCMAPResolutions()
if numMap > 0:
if verbose: print("Adding CMAPs...")
force = mm.CMAPTorsionForce()
### Get map energies
for field in range(numMap):
index = field + 1
ngrid = int(mapSize[field])
cmap = []
cmap_param = prmtop.getCMAPParameters(index)
forceConstConversionFactor = (units.kilocalorie_per_mole).conversion_factor_to(units.kilojoule_per_mole)
for i in range(ngrid):
for j in range(ngrid):
idx = ngrid*((j+ngrid//2)%ngrid)+((i+ngrid//2)%ngrid)
cmap.append(cmap_param[idx]*forceConstConversionFactor)
cmap = tuple(cmap)
force.addMap(ngrid, cmap)
#### Add CMAPtorsions.
if verbose: print("Adding CMAP torsions...")
for (Type, iAtom, jAtom, kAtom, lAtom, jAtom, kAtom, lAtom, mAtom) in prmtop.getCMAPDihedrals():
index=force.addTorsion(Type, iAtom, jAtom, kAtom, lAtom, jAtom, kAtom, lAtom, mAtom)
system.addForce(force)
# Add nonbonded interactions.
if verbose: print("Adding nonbonded interactions...")
force = mm.NonbondedForce()
if topology.getPeriodicBoxVectors() is None and prmtop.getIfBox() == 0:
# System is non-periodic.
if nonbondedMethod == 'NoCutoff':
force.setNonbondedMethod(mm.NonbondedForce.NoCutoff)
elif nonbondedMethod == 'CutoffNonPeriodic':
if nonbondedCutoff is None:
raise Exception("No cutoff value specified")
force.setNonbondedMethod(mm.NonbondedForce.CutoffNonPeriodic)
force.setCutoffDistance(nonbondedCutoff)
else:
raise Exception("Illegal nonbonded method for a non-periodic system")
else:
# System is periodic.
# Set periodic box vectors for periodic system
if topology.getPeriodicBoxVectors() is None:
(boxBeta, boxX, boxY, boxZ) = prmtop.getBoxBetaAndDimensions()
xVec, yVec, zVec = computePeriodicBoxVectors(boxX, boxY, boxZ, boxBeta, boxBeta, boxBeta)
system.setDefaultPeriodicBoxVectors(xVec, yVec, zVec)
else:
system.setDefaultPeriodicBoxVectors(*topology.getPeriodicBoxVectors())
# Set cutoff.
if nonbondedCutoff is None:
# Compute cutoff automatically.
min_box_width = min([boxX / units.nanometers, boxY / units.nanometers, boxZ / units.nanometers])
CLEARANCE_FACTOR = 0.97 # reduce the cutoff to be a bit smaller than 1/2 smallest box length
nonbondedCutoff = units.Quantity((min_box_width * CLEARANCE_FACTOR) / 2.0, units.nanometers)
if nonbondedMethod != 'NoCutoff':
force.setCutoffDistance(nonbondedCutoff)
# Set nonbonded method.
if nonbondedMethod == 'NoCutoff':
force.setNonbondedMethod(mm.NonbondedForce.NoCutoff)
elif nonbondedMethod == 'CutoffNonPeriodic':
force.setNonbondedMethod(mm.NonbondedForce.CutoffNonPeriodic)
elif nonbondedMethod == 'CutoffPeriodic':
force.setNonbondedMethod(mm.NonbondedForce.CutoffPeriodic)
elif nonbondedMethod == 'Ewald':
force.setNonbondedMethod(mm.NonbondedForce.Ewald)
elif nonbondedMethod == 'PME':
force.setNonbondedMethod(mm.NonbondedForce.PME)
elif nonbondedMethod == 'LJPME':
force.setNonbondedMethod(mm.NonbondedForce.LJPME)
else:
raise Exception("Cutoff method not understood.")
if EwaldErrorTolerance is not None:
force.setEwaldErrorTolerance(EwaldErrorTolerance)
# Add per-particle nonbonded parameters.
sigmaScale = 2**(-1./6.) * 2.0
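    # sigmaScale converts a per-atom Rmin/2 radius into a Lennard-Jones sigma: sigma = 2*(Rmin/2)*2^(-1/6).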
nbfix = False
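    # getNonbondTerms() raises NbfixPresent when the prmtop contains off-diagonal
    # Lennard-Jones terms that cannot be reduced to per-atom sigma/epsilon; in that
    # case the LJ interaction is handled by a CustomNonbondedForce with tabulated coefficients.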
try:
nonbondTerms = prmtop.getNonbondTerms()
except NbfixPresent:
nbfix = True
for charge in prmtop.getCharges():
force.addParticle(charge, 1.0, 0.0)
numTypes = prmtop.getNumTypes()
parm_acoef = [float(x) for x in prmtop._raw_data['LENNARD_JONES_ACOEF']]
parm_bcoef = [float(x) for x in prmtop._raw_data['LENNARD_JONES_BCOEF']]
nbidx = [int(x) for x in prmtop._raw_data['NONBONDED_PARM_INDEX']]
acoef = [0 for i in range(numTypes*numTypes)]
bcoef = acoef[:] # copy
ene_conv = units.kilocalories_per_mole.conversion_factor_to(units.kilojoules_per_mole)
length_conv = units.angstroms.conversion_factor_to(units.nanometers)
afac = sqrt(ene_conv) * length_conv**6
bfac = ene_conv * length_conv**6
for i in range(numTypes):
for j in range(numTypes):
idx = nbidx[numTypes*i+j] - 1
if idx < 0: continue
acoef[i+numTypes*j] = sqrt(parm_acoef[idx]) * afac
bcoef[i+numTypes*j] = parm_bcoef[idx] * bfac
if has_1264:
cfac = ene_conv * length_conv**4
ccoef = [0 for i in range(numTypes*numTypes)]
for i in range(numTypes):
for j in range(numTypes):
idx = nbidx[numTypes*i+j] - 1
if idx < 0: continue
ccoef[i+numTypes*j] = parm_ccoef[idx] * cfac
cforce = mm.CustomNonbondedForce('(a/r6)^2-b/r6-c/r^4; r6=r^6;'
'a=acoef(type1, type2);'
'b=bcoef(type1, type2);'
'c=ccoef(type1, type2);')
else:
cforce = mm.CustomNonbondedForce('(a/r6)^2-b/r6; r6=r^6;'
'a=acoef(type1, type2);'
'b=bcoef(type1, type2);')
cforce.addTabulatedFunction('acoef',
mm.Discrete2DFunction(numTypes, numTypes, acoef))
cforce.addTabulatedFunction('bcoef',
mm.Discrete2DFunction(numTypes, numTypes, bcoef))
if has_1264:
cforce.addTabulatedFunction('ccoef',
mm.Discrete2DFunction(numTypes, numTypes, ccoef))
cforce.addPerParticleParameter('type')
for atom in prmtop._getAtomTypeIndexes():
cforce.addParticle((atom-1,))
else:
for (charge, (rVdw, epsilon)) in zip(prmtop.getCharges(), nonbondTerms):
sigma = rVdw * sigmaScale
force.addParticle(charge, sigma, epsilon)
if has_1264:
numTypes = prmtop.getNumTypes()
nbidx = [int(x) for x in prmtop._raw_data['NONBONDED_PARM_INDEX']]
ccoef = [0 for i in range(numTypes*numTypes)]
ene_conv = units.kilocalories_per_mole.conversion_factor_to(units.kilojoules_per_mole)
length_conv = units.angstroms.conversion_factor_to(units.nanometers)
cfac = ene_conv * length_conv**4
for i in range(numTypes):
for j in range(numTypes):
idx = nbidx[numTypes*i+j] - 1
if idx < 0: continue
ccoef[i+numTypes*j] = parm_ccoef[idx] * cfac
cforce = mm.CustomNonbondedForce('-c/r^4; c=ccoef(type1, type2)')
cforce.addTabulatedFunction('ccoef',
mm.Discrete2DFunction(numTypes, numTypes, ccoef))
cforce.addPerParticleParameter('type')
for atom in prmtop._getAtomTypeIndexes():
cforce.addParticle((atom-1,))
# Add 1-4 Interactions
excludedAtomPairs = set()
sigmaScale = 2**(-1./6.)
_scee, _scnb = scee, scnb
for (iAtom, lAtom, chargeProd, rMin, epsilon, iScee, iScnb) in prmtop.get14Interactions():
if scee is None: _scee = iScee
if scnb is None: _scnb = iScnb
chargeProd /= _scee
epsilon /= _scnb
sigma = rMin * sigmaScale
force.addException(iAtom, lAtom, chargeProd, sigma, epsilon)
excludedAtomPairs.add(min((iAtom, lAtom), (lAtom, iAtom)))
# Add Excluded Atoms
excludedAtoms=prmtop.getExcludedAtoms()
excludeParams = (0.0, 0.1, 0.0)
for iAtom in range(prmtop.getNumAtoms()):
for jAtom in excludedAtoms[iAtom]:
if min((iAtom, jAtom), (jAtom, iAtom)) in excludedAtomPairs: continue
force.addException(iAtom, jAtom, excludeParams[0], excludeParams[1], excludeParams[2])
# Copy the exceptions as exclusions to the CustomNonbondedForce if we have
# NBFIX terms
if nbfix or has_1264:
for i in range(force.getNumExceptions()):
ii, jj, chg, sig, eps = force.getExceptionParameters(i)
cforce.addExclusion(ii, jj)
# Now set the various properties based on the NonbondedForce object
if nonbondedMethod in ('PME', 'LJPME', 'Ewald', 'CutoffPeriodic'):
cforce.setNonbondedMethod(cforce.CutoffPeriodic)
cforce.setCutoffDistance(nonbondedCutoff)
cforce.setUseLongRangeCorrection(True)
elif nonbondedMethod == 'CutoffNonPeriodic':
cforce.setNonbondedMethod(cforce.CutoffNonPeriodic)
cforce.setCutoffDistance(nonbondedCutoff)
elif nonbondedMethod == 'NoCutoff':
cforce.setNonbondedMethod(cforce.NoCutoff)
else:
raise ValueError('Unrecognized cutoff option %s' % nonbondedMethod)
# Add this force to the system
system.addForce(cforce)
system.addForce(force)
# Add virtual sites for water.
epNames = ['EP', 'LP']
ep = [i for i in range(prmtop.getNumAtoms()) if isWater[i] and prmtop.getAtomName(i)[:2] in epNames]
if len(ep) > 0:
epRes = set((prmtop.getResidueNumber(i) for i in ep))
numRes = max(epRes)+1
# For each residue that contains an "extra point", find the oxygen, hydrogens, and points.
waterO = []
waterH = []
waterEP = []
for i in range(numRes):
waterO.append([])
waterH.append([])
waterEP.append([])
for i in range(prmtop.getNumAtoms()):
res = prmtop.getResidueNumber(i)
if res in epRes:
name = prmtop.getAtomName(i)
if name[0] == 'O':
waterO[res].append(i)
if name[0] == 'H':
waterH[res].append(i)
if name[:2] in epNames:
waterEP[res].append(i)
# Record bond lengths for faster access.
distOH = [None]*numRes
distHH = [None]*numRes
distOE = [None]*numRes
for (atom1, atom2, k, dist) in prmtop.getBondsWithH()+prmtop.getBondsNoH():
res = prmtop.getResidueNumber(atom1)
if res in epRes:
name1 = prmtop.getAtomName(atom1)
name2 = prmtop.getAtomName(atom2)
if name1[0] == 'H' or name2[0] == 'H':
if name1[0] == 'H' and name2[0] == 'H':
distHH[res] = dist
if name1[0] == 'O' or name2[0] == 'O':
distOH[res] = dist
elif (name1[0] == 'O' or name2[0] == 'O') and ((name1[:2] in epNames or name2[:2] in epNames)):
distOE[res] = dist
# Loop over residues and add the virtual sites.
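        # The 54.735 degree out-of-plane angle is half the tetrahedral angle, placing the two
        # lone pairs of five-point waters 109.47 degrees apart, above and below the H-O-H plane.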
outOfPlaneAngle = 54.735*units.degree
cosOOP = units.cos(outOfPlaneAngle)
sinOOP = units.sin(outOfPlaneAngle)
for res in range(numRes):
if len(waterO[res]) == 1 and len(waterH[res]) == 2:
if len(waterEP[res]) == 1:
# Four point water
weightH = distOE[res]/sqrt(distOH[res]**2-(0.5*distHH[res])**2)
system.setVirtualSite(waterEP[res][0], mm.ThreeParticleAverageSite(waterO[res][0], waterH[res][0], waterH[res][1], 1-weightH, weightH/2, weightH/2))
elif len(waterEP[res]) == 2:
# Five point water
weightH = cosOOP*distOE[res]/sqrt(distOH[res]**2-(0.5*distHH[res])**2)
angleHOH = 2*asin(0.5*distHH[res]/distOH[res])
lenCross = (distOH[res]**2)*sin(angleHOH)
weightCross = sinOOP*distOE[res]/lenCross
system.setVirtualSite(waterEP[res][0], mm.OutOfPlaneSite(waterO[res][0], waterH[res][0], waterH[res][1], weightH/2, weightH/2, weightCross))
system.setVirtualSite(waterEP[res][1], mm.OutOfPlaneSite(waterO[res][0], waterH[res][0], waterH[res][1], weightH/2, weightH/2, -weightCross))
# Add GBSA model.
if gbmodel is not None:
# Convert implicitSolventKappa to nanometers if it is a unit.
if units.is_quantity(implicitSolventKappa):
implicitSolventKappa = implicitSolventKappa.value_in_unit((1/units.nanometers).unit)
if verbose: print("Adding GB parameters...")
charges = prmtop.getCharges()
cutoff = None
if nonbondedMethod != 'NoCutoff':
cutoff = nonbondedCutoff
if units.is_quantity(cutoff):
cutoff = cutoff.value_in_unit(units.nanometers)
if gbmodel == 'HCT':
gb = customgb.GBSAHCTForce(solventDielectric, soluteDielectric, gbsaModel, cutoff, implicitSolventKappa)
elif gbmodel == 'OBC1':
gb = customgb.GBSAOBC1Force(solventDielectric, soluteDielectric, gbsaModel, cutoff, implicitSolventKappa)
elif gbmodel == 'OBC2':
if implicitSolventKappa > 0:
gb = customgb.GBSAOBC2Force(solventDielectric, soluteDielectric, gbsaModel, cutoff, implicitSolventKappa)
else:
gb = mm.GBSAOBCForce()
gb.setSoluteDielectric(soluteDielectric)
gb.setSolventDielectric(solventDielectric)
if gbsaModel is None:
gb.setSurfaceAreaEnergy(0)
elif gbmodel == 'GBn':
gb = customgb.GBSAGBnForce(solventDielectric, soluteDielectric, gbsaModel, cutoff, implicitSolventKappa)
elif gbmodel == 'GBn2':
gb = customgb.GBSAGBn2Force(solventDielectric, soluteDielectric, gbsaModel, cutoff, implicitSolventKappa)
else:
raise ValueError("Illegal value specified for implicit solvent model")
if isinstance(gb, mm.GBSAOBCForce):
# Built-in GBSAOBCForce does not have getStandardParameters, so use
# the one from the equivalent CustomGBForce
gb_parms = customgb.GBSAOBC2Force.getStandardParameters(topology)
else:
gb_parms = type(gb).getStandardParameters(topology)
# Replace radii and screen, but screen *only* gets replaced by the
# prmtop contents for HCT, OBC1, and OBC2. GBn and GBn2 both override
# the prmtop screen factors from LEaP in sander and pmemd
if gbmodel in ('HCT', 'OBC1', 'OBC2'):
screen = [float(s) for s in prmtop._raw_data['SCREEN']]
else:
screen = [gb_parm[1] for gb_parm in gb_parms]
radii = [float(r)/10 for r in prmtop._raw_data['RADII']]
warned = False
for i, (r, s) in enumerate(zip(radii, screen)):
if abs(r - gb_parms[i][0]) > 1e-4 or abs(s - gb_parms[i][1]) > 1e-4:
if not warned:
warnings.warn(
'Non-optimal GB parameters detected for GB model %s' % gbmodel)
warned = True
gb_parms[i][0], gb_parms[i][1] = r, s
for charge, gb_parm in zip(charges, gb_parms):
if gbmodel == 'OBC2' and implicitSolventKappa == 0:
gb.addParticle(charge, gb_parm[0], gb_parm[1])
elif gbmodel == 'GBn2':
gb.addParticle([charge, gb_parm[0], gb_parm[1],
gb_parm[2], gb_parm[3], gb_parm[4]])
else:
gb.addParticle([charge, gb_parm[0], gb_parm[1]])
# OBC2 with kappa == 0 uses mm.GBSAOBC2Force, which doesn't have
# a finalize method
if not (gbmodel == 'OBC2' and implicitSolventKappa == 0.):
gb.finalize()
system.addForce(gb)
if nonbondedMethod == 'NoCutoff':
gb.setNonbondedMethod(mm.NonbondedForce.NoCutoff)
elif nonbondedMethod == 'CutoffNonPeriodic':
gb.setNonbondedMethod(mm.NonbondedForce.CutoffNonPeriodic)
gb.setCutoffDistance(nonbondedCutoff)
elif nonbondedMethod == 'CutoffPeriodic':
gb.setNonbondedMethod(mm.NonbondedForce.CutoffPeriodic)
gb.setCutoffDistance(nonbondedCutoff)
else:
raise Exception("Illegal nonbonded method for use with GBSA")
# This applies the reaction field dielectric to the NonbondedForce
# created above. Do not bind force to another name before this!
force.setReactionFieldDielectric(1.0)
return system | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def to_top(openff_sys: \"System\", file_path: Union[Path, str]):\n if isinstance(file_path, str):\n path = Path(file_path)\n if isinstance(file_path, Path):\n path = file_path\n\n with open(path, \"w\") as top_file:\n top_file.write(\"; Generated by OpenFF System\\n\")\n _write_top_defaults(openff_sys, top_file)\n typemap = _build_typemap(openff_sys)\n _write_atomtypes(openff_sys, top_file, typemap)\n # TODO: Write [ nonbond_params ] section\n molecule_map = _build_molecule_map(openff_sys.topology)\n for mol_name, mol_data in molecule_map.items():\n # If the molecule is water ...\n if mol_data[\"reference_molecule\"].is_isomorphic_with(\n Molecule.from_smiles(\"O\")\n ):\n # ... do special water stuff\n pass\n _write_moleculetype(top_file, mol_name)\n _write_atoms(top_file, mol_name, mol_data, openff_sys, typemap)\n _write_valence(top_file, mol_name, mol_data, openff_sys, typemap)\n # _write_valence(openff_sys, top_file)\n _write_system(top_file, molecule_map)",
"def openmm_system(self):\n\n # Load the initial coords into the system and initialise\n pdb = app.PDBFile(self.pdb)\n forcefield = app.ForceField(self.xml)\n modeller = app.Modeller(pdb.topology, pdb.positions) # set the initial positions from the pdb\n self.system = forcefield.createSystem(modeller.topology, nonbondedMethod=app.NoCutoff, constraints=None)\n\n # Check what combination rule we should be using from the xml\n xmlstr = open(self.xml).read()\n # check if we have opls combination rules if the xml is present\n try:\n self.combination = ET.fromstring(xmlstr).find('NonbondedForce').attrib['combination']\n except AttributeError:\n pass\n except KeyError:\n pass\n\n if self.combination == 'opls':\n print('OPLS combination rules found in xml file')\n self.opls_lj()\n\n temperature = constants.STP * unit.kelvin\n integrator = mm.LangevinIntegrator(temperature, 5 / unit.picoseconds, 0.001 * unit.picoseconds)\n\n self.simulation = app.Simulation(modeller.topology, self.system, integrator)\n self.simulation.context.setPositions(modeller.positions)",
"def to_omnetpp(topology, path=None):\n try:\n from mako.template import Template\n except ImportError:\n raise ImportError('Cannot import mako.template module. '\n 'Make sure mako is installed on this machine.')\n set_delays = True\n set_capacities = True\n # Check correctness of capacity and delay attributes\n if not 'capacity_unit' in topology.graph or not topology.graph['capacity_unit'] in capacity_units:\n warn('Missing or invalid capacity unit attribute in the topology. The '\n 'output file will be generated without link capacity attributes.')\n set_capacities = False\n if not 'delay_unit' in topology.graph or not topology.graph['delay_unit'] in time_units:\n warn('Missing or invalid delay unit attribute in the topology. The '\n 'output file will be generated without link delay attributes.')\n set_delays = False\n template = Template(__TEMPLATE)\n variables = {\n 'topology': topology,\n 'set_capacities': set_capacities,\n 'set_delays': set_delays,\n }\n ned = template.render(**variables)\n if path:\n with open(path, \"w\") as out:\n out.write(ned)\n else:\n print(ned)",
"def setup_system(filename):\n pdb = PDBFile(filename)\n forcefield = ForceField('amber14-all.xml', 'amber14/tip3pfb.xml')\n\n #box vectors from charmm-gui files:\n pdb.topology.setPeriodicBoxVectors((Vec3(5.75760367, 0.0, 0.0),\n Vec3(0, 5.75760367, 0.0),\n Vec3(0.0, 0.0, 6.0)))\n system = forcefield.createSystem(pdb.topology, nonbondedMethod=PME,\n nonbondedCutoff=1*nanometer, constraints=HBonds)\n barostat = MonteCarloMembraneBarostat(1*bar, 200*bar*nanometer, 300*kelvin,\n MonteCarloMembraneBarostat.XYIsotropic, MonteCarloMembraneBarostat.ZFree)\n system.addForce(barostat)\n print('Created system')\n return system, pdb",
"def build_from_file(self, topology_file, topology_format):\n with open(topology_file) as infile:\n for line in infile:\n if line.startswith(\"#\"):\n continue\n else:\n if topology_format == 0:\n x = line.split(\"\\n\")[0].split(\"|\")\n as1 = int(x[0])\n as2 = int(x[1])\n relationship = int(x[2])\n else:\n x = line.split(\"\\n\")[0].split(\"\\t\")\n if x[2] == \"p2c\":\n as1 = int(x[0])\n as2 = int(x[1])\n relationship = -1\n elif x[2] == \"c2p\":\n as1 = int(x[1])\n as2 = int(x[0])\n relationship = -1\n elif x[2] == \"p2p\":\n as1 = int(x[1])\n as2 = int(x[0])\n relationship = 0\n else:\n continue\n\n if not self.has_edge(as1, as2):\n self.add_edge(as1, as2, relationship=relationship, as1=as1, as2=as2)",
"def load_system(filename, args, prefix, path):\n try:\n # Open file and do parameter substitution\n doc = substitute(filename, args)\n except ParseBaseException as e:\n print()\n print(\"Parsing error in system:\", filename)\n print(e)\n sys.exit(1)\n \n try:\n # Load data\n declare, statements = document.parseString(doc, parseAll=True)\n except ParseBaseException as e:\n print()\n print_linenums(doc)\n print(\"Parsing error in system:\", filename)\n print(e)\n sys.exit(1)\n \n x, name, params, inputs, outputs = declare\n # Build data\n system = System(path, name, prefix, params)\n for stat in statements:\n #print list(stat)\n if stat[0] == import_:\n system.add_import(*stat[1:])\n elif stat[0] == component:\n system.add_component(*stat[1:])\n else:\n raise Exception(\"Unexpected statement:\\n%s\" % stat)\n system.add_IO(inputs, outputs)\n return system",
"def build_topology(config):\n top_type = config['Metapopulation']['topology']\n assert top_type.lower() in ['moore', 'vonneumann', 'smallworld',\n 'complete', 'regular']\n\n if top_type.lower() == 'moore':\n return build_topology_moore(config)\n elif top_type.lower() == 'vonneumann':\n return build_topology_vonneumann(config)\n elif top_type.lower() == 'smallworld':\n return build_topology_smallworld(config)\n elif top_type.lower() == 'complete':\n return build_topology_complete(config)\n elif top_type.lower() == 'regular':\n return build_topology_regular(config)",
"def _setup_topology(self, topology_file):\n try:\n with open(topology_file, \"r\") as top_file:\n try:\n self.topology = json.load(top_file)\n except json.JSONDecodeError:\n raise RuntimeError(\"Could not parse the json file: {}\".format(topology_file))\n except FileNotFoundError:\n raise FileNotFoundError(\"Could not find the file specifying the topology:\" \" {}\".format(topology_file))\n except IsADirectoryError:\n raise FileNotFoundError(\"Could not find the file specifying the topology: \" \"{}\".format(topology_file))",
"def build(obs_space: Box, action_space: Box, spec: Spec) -> MLPModel:\n model = MLPModel(obs_space, action_space, spec.network)\n model.initialize_parameters(spec.initializer)\n if spec.residual:\n model = ResidualStochasticModel(model)\n return model",
"def load(data, pbc=(True, True, True), symbols=None, atom_style=None,\n units='metal'):\n\n # First pass over file to generate system and locate content\n system, params = firstpass(data, pbc, symbols, units)\n\n # Set atom_style\n if atom_style is None:\n \n # Use default if no value given/found\n if params['atom_style'] is None:\n atom_style = 'atomic'\n \n # Use value listed in data\n else:\n atom_style = params['atom_style']\n \n # Check that atom_style matches the one in data\n elif params['atom_style'] is not None and atom_style != params['atom_style']:\n raise ValueError(f'given atom_style of {atom_style} differs from value of {params[\"atom_style\"]} found in data')\n \n # Read in Atoms info\n system = read_atoms(data, system, atom_style, units, params['atomsstart'], params['atomscolumns'])\n \n # Read in Velocities info\n system = read_velocities(data, system, atom_style, units, params['velocitiesstart'])\n \n return system",
"def openmmTop_to_oemol(topology, positions, verbose=False):\n\n # Create an empty OEMol\n oe_mol = oechem.OEMol()\n\n # Mapping dictionary between openmm atoms and oe atoms\n openmm_atom_to_oe_atom = {}\n\n # Python set used to identify atoms that are not in protein residues\n keep = set(proteinResidues).union(dnaResidues).union(rnaResidues)\n\n for chain in topology.chains():\n for res in chain.residues():\n # Create an OEResidue\n oe_res = oechem.OEResidue()\n # Set OEResidue name\n oe_res.SetName(res.name)\n # If the atom is not a protein atom then set its heteroatom\n # flag to True\n if res.name not in keep:\n oe_res.SetFragmentNumber(chain.index + 1)\n oe_res.SetHetAtom(True)\n # Set OEResidue Chain ID\n oe_res.SetChainID(chain.id)\n # res_idx = int(res.id) - chain.index * len(chain._residues)\n # Set OEResidue number\n oe_res.SetResidueNumber(int(res.id))\n\n for openmm_at in res.atoms():\n # Create an OEAtom based on the atomic number\n oe_atom = oe_mol.NewAtom(openmm_at.element._atomic_number)\n # Set atom name\n oe_atom.SetName(openmm_at.name)\n # Set Symbol\n oe_atom.SetType(openmm_at.element.symbol)\n # Set Atom index\n oe_res.SetSerialNumber(openmm_at.index + 1)\n # Commit the changes\n oechem.OEAtomSetResidue(oe_atom, oe_res)\n # Update the dictionary OpenMM to OE\n openmm_atom_to_oe_atom[openmm_at] = oe_atom\n\n if topology.getNumAtoms() != oe_mol.NumAtoms():\n raise ValueError(\"OpenMM topology and OEMol number of atoms mismatching: \"\n \"OpenMM = {} vs OEMol = {}\".format(topology.getNumAtoms(), oe_mol.NumAtoms()))\n\n # Count the number of bonds in the openmm topology\n omm_bond_count = 0\n\n # Create the bonds\n for omm_bond in topology.bonds():\n\n omm_bond_count += 1\n\n at0 = omm_bond[0]\n at1 = omm_bond[1]\n\n oe_bond_order = omm_bond.order\n\n # If bond order info are not present set the bond order temporary to one\n if not omm_bond.order:\n oe_bond_order = 1\n\n # OE atoms\n oe_atom0 = openmm_atom_to_oe_atom[at0]\n oe_atom1 = openmm_atom_to_oe_atom[at1]\n\n # Create the bond\n oe_bond = oe_mol.NewBond(oe_atom0, oe_atom1, oe_bond_order)\n\n if omm_bond.type:\n if omm_bond.type == 'Aromatic':\n oe_atom0.SetAromatic(True)\n oe_atom1.SetAromatic(True)\n oe_bond.SetAromatic(True)\n oe_bond.SetType(\"Aromatic\")\n elif omm_bond.type in [\"Single\", \"Double\", \"Triple\", \"Amide\"]:\n oe_bond.SetType(omm_bond.type)\n else:\n oe_bond.SetType(\"\")\n\n if omm_bond_count != oe_mol.NumBonds():\n raise ValueError(\"OpenMM topology and OEMol number of bonds mismatching: \"\n \"OpenMM = {} vs OEMol = {}\".format(omm_bond_count, oe_mol.NumBonds()))\n\n # Set the OEMol positions\n pos = positions.in_units_of(unit.angstrom) / unit.angstrom\n pos = list(itertools.chain.from_iterable(pos))\n oe_mol.SetCoords(pos)\n oechem.OESetDimensionFromCoords(oe_mol)\n\n return oe_mol",
"def create_om_problem(prob):\n ivc = om.IndepVarComp()\n\n # Add subsystems to problem ##\n add_subsystems(prob, ivc)\n\n # Defining problem parameters ##\n add_parameters(prob, ivc)\n\n # Setting up the problem options ##\n driver_setup(prob)\n\n # Setup the model hierarchy for OpenMDAO ##\n prob.setup()",
"def read_gro(filename):\n top = Topology()\n\n with open(filename, \"r\") as gro_file:\n top.name = str(gro_file.readline().strip())\n n_atoms = int(gro_file.readline())\n coords = u.nm * np.zeros(shape=(n_atoms, 3))\n for row, _ in enumerate(coords):\n line = gro_file.readline()\n content = line.split()\n if not line:\n msg = (\n \"Incorrect number of lines in .gro file. Based on the \"\n \"number in the second line of the file, {} rows of\"\n \"atoms were expected, but at least one fewer was found.\"\n )\n raise ValueError(msg.format(n_atoms))\n\n res = content[0]\n atom_name = content[1]\n atom_id = content[2]\n coords[row] = u.nm * np.array(\n [\n float(content[3]),\n float(content[4]),\n float(content[5]),\n ]\n )\n site = Atom(name=atom_name, position=coords[row])\n\n r = re.compile(\"([0-9]+)([a-zA-Z]+)\")\n m = r.match(res)\n site.molecule = (m.group(2), int(m.group(1)))\n site.residue = (m.group(2), int(m.group(1)))\n top.add_site(site, update_types=False)\n top.update_topology()\n\n # Box information\n line = gro_file.readline().split()\n top.box = Box(u.nm * np.array([float(val) for val in line[:3]]))\n\n # Verify we have read the last line by ensuring the next line in blank\n line = gro_file.readline()\n if line:\n msg = (\n \"Incorrect number of lines in input file. Based on the \"\n \"number in the second line of the file, {} rows of atoms \"\n \"were expected, but at least one more was found.\"\n )\n raise ValueError(msg.format(n_atoms))\n\n return top",
"def generate_testsystem(smiles = 'CCCC',\n forcefield_files = ['amber14/protein.ff14SB.xml', 'amber14/tip3p.xml'],\n forcefield_kwargs = {'removeCMMotion': False, 'ewaldErrorTolerance': 1e-4, 'constraints' : None, 'hydrogenMass' : 4 * unit.amus},\n nonperiodic_forcefield_kwargs = {'nonbondedMethod': app.NoCutoff},\n periodic_forcefield_kwargs = {'nonbondedMethod': app.PME},\n small_molecule_forcefield = 'gaff-2.11',\n padding=9*unit.angstroms,\n ionicStrength=0.0*unit.molar,\n water_model = 'tip3p',\n pressure = 1.0 * unit.atmosphere,\n temperature = 300 * unit.kelvin,\n barostat_period = 50,\n **kwargs\n ):\n from openforcefield.topology import Molecule\n from perses.utils.openeye import smiles_to_oemol\n from openmmforcefields.generators.system_generators import SystemGenerator\n from perses.utils.openeye import OEMol_to_omm_ff\n from simtk import openmm\n from qmlify.utils import pull_force_by_name\n\n oemol = smiles_to_oemol(smiles)\n off_molecules = [Molecule.from_openeye(oemol)]\n vac_system_generator = SystemGenerator(forcefields=forcefield_files,\n small_molecule_forcefield=small_molecule_forcefield,\n forcefield_kwargs=forcefield_kwargs,\n nonperiodic_forcefield_kwargs = nonperiodic_forcefield_kwargs, molecules = off_molecules)\n barostat = openmm.MonteCarloBarostat(pressure, temperature, barostat_period)\n sol_system_generator = SystemGenerator(forcefields=forcefield_files,\n small_molecule_forcefield=small_molecule_forcefield,\n forcefield_kwargs=forcefield_kwargs,\n periodic_forcefield_kwargs = periodic_forcefield_kwargs,\n molecules = off_molecules,\n barostat = barostat)\n\n\n vac_system, vac_positions, vac_topology = OEMol_to_omm_ff(oemol, vac_system_generator)\n\n #now i can attempt to solvate\n modeller = app.Modeller(vac_topology, vac_positions)\n modeller.addSolvent(sol_system_generator.forcefield, model=water_model, padding=padding, ionicStrength=ionicStrength)\n sol_positions, sol_topology = modeller.getPositions(), modeller.getTopology()\n sol_positions = unit.quantity.Quantity(value = np.array([list(atom_pos) for atom_pos in sol_positions.value_in_unit_system(unit.md_unit_system)]), unit = unit.nanometers)\n sol_system = sol_system_generator.create_system(sol_topology)\n\n vac_sys_pos_top = (vac_system, vac_positions, vac_topology)\n sol_sys_pos_top = (sol_system, sol_positions, sol_topology)\n\n #a quick assertion to make sure the nonbonded forces are being treated properly\n vac_nbf, sol_nbf = pull_force_by_name(vac_system, 'NonbondedForce'), pull_force_by_name(sol_system, 'NonbondedForce')\n assert not vac_nbf.usesPeriodicBoundaryConditions()\n assert sol_nbf.usesPeriodicBoundaryConditions()\n\n return vac_sys_pos_top, sol_sys_pos_top",
"def convert_topology(src_filename, set_backbone=True, in_place=False, split_dir=None):\n\n # Grab unit cell description (should be on first few lines:\n cryst = None\n with open(src_filename) as src:\n for line in src.readlines():\n if line.startswith('CRYST1'):\n cryst = line\n break\n\n # Read in source PDB (DEShaw original format)\n src_pdb = PdbStructure(open(src_filename))\n atoms = list(src_pdb.iter_atoms())\n topo = md.load(src_filename).top\n\n # Break into 4 segments\n segment_list = ['C1', 'C2', 'C3', 'C4']\n segment = {l:[] for l in segment_list}\n for i in atoms: \n segment[i.segment_id].append(i)\n\n # Set temperature factor (for gradual heating) \n if set_backbone:\n backbone = topo.select(\"backbone\")\n for i in range(0, len(segment['C1'])):\n if i in backbone:\n segment['C1'][i].location.temperature_factor = 1.0\n\n # Resort water segements and alias \"pseu\" to OM (tip4p forcefield)\n for wat in ['C2', 'C3']:\n segment[wat] = sorted(segment[wat], key = lambda i: i.residue_number)\n start_serial_num = min(segment[wat], key= lambda i: i.serial_number)\n for i in range(0, len(segment[wat])):\n newsn = i + start_serial_num.serial_number\n segment[wat][i].serial_number = newsn\n if segment[wat][i].get_name == 'pseu':\n segment[wat][i].set_name_with_spaces(' OM ')\n\n # FOR RE-RUNNING THE PSFGEN\n if split_dir is not None:\n for s in segment_list:\n with open(split_dir + '/%s.pdb' % s, 'w') as dest:\n for atom in segment[s]:\n _=dest.write(str(atom) + '\\n')\n\n # Writeout new file\n if in_place:\n dest = open(src_filename, 'w')\n if cryst is not None:\n dest.write(cryst)\n for s in segment_list:\n for atom in segment[s]:\n _=dest.write(str(atom) + '\\n')\n _=dest.write('END')\n dest.close()",
"def topology(self, topo_file: str, *args: str):\n self.scion_sh('topology', '-c', topo_file, '-d', *args)",
"def create_model(self):\n try:\n self.model = PPO2.load(self.save_path)\n self.model.set_env(self.env)\n print(\"Loading of the latest model successful!\")\n except:\n print(\"Creating new model...\")\n self.model = PPO2(CnnPolicy, self.env, verbose=1)",
"def topology(self, topo_file: str, *args: str):\n pass",
"def __init__(self, travel_model_dir_name, mode='full', years_to_run=None, procedure_file=\"opus.par\"):\n\n\ttravel_model_configuration = {}\n\t\n\ttravel_model_configuration.update( {'visum_version_number': 10} )\n\t\n\t### mapping from visum matrice name to urbansim travel_data variable name\n\t## dict key is used as matrix number for VisumPy.helpers.GetODMatrix and VisumPy.helpers.GetSkimMatrix\n\t## dict value is used as attribute name for urbansim travel_data table\n\ttm_to_urbansim_variables = {\n\t'od':{\n\t ## need data for zone index, e.g.\n # -1:'from_zone_id',\n\t # -2:'to_zone_id',\n\t1:'transit_trips', #'transit (PuT - public transport) trips',\n\t2:'auto_trips', #'auto trips',\n\t}, \n\t'skim':{ \n\t ## need data for zone index, e.g.\n # -1:'from_zone_id',\n\t # -2:'to_zone_id',\n\t1: 'auto_travel_time', #'auto assigned travel time (ttc)',\n\t2: 'transit_in_vehicle_time' #'PuT in-vehicle time (ivt)',\n\t} \n\t}\n \n\t### TAZ attributes to be transferred from urbansim to visum\n\turbansim_to_tm_variables = [\n\t 'TAZ=(zone.zone_id).astype(int16)',\n\t 'retail_by_taz=zone.aggregate(urbansim.gridcell.number_of_jobs_of_group_retail)', \n\t ## the employment groups below need to be defined in employment_adhoc_sector_groups and \n\t ## employment_adhoc_sector_group_definitions before they can be used\n\t #'fires_by_taz=zone.aggregate(urbansim.gridcell.number_of_jobs_of_group_fires)',\n\t #'gov_by_taz=zone.aggregate(urbansim.gridcell.number_of_jobs_of_group_gov)',\n\t #\"educ_by_taz=zone.aggregate(urbansim.gridcell.number_of_jobs_of_group_educ)\",\n\t #\"wtcu_by_taz=zone.aggregate(urbansim.gridcell.number_of_jobs_of_group_wtcu)\",\n\t #\"manu_by_taz=zone.aggregate(urbansim.gridcell.number_of_jobs_of_group_manu)\",\n\t #\"univ_by_taz=zone.aggregate(urbansim.gridcell.number_of_jobs_of_group_univ)\",\n\t ## need to change income categories to 4 instead of 3\n\t \"low_income_hh_by_taz=zone.aggregate(urbansim.gridcell.number_of_low_income_households)\",\n\t \"mid_income_hh_by_taz=zone.aggregate(urbansim.gridcell.number_of_mid_income_households)\",\n\t #\"upper_mid_income_hh_by_taz=?\",\n\t \"upper_income_hh_by_taz=zone.aggregate(urbansim.gridcell.number_of_high_income_households)\",\n\t ## need variable specification\n\t #\"pctmf=?\",\n\t #\"gqi=?\",\n\t #\"gqn=?\",\n\t #\"fteuniv=?\",\n\t #\"density=?\"\n ]\n \n\ttravel_model_configuration.update( {\n\t \"tm_to_urbansim_variables\":tm_to_urbansim_variables,\n\t \"urbansim_to_tm_variables\":urbansim_to_tm_variables,\n\t} )\n\t\n\tself.__add_models(travel_model_configuration, mode)\n\tself.__add_years(travel_model_configuration, travel_model_dir_name, years_to_run, procedure_file)\n\n\tself.merge(travel_model_configuration)",
"def create_process(args):\n process = args.process\n\n # create list of valid machines\n valid_machines = []\n valid_types = [cmpy.machines.MealyHMM, \n cmpy.machines.RecurrentEpsilonMachine]\n\n for em in dir(cmpy.machines):\n if em[0].isupper():\n try:\n m_str = 'cmpy.machines.' + em +'()' \n eval(m_str)\n mtype = type(eval(m_str))\n if mtype in valid_types:\n valid_machines.append(em)\n except:\n pass\n\n # remove MealyHMM, RecurrentEpsilonMachine\n valid_machines.remove('MealyHMM')\n valid_machines.remove('RecurrentEpsilonMachine')\n\n # if in valid_machine, try to create instance\n if process in valid_machines:\n eM = eval('cmpy.machines.' + process + '()')\n else: \n error_msg = (\"\\n\\nProcess {} not valid. Try:\\n\\n{}\\n\".format(process,\n valid_machines))\n raise ProcessException(error_msg)\n\n return eM",
"def create_system_from_gro(gro_file):\n\n f = open(gro_file)\n f.readline() # skip comment line\n n_particles = int(f.readline())\n \n # store coordinates and velocities\n x, y, z = [], [], []\n for i in range(n_particles):\n s = f.readline()[20:69]\n # coordinates\n x.append(float(s[0:8]))\n y.append(float(s[8:16]))\n z.append(float(s[16:24]))\n \n # store box size\n Lx, Ly, Lz = map(float, f.readline().split()) # read last line, convert to float\n f.close()\n\n system = init.create_empty(N=n_particles, box=data.boxdim(Lx=Lx,Ly=Ly,Lz=Lz), particle_types=['CG'])\n for i, p in enumerate(system.particles):\n p.position = ( x[i], y[i], z[i] ) \n p.type = 'CG'\n\n return(system)",
"def test_load_system(self, name, file_name):\n json_file = os.path.join(self.geometry_dir, file_name)\n system = MultiBodySystem.from_json(json_file)",
"def createLsystemFromFile( filename ):\n\tfp = open(filename, \"r\")\n\tlines = fp.readlines()\n\tfp.close()\n\tlsys = init()\n\tfor line in lines:\n\t\twords = line.split()\n\t\tif words[0] == 'base':\n\t\t\tsetBase(lsys, words[1])\n\t\telif words[0] == 'rule':\n\t\t\taddRule(lsys, words[1:])\n\treturn lsys",
"def setup_system_implicit(filename, barostat=False):\n pdb = PDBFile(filename)\n forcefield = app.ForceField('amber99sbildn.xml', 'amber99_obc.xml')\n system = forcefield.createSystem(pdb.topology, nonbondedMethod=app.CutoffNonPeriodic, constraints=HBonds,\n implicitSolvent=OBC2,implicitSolventKappa=1.0/nanometer)\n if barostat:\n system.addForce(MonteCarloBarostat(1*bar, 310*kelvin))\n set_dihedral_force_group(system)\n print('Created system')\n return system, pdb",
"def make_objects():\n cwd = os.getcwd()\n\n os.chdir(\"test_data/protein_load\")\n pmodel = pyODEM.model_loaders.Protein(\"ww_domain.ini\")\n os.chdir(cwd)\n\n pmodel.set_temperature(120.)\n\n return pmodel",
"def __init__(self,mf,rnmf,exemt3dms,modelname,workspace,dataspace,fltfile):\n #Constructor begins here\n self.__exefile=exemt3dms\n self.__workspace=workspace\n self.__dataspace=dataspace\n self.__modelname=modelname\n self.__ftlnm=fltfile\n \n # create mt3dms model object\n self.__mt = flopy.mt3d.Mt3dms(modflowmodel=mf, modelname=self.__modelname, \n exe_name=self.__exefile , ftlfilename=self.__ftlnm,\n model_ws=self.__workspace)\n \n # basic transport package\n perlen=rnmf.getPerlen()\n nper=rnmf.getNper()\n nstp=rnmf.getNstp()\n self.__btn = flopy.mt3d.Mt3dBtn(self.__mt, prsity=0.3, icbund = 1, sconc=0.0, ncomp=1, \n perlen = perlen, nper=nper, nstp = nstp, tsmult = 1.0, \n nprs = -1, nprobs = 10, cinact = -1, chkmas=True)\n \n # advaction package\n self.__adv = flopy.mt3d.Mt3dAdv(self.__mt, mixelm=-1, percel=0.75)\n # dispersion package\n self.__dsp = flopy.mt3d.Mt3dDsp(self.__mt, al=1000, trpt=0.1, trpv=0.1, dmcoef=1e-09)\n #Forcing objects\n itype = flopy.mt3d.Mt3dSsm.itype_dict()\n print(itype)\n print(flopy.mt3d.Mt3dSsm.get_default_dtype())\n wellbld=rnmf.getWelBld()\n self.__ssm=wellbld.createssm(self.__mt,itype)\n # matrix solver package\n self.__gcg = flopy.mt3d.Mt3dGcg(self.__mt, cclose=1e-6)",
"def construct_mpi_topology(self, dico):\n period = [True]*self.dim\n\n if dico is None:\n comm = mpi.COMM_WORLD\n else:\n comm = dico.get('comm', mpi.COMM_WORLD)\n self.mpi_topo = MPI_topology(self.dim, period, comm)",
"def test_simple_creation():\n # Get model file\n create.main(\"mlp\", \"10:12:8\", \"model_test.tar\")",
"def load(self, device: str):\n if self.config.n_model == \"p2g.zh\":\n from pororo.models.p2g import P2gM\n\n pinyin = download_or_load(\n f\"misc/pinyin2idx.{self.config.lang}.pkl\",\n self.config.lang,\n )\n char = download_or_load(\n f\"misc/char2idx.{self.config.lang}.pkl\",\n self.config.lang,\n )\n ckpt = download_or_load(\n f\"misc/{self.config.n_model}.pt\",\n self.config.lang,\n )\n model = P2gM(pinyin, char, ckpt, device)\n return PororoP2GZh(model, self.config)\n\n if self.config.n_model == \"p2g.ja\":\n from fairseq.models.transformer import TransformerModel\n\n load_dict = download_or_load(\n \"transformer/transformer.base.ja.p2g\",\n self.config.lang,\n )\n\n model = (TransformerModel.from_pretrained(\n model_name_or_path=load_dict.path,\n checkpoint_file=\"transformer.base.ja.p2g.pt\",\n data_name_or_path=load_dict.dict_path,\n source_lang=load_dict.src_dict,\n target_lang=load_dict.tgt_dict,\n ).eval().to(device))\n\n return PororoP2GJa(model, self.config)",
"def load(self, model_name_or_path):\n return BertMLM(model_name_or_path, self.top_k)"
] | [
"0.5573243",
"0.55360574",
"0.53219885",
"0.5308998",
"0.5227083",
"0.51333475",
"0.5017997",
"0.49888855",
"0.49778077",
"0.49404794",
"0.4938319",
"0.49289978",
"0.48723698",
"0.4865901",
"0.48460186",
"0.48352525",
"0.4804288",
"0.47721088",
"0.474612",
"0.47237206",
"0.47176543",
"0.46924144",
"0.46798798",
"0.46786827",
"0.46674877",
"0.46388662",
"0.46323815",
"0.46252176",
"0.46192175",
"0.4601811"
] | 0.7227145 | 0 |
Read atomic coordinates (and optionally, box vectors) from Amber formatted coordinate file. ARGUMENTS filename (string) name of Amber coordinates file to be read in OPTIONAL ARGUMENTS asNumpy (boolean) if True, results will be returned as Numpy arrays instead of lists of Vec3s RETURNS coordinates, velocities, boxVectors The velocities and boxVectors will be None if they are not found in the restart file EXAMPLES Read coordinates in vacuum. >>> directory = os.path.join(os.getenv('YANK_INSTALL_DIR'), 'test', 'systems', 'alaninedipeptidegbsa') >>> crd_filename = os.path.join(directory, 'alaninedipeptide.inpcrd') >>> coordinates, velocities, box_vectors = readAmberCoordinates(crd_filename) Read coordinates in solvent. >>> directory = os.path.join(os.getenv('YANK_INSTALL_DIR'), 'test', 'systems', 'alaninedipeptideexplicit') >>> crd_filename = os.path.join(directory, 'alaninedipeptide.inpcrd') >>> coordinates, velocities, box_vectors = readAmberCoordinates(crd_filename) | def readAmberCoordinates(filename, asNumpy=False):
try:
crdfile = AmberNetcdfRestart(filename)
except ImportError:
# See if it's an ASCII file. If so, no need to complain
try:
crdfile = AmberAsciiRestart(filename)
except TypeError:
raise TypeError('Problem parsing %s as an ASCII Amber restart file '
'and scipy could not be imported to try reading as '
'a NetCDF restart file.' % filename)
except (IndexError, ValueError):
raise TypeError('Could not parse Amber ASCII restart file %s' %
filename)
except ImportError:
raise ImportError('Could not find numpy; cannot use asNumpy=True')
except TypeError:
# We had scipy, but this is not a NetCDF v3 file. Try as ASCII now
try:
crdfile = AmberAsciiRestart(filename)
except TypeError:
raise
raise TypeError('Problem parsing %s as an ASCII Amber restart file'
% filename)
except (IndexError, ValueError):
raise TypeError('Could not parse Amber ASCII restart file %s' %
filename)
# Import error cannot happen, since we had scipy which has numpy as a
# prereq. Do not catch that exception (only catch what you intend to
# catch...)
# We got here... one of the file types worked. Return the coordinates,
# velocities, and boxVectors
return crdfile.coordinates, crdfile.velocities, crdfile.boxVectors | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_coordinates_xyz(filename):\n\n f = open(filename, 'r')\n V = list()\n atoms = list()\n n_atoms = 0\n\n # Read the first line to obtain the number of atoms to read\n try:\n n_atoms = int(f.readline())\n except ValueError:\n print(\"Could not obtain the number of atoms in the .xyz file. \"+filename)\n return None\n\n # Skip the title line\n f.readline()\n\n # Use the number of atoms to not read beyond the end of a file\n for lines_read, line in enumerate(f):\n\n if lines_read == n_atoms:\n break\n\n atom = re.findall(r'[a-zA-Z]+', line)[0]\n # atom = atom.upper()\n\n numbers = re.findall(r'[-]?\\d+\\.\\d*(?:[Ee][-\\+]\\d+)?', line)\n numbers = [float(number) for number in numbers]\n\n # The numbers are not valid unless we obtain exacly three\n if len(numbers) == 3:\n V.append(np.array(numbers))\n atoms.append(atom)\n else:\n exit(\"Reading the .xyz file failed in line {0}. Please check the format.\".format(lines_read + 2))\n\n f.close()\n atoms = np.array(atoms)\n V = np.array(V)\n return atoms, V",
"def read_xyz(filename):\n #print('Reading geom from:'),filename\n atoms = []\n coordinates = []\n\t\n xyz = open(filename)\n n_atoms = int(xyz.readline())\n title = xyz.readline()\n for line in xyz:\n\tif len(line.strip()) == 0:\n\t\tpass\n\t\tbreak\t\n\tatom,x,y,z = line.split()\n\tatoms.append(atom)\n\tcoordinates.append([float(x), float(y), float(z)])\n xyz.close()\n coordinates = [[w * angtobh for w in ww] for ww in coordinates] #ang to bh\n\n if n_atoms != len(coordinates):\n \tprint('Number of atoms in xyz file doesnt equal to the number of lines.')\n\tsys.exit(1)\n \n return atoms, coordinates",
"def read_xyz(self, filename):\n # first line contains number of atoms\n self.numatom = int(filename.readline().split()[0])\n # second line contains a comment\n self.comment = filename.readline()[:-3]\n # rest of the lines contain coordinates structured Element X Y Z\n string = \"Element X Y Z \\n\" + filename.read()\n self.contents = pd.read_table(StringIO(string), sep=r'\\s+')",
"def read_xyz(filename):\n\n config = {}\n\n with open(filename, 'r') as f:\n # number of atoms (spins)\n config['nat'] = int(re.findall('\\S+', f.readline())[0])\n\n # box parameters (type, dimension, shape, periodicity)\n sarr = re.findall('\\S+', f.readline())\n config['latt_type'] = sarr[0]\n dims = list(map(int, sarr[1:4]))\n config['latt_box'] = np.array(dims)\n config['box'] = np.diag(dims)\n config['pbc'] = list(map(int, sarr[4:7]))\n if len(sarr) > 7:\n dim_intra = len(sarr) - 7\n\n atom_types = []\n xyz = []\n config['latt_i'] = np.zeros(dims, dtype=int)\n config['latt_atoms'] = np.zeros(dims, dtype=int)\n config['latt_intra'] = np.zeros(tuple(dims) + (dim_intra,), dtype='float64')\n for i in range(config['nat']):\n sarr = re.findall('\\S+', f.readline())\n t = int(sarr[0])\n r = tuple(map(int, sarr[1:4]))\n\n atom_types.append(t)\n xyz.append(r)\n\n config['latt_i'][r] = i\n config['latt_atoms'][r] = t\n\n for j in range(dim_intra):\n ci = float(sarr[4 + j])\n config['latt_intra'][r[0], r[1], r[2], j] = ci\n\n config['atom_types'] = np.array(atom_types)\n config['xyz'] = np.array(xyz)\n \n return config",
"def atmparamread(filename):\n f = open(filename, 'r')\n f.readline()\n line = f.readline()\n #Td = float(line.split()[0])\n #Pd = float(line.split()[1])\n #Mc = float(line.split()[2])\n #rc = float(line.split()[3])\n n = int(line.split()[0])\n f.readline()\n atm = 0*numpy.ndarray(shape=(n, ncol), dtype=float)\n S = 0*numpy.ndarray(shape=(n), dtype=float)\n for i in range(n):\n line = f.readline()\n S[i] = float(line.split()[0])\n for j in range(ncol ):\n atm[i, j] = float(line.split()[j+1])\n f.close()\n return atm, S",
"def readcif(filename, **kwds):\n \n # Read the unit cell parameters\n a, b, c, alf, bet, gam = [[]]*6\n with open(filename, 'r') as f:\n \n for line in f:\n if \"length_a\" in line:\n a = numgrab(line)\n elif \"length_b\" in line:\n b = numgrab(line)\n elif \"length_c\" in line:\n c = numgrab(line)\n elif \"angle_alpha\" in line:\n alf = numgrab(line)\n elif \"angle_beta\" in line:\n bet = numgrab(line)\n elif \"angle_gamma\" in line:\n gam = numgrab(line)\n \n crystVec = a + b + c + alf + bet + gam\n \n # Read atomic coordinates\n cifdata = pd.read_csv(filename, delim_whitespace=True, header=None, **kwds)\n atomLabels = np.array(cifdata.values[:,0], dtype='str')\n coords = np.array(cifdata.values[:,1:4]).astype('float64')\n\n return atomLabels, coords, crystVec",
"def read(self, FN, natoms=None, return_title=False, \\\n multiplier=None, trajectory=False):\n if not os.path.isfile(FN):\n raise Exception('Coordinate file %s does not exist!' % FN)\n if FN.endswith('.gz'):\n import gzip\n F = gzip.open(FN, 'r')\n else:\n F = open(FN, 'r')\n dat = F.read().strip().split('\\n')\n F.close()\n\n title = dat.pop(0) # Title\n\n if len(dat[0].split()) > 1:\n # VMD format (does not specify number of atoms)\n crd = []\n for line in dat:\n crd = crd + [float(x) for x in line.split()]\n crd = np.resize(crd, (len(crd) / 3, 3))\n else:\n # AMBER format\n file_natoms = int(dat.pop(0)) # Number of atoms\n if (natoms is not None) and (file_natoms != natoms):\n print \"Incorrect number of atoms in crd file\"\n return np.array([])\n\n if trajectory:\n w = 8 # For mdcrd\n else:\n w = 12 # For inpcrd\n crd = []\n for line in dat:\n crd = crd + [float(line[x:x + w]) for x in range(0, len(line), w)]\n crd = np.resize(crd, (len(crd) / 3, 3))\n\n if multiplier is not None:\n crd = multiplier * crd\n if (natoms is not None):\n crd = np.vsplit(crd, crd.shape[0] / natoms)\n print \" read %d configurations from %s\" % (len(crd), FN)\n\n if return_title:\n return (crd, title)\n else:\n return crd",
"def load_xyz(filename):\n periodic = load_periodic()\n #read molecule\n with open(filename) as f:\n size = int(next(f))\n title = next(f).strip()\n molecule = Molecule(title,size)\n for _ in range(size):\n row = next(f).split()\n tag = row[0]\n element = periodic[tag]\n coordinate = []\n for j in range(3):\n coordinate.append(float(row[j+1]))\n atom = Atom(element,coordinate)\n\n molecule.append(atom)\n f.close()\n \n return molecule",
"def read_xyz_file(filename, num_spatial_dimensions):\n print(\"Reading data from XYZ file.\")\n\n particle_positions = []\n frame_number = 0\n line_number = 0\n frame_particles = 0\n with open(filename, 'r') as input_file:\n for line in input_file:\n if line_number == 0:\n # Check for blank line at end of file\n if line != \"\":\n frame_particles = int(line)\n particle_positions.append(np.zeros((frame_particles, num_spatial_dimensions)))\n elif line_number == 1:\n pass\n else:\n for dimension in range(num_spatial_dimensions):\n particle_positions[frame_number][line_number-2][dimension] = line.split()[1:][dimension]\n line_number += 1\n # If we have reached the last particle in the frame, reset counter for next frame\n if line_number == (frame_particles + 2):\n line_number = 0\n frame_number += 1\n\n print(\"XYZ read complete.\")\n\n return particle_positions",
"def read_coordinates(path_to_file, zipped=False):\n coords = []\n if options.verbose:\n syserr(\"Reading sequences from coordinate file %s\\n\" % (path_to_file))\n try:\n if zipped:\n corfile = gzip.open(path_to_file, 'rb')\n else:\n corfile = open(path_to_file, 'r')\n except IOError:\n raise IOError('Cannot read from coordinate file %s' % (path_to_file))\n for coord in corfile:\n coord = coord.rstrip().split()\n try:\n coords.append([coord[0], coord[1], int(coord[2]), int(coord[3]), coord[4]])\n except IndexError:\n syserr(\"IndexError in coordinate file, line: %s\\n\" % (' '.join(coord)))\n except ValueError, e:\n syserr(str(e) + \"\\n\")\n\n return coords",
"def read_coordinate_file(file):\n with open(file, 'r') as file1:\n coords = []\n\n for line in file1:\n line = line.strip('{} \\n')\n (a, b) = line.split(\",\")\n ''' \n x and y are expressed as latitude and longitude. These are converted with the Mercator projection (from Computer assignment 1)\n into x and y coordinates.\n '''\n coord = [(float(b)*m.pi/180), (m.log((m.tan(m.pi/4+m.pi*float(a)/360))))]\n coords.append(coord)\n return np.array(coords)",
"def readCubeFile(self, filename):\n\n inputfile = open(filename, \"r\")\n header = \"\".join([inputfile.readline(), inputfile.readline()])\n\n temp = inputfile.readline().strip().split()\n self.numAtoms = int(temp[0])\n self.origin = list(map(float, temp[1:]))\n\n self.numPoints = [0] * 3\n self.spacing = [0] * 3\n for i in range(3):\n line = inputfile.readline().strip().split()\n self.numPoints[i] = int(line[0])\n temp = list(map(float, line[1:]))\n self.spacing[i] = temp[i]\n assert sum(temp[:i] + temp[i + 1:]) == 0\n\n # Read in the lines with atom data\n for i in range(self.numAtoms):\n line = inputfile.readline()\n\n self.data = np.zeros((self.numPoints[1], self.numPoints[0], self.numPoints[2]), \"float\")\n i = j = k = 0\n while i < self.numPoints[1]:\n line = next(inputfile)\n temp = list(map(float, line.strip().split()))\n for x in range(0, len(temp)):\n self.data[j, i, x + k] = temp[x]\n\n k += len(temp)\n if k == self.numPoints[2]:\n j += 1\n k = 0\n if j == self.numPoints[1]:\n i += 1\n j = 0\n\n inputfile.close()",
"def calcCoordinatesOfFile(self, filename):\n # Write the cpptraj infile\n UUID = uuid.uuid1()\n if self.debug:\n cpptraj_infile_path = \"{filename}.cpptraj_in\".format(jn=self.jobname, filename = filename)\n cpptraj_outfile_path = \"{filename}.cpptraj_out\".format(jn=self.jobname, filename = filename)\n cpptraj_logfile_path = \"{jn}-log/cpptraj.log\".format(jn=self.jobname)\n\n else:\n basename = filename.split('/')[-1]\n cpptraj_infile_path = self.tmp_dir + \"/{0}_{1}.cpptraj_in\".format(basename, UUID)\n cpptraj_outfile_path = self.tmp_dir + \"/{0}_{1}.cpptraj_out\".format(basename, UUID)\n cpptraj_logfile_path = \"/dev/null\"\n\n \n cpptraj_infile = open(cpptraj_infile_path, 'w')\n cpptraj_infile.write('trajin {filename}\\n'.format(filename = filename)) \n for coordinate_mask in self.COORDINATE_MASKS:\n cpptraj_infile.write('{mask} out {out}\\n'.format(mask = coordinate_mask,\n out = cpptraj_outfile_path))\n \n # Write changes to file\n cpptraj_infile.close()\n \n # Run cpptraj\n cpptraj_execute_string = ' -p {top} -i {inpath} > {log}'.format(\n top = self.amber_topology_file, \n inpath = cpptraj_infile_path,\n log = cpptraj_logfile_path)\n os.system('{cpptraj} {execute}'.format(cpptraj=self.cpptraj_binary, execute=cpptraj_execute_string))\n \n # Load cpptraj output as numpy array\n try:\n coordinates = numpy.loadtxt(cpptraj_outfile_path) \n # Delete the first entry which refers to the frame index\n coordinates = numpy.delete(coordinates, 0)\n except:\n #TODO What should happen then?\n sys.stderr.write('amber_module error: cpptraj output {0} can not '\\\n 'be found or loaded.\\n'.format(cpptraj_outfile_path))\n \n if not self.debug:\n os.remove(cpptraj_outfile_path)\n os.remove(cpptraj_infile_path)\n \n return coordinates",
"def read_body(filepath, **kwargs):\n with open(filepath, 'r') as infile:\n coords = numpy.loadtxt(infile, unpack=True, **kwargs)\n return coords",
"def read_vasp(filename='XYZ'):\n # TODO: read velocities, now not supported. or not needed?\n with open(filename, \"r\") as f:\n # _read_string return Molecule object.\n return _read_string(f.read())",
"def process_extended_xyz_file_to_array(extended_xyz_file_path, verbose=True):\n\n with open(extended_xyz_file_path, \"r\") as input_file:\n\n # Read all the lines at once\n lines = input_file.readlines()\n\n # Get the number of atoms per block, which is always the first line of\n # either an xyz or extended xyz file\n n_atoms = int(lines[0].strip())\n\n # We can print some diagnostics to help us debug\n if verbose:\n print(\n f\"Read {len(lines)} lines from {extended_xyz_file_path}, each \"\n f\"block has {n_atoms} atoms\"\n )\n\n # Each \"single\" xyz file has the following lines:\n # A single line indicating how many atoms there are in the block\n # A comment line\n # n_atoms lines for the species type and coordinates\n # With this information, we can \"chunk\" the list into some number of equal\n # parts each containing 12+2 lines.\n # Check out a way to do this here:\n # https://www.delftstack.com/howto/python/\n # python-split-list-into-chunks/\n # #split-list-in-python-to-chunks-using-the-lambda-function\n EXTRA_LINES = 2 # <- no magic numbers\n offset = n_atoms + EXTRA_LINES\n\n # List comprehension is much faster than for loops. Try to avoid the latter\n # when at all possible\n chunked = [lines[ii:ii + offset] for ii in range(0, len(lines), offset)]\n\n if verbose:\n print(f\"Got {len(chunked)} snapshots\")\n\n # Each entry of chunked contains the:\n # - number of atoms (same for everything)\n # - the energy (I think)\n # - the atom types/coordinates\n # Get the energies\n comment_lines = np.array([\n float(lines[ii + 1]) for ii in range(0, len(lines), offset)\n ])\n\n # Get the atom list - only have to do this once!\n atom_list = [line.split()[0] for line in chunked[0][EXTRA_LINES:]]\n\n # Finally, get the coordinates\n chunked = np.array([\n [line.split()[1:4] for line in chunk[EXTRA_LINES:]]\n for chunk in chunked\n ], dtype=float)\n\n return dict(energy=comment_lines, elements=atom_list, coordinates=chunked)",
"def ComputeStructuredCoordinates(self, vtkAMRBox, , , , p_int=..., p_int=..., p_int=..., *args, **kwargs):\n ...",
"def read_from_np(self, filename):\n\n # Check if the file exist\n if os.path.exists(filename):\n\n # Count number of contour and lines in the files\n # %timeit shows that \"linecache\" is way faster than \"readline\" on the first line\n logger.info(\"Reading contours from file {0}\".format(filename))\n ncontours = int(linecache.getline(filename, 1))\n\n with open(filename) as f:\n nlines = sum(1 for _ in f)\n\n logger.debug(\"Number of contours: {0}\".format(ncontours))\n\n # Initialise lon and lat as list of lists\n lon = [[]] * ncontours\n lat = [[]] * ncontours\n\n # Initialise line to read number\n linenum = 2\n\n # Loop on the contours\n for n in range(0, ncontours):\n # Number of points in the current contour\n npoints = int(linecache.getline(filename, linenum))\n nskiplines = linenum + npoints\n\n # Load coordinates (npoints lines to be read)\n coords = np.genfromtxt(filename, skip_header=linenum, skip_footer=nlines - nskiplines)\n coords = coords.T\n lon[n] = coords[0]\n lat[n] = coords[1]\n\n # Update line number\n # (taking into account the number of points already read)\n linenum = nskiplines + 1\n\n self.x = np.array(lon)\n self.y = np.array(lat)\n\n return self\n\n else:\n logger.error(\"File {0} does not exist\".format(filename))\n raise FileNotFoundError('File does not exist')",
"def readPositions(positionFilePath):\n\n if not os.path.exists(positionFilePath):\n print 'File ' + positionFilePath + ' is missing!'\n return []\n\n pointList = []\n\n #TODO: Read this from the file?\n MEAN_MOON_RADIUS = 1737400\n\n isLolaFile = False\n isPcAlignErrorFile = False\n f = open(positionFilePath, 'r')\n i = 0\n for line in f:\n # On first line check if this is a LOLA RDR file\n if (i == 0):\n if (line.find('Coordinated_Universal_Time') == 0):\n isLolaFile = True\n print 'Detected LOLA RDR file'\n continue # Skip this header line\n if (line.find('radius (meters)') > 0):\n isPcAlignErrorFile = True\n print 'Detected pc_align error file'\n continue # Skip this header line\n\n if isLolaFile: # Pick out the correct fields\n\n strings = line.split(',')\n pointList.append(float(strings[1])) # lon\n pointList.append(float(strings[2])) # lat\n pointList.append(float(strings[3])*1000 - MEAN_MOON_RADIUS) # alt\n \n elif isPcAlignErrorFile: # pc_align error file\n\n strings = line.split(',')\n pointList.append(float(strings[0])) # lon\n pointList.append(float(strings[1])) # lat\n pointList.append(float(strings[2]) - MEAN_MOON_RADIUS) # alt\n\n else: # Default handling\n if line.find('#') < 0: # Skip lines containing the comment symbol\n strings = line.split(',')\n #print strings\n pointList.append(float(strings[1])) # lon\n pointList.append(float(strings[0])) # lat\n pointList.append(float(strings[2])) # alt\n i = i + 1\n f.close()\n\n #print pointList\n return pointList",
"def load_raster_xyz(self, filename):\n with rasterio.open(filename, 'r') as src:\n ## Alias 'affine' no longer works for 'transform'\n ##matrix = src.affine\n matrix = src.transform\n self.size = (src.width, src.height)\n # read per scan line\n for row in range(0, src.height):\n window = ((row, row+1), (0, src.width))\n data = src.read(window=window)\n this_row = data[0][0]\n for column in range(0, src.width):\n x, y = matrix * (column, row)\n yield x, y, this_row[column]",
"def read_xyz_traj(filename):\n with open(filename, 'r') as traj_file:\n traj = traj_file.readlines()\n n_atoms = int(traj[0].strip()) # Get number of atoms from first line\n n_frames = int(len(traj) / (n_atoms + 2)) # Calculate number of frames (assuming n_atoms is constant)\n trajectory = {'atoms': np.empty((n_frames, n_atoms), dtype='U2'), # String of length 2\n 'coordinates': np.empty((n_frames, n_atoms, 3)), # Float\n 'headers': np.empty((n_frames,), dtype=object)} # Python object\n for frame in range(n_frames):\n start = frame * (n_atoms + 2) # Frame start\n end = (frame + 1) * (n_atoms + 2) # Frame end\n trajectory['coordinates'][frame] = [[float(i) for i in line.split()[1:4]] for line in traj[start + 2:end]]\n trajectory['atoms'][frame] = [line.split()[0] for line in traj[start + 2:end]]\n trajectory['headers'][frame] = (traj[start + 1].strip())\n return trajectory",
"def _read_xyz(ds,datafile,long_format=False):\n\n from cheml.io.xyz import get_molecules\n\n ds.list_of_mol = get_molecules(datafile,ds.nmol,long_format)\n ds.nmol = len(ds.list_of_mol)",
"def read_positions():\n return np.genfromtxt(\"POSITIONS.OUT\").transpose()",
"def read_from_ascii(self, filename):\n self.ascii_filename = filename\n # read file content into a string\n f=open(filename,'r')\n file_str=f.read()\n f.close()\n # make dictionary with file content\n reg_exp_data_groups=re.compile(r'^#>>(\\w+):.*\\n',re.M)\n file_dict=self.make_data_dict_from_str(reg_exp_data_groups,file_str)\n # read arrays ------------------------------\n self.x=np.loadtxt(StringIO.StringIO(file_dict['x']))\n self.p=np.loadtxt(StringIO.StringIO(file_dict['p']))\n self.fmci_XP=np.loadtxt(StringIO.StringIO(file_dict['XP']))\n # regular expression for extracting parameter=value\n reg_exp_param_val=re.compile(r'\\n*(\\w+)=',re.M)\n # read params_physics -----------------------\n params_physics_dict=self.make_data_dict_from_str(reg_exp_param_val,file_dict['params_physics'])\n self.name=self.__get_particle_name(params_physics_dict['particle'])\n self.time=float(params_physics_dict['time'])\n # read params_TDC ---------------------------\n params_TDC_dict=self.make_data_dict_from_str(reg_exp_param_val,file_dict['params_TDC'])\n self.calc_id=params_TDC_dict['calc_id']\n self.i_ts=int(params_TDC_dict['i_ts'])",
"def readInput(in_file_name):\n in_file = open(in_file_name, 'r')\n positions = []\n samples = []\n M = []; P = [];\n MC = []; PC = [];\n while True:\n line = in_file.readline()\n if not line: break\n if line[0] == '#': continue #skip comment\n line = line.rstrip('\\n').split('\\t')\n \n #genomic positions and allele support in plasma samples\n positions.append(int(line[0]))\n samples.append(tuple(map(int, line[1:5])))\n \n #maternal and paternal alleles\n M.append(tuple(line[5:7]))\n MC.append(tuple(map(float, line[7:9])))\n \n P.append(tuple(line[9:11]))\n PC.append(tuple(map(float, line[11:13]))) \n \n in_file.close()\n return positions, samples, M, P, MC, PC",
"def read_from(self, filename):\n\n lon, lat, field, weight = [], [], [], []\n\n if os.path.exists(filename):\n logger.info(\"Reading data from file {0}\".format(filename))\n with open(filename, 'r') as f:\n line = f.readline()\n ncols = len(line.split())\n while ncols >= 3:\n lon.append(float(line.split()[0]))\n lat.append(float(line.split()[1]))\n field.append(float(line.split()[2]))\n if ncols >= 4:\n weight.append(float(line.split()[3]))\n else:\n weight.append(1.)\n line = f.readline()\n ncols = len(line.split())\n\n self.x = np.array(lon)\n self.y = np.array(lat)\n self.field = np.array(field)\n self.weight = np.array(weight)\n return self\n else:\n logger.error(\"File {0} does not exist\".format(filename))\n raise FileNotFoundError('File does not exist')",
"def readAngles(fileName: str) -> List[float]:\n outList = list()\n with open(fileName, 'r') as fileIn:\n for line in fileIn:\n val = float(line)\n outList.append(val)\n return outList",
"def read_lanczos(filename=\"mfdn_alphabeta.dat\"):\n\n # extract raw vectors\n alphabeta_array = np.loadtxt(filename, usecols=(1, 2))\n (alpha,beta) = (alphabeta_array[:, 0], alphabeta_array[:-1, 1])\n\n return (alpha,beta)",
"def load(self, _name):\r\n with open(_name, 'r') as fin:\r\n self.filename = _name\r\n\r\n self.comment_1 = fin.readline() # Save 1st comment\r\n self.comment_2 = fin.readline() # Save 2nd comment\r\n\r\n _str = fin.readline().split() # Number of Atoms and Origin\r\n self.n_atoms = int(_str[0]) # Number of Atoms\r\n self.origin = np.array([float(_str[1]), float(_str[2]), float(_str[3])]) # Position of Origin\r\n\r\n nVoxel = fin.readline().split() # Number of Voxels\r\n self.n_x = int(nVoxel[0])\r\n self.x = np.array([float(nVoxel[1]), float(nVoxel[2]), float(nVoxel[3])])\r\n\r\n nVoxel = fin.readline().split() #\r\n self.n_y = int(nVoxel[0])\r\n self.y = np.array([float(nVoxel[1]), float(nVoxel[2]), float(nVoxel[3])])\r\n\r\n nVoxel = fin.readline().split() #\r\n self.n_z = int(nVoxel[0])\r\n self.z = np.array([float(nVoxel[1]), float(nVoxel[2]), float(nVoxel[3])])\r\n\r\n self.atoms = []\r\n self.atoms_xyz = []\r\n for atom in range(self.n_atoms):\r\n line = fin.readline().split()\r\n self.atoms.append(line[0])\r\n self.atoms_xyz.append(list(map(float, [line[2], line[3], line[4]])))\r\n\r\n self.data = np.zeros((self.n_x, self.n_y, self.n_z))\r\n\r\n i = int(0)\r\n for s in fin:\r\n for v in s.split():\r\n self.data[int(i / (self.n_y * self.n_z)), int((i / self.n_z) % self.n_y),\r\n int(i % self.n_z)] = float(v)\r\n i += 1\r\n\r\n return None",
"def get_coords_from_position(position, file):\n line_counter = 1\n column_counter = 1\n try:\n with open(file, 'r') as source:\n string = source.read()\n except:\n #unable to open file -> 3\n error.ThrowError(3)\n i = 0\n j = position\n while j > 0:\n if string[i] == '\\n':\n line_counter += 1\n column_counter = 1\n else:\n column_counter += 1\n i += 1\n j -= 1\n return Coords(line_counter, column_counter, position)"
] | [
"0.65199834",
"0.6151185",
"0.5955771",
"0.5821698",
"0.5642972",
"0.55947125",
"0.5586875",
"0.5569145",
"0.55424154",
"0.55242324",
"0.5440504",
"0.5354222",
"0.5305724",
"0.5268994",
"0.52577853",
"0.5247618",
"0.5231697",
"0.5143245",
"0.51137674",
"0.5104983",
"0.5052237",
"0.5041058",
"0.49637604",
"0.49536508",
"0.4949702",
"0.49297267",
"0.4921237",
"0.49112853",
"0.49070978",
"0.48937446"
] | 0.82705474 | 0 |
Init new bag based on Dynamic Array DO NOT CHANGE THIS METHOD IN ANY WAY | def __init__(self, start_bag=None):
self.da = DynamicArray()
# populate bag with initial values (if provided)
# before using this feature, implement add() method
if start_bag is not None:
for value in start_bag:
self.add(value) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self):\n self.bag = {}",
"def __init__(self):\n self.da = DynamicArray()",
"def __init__(self):\n self.da = DynamicArray()",
"def __init__(self):\n self.da = DynamicArray()",
"def __init__(self):\r\n self.da = DynamicArray()",
"def bag(self, bag):\n self._bag = bag",
"def __init__(self):\n self._dict = {}\n self._array = []",
"def __init__(self):\n self.arr = []\n self.d = dict()",
"def __init__(self, *args):\n self.bag = []\n\n # Each item in 'args' is a tuple, we can assign each item in\n # the tuple directly to a variable name in our loop. Here we\n # assign the first item in each tuple to the variable named\n # 'item' and we assign the second item in each tuple to the\n # variable named 'count'.\n for item, count in args:\n # 'xrange' is a function that yields a finite set of\n # numbers. We are using it as a counter here so that we\n # add each 'item' to our bag only as many times as was\n # specified\n #\n # The '_' in this for-loop means that we're not assigning\n # the value of the items from xrange to any\n # variables. We're just literally using xrange to loop\n # \"count\" number of times.\n for _ in xrange(count):\n self.bag.append(item)\n\n # All of our items have been added to our bag. Now let's\n # shuffle it up calling the semi-private instance method\n # '_shuffle'. In Python any method with a single or\n # double-underscore prefix ('_' or '__') is considered\n # 'private' and should not be accessed directly outside of the\n # class.\n self._shuffle()\n\n # pos is our \"position\" in the list of items. We'll begin with\n # our position at the end of the list (remember, ``len()``\n # returns a COUNT of objects in a list. We subtract 1 from\n # this because we use self.pos to index each position in the\n # bag.\n #\n # Don't forget, list indices begin at 0 so the last item is\n # accessed using an index one less than the number of items in\n # the list.\n self.pos = len(self.bag) - 1",
"def add_bag(self, bag, quantity):\n self.bags.append((bag, quantity))",
"def __init__(self):\n self.size = 0\n self.array = []\n self.val2i = {}",
"def initialize(self):\r\n self.bucket_array.initialize()",
"def __init__(self):\n self.arr, self.map = [], {}",
"def __copy__(self):\n return Bag(self.items)",
"def __init__(self):\n self.bucket_length = 997\n self.bucket_array = [Bucket() for i in range(self.bucket_length)]",
"def __init__(self):\n self.arr = []\n for i in range(80):\n self.arr.append([])",
"def __init__(self):\n self.map = {}\n self.array = []",
"def __init__(self):\n self.data = ArrayStack(10)",
"def __init__(self):\n \n self.items = [] \n self.ind = defaultdict(set) # item -> index into the items array",
"def __init__(self):\n self.arr = []\n self.size = 0",
"def get_bag(bags, colour):\n if colour in bags:\n bag = bags[colour]\n else:\n bag = Bag(colour)\n bags[colour] = bag\n return bag",
"def __init__(self):\n self.map = {}\n self.arr = []",
"def construct_bag(self):\n testing = self._are_we_testing()\n\n alphabet = self.config['test']['bag']['word'].lower() if testing \\\n else self.config['bag']['letters'].keys()\n\n for letter in alphabet:\n bag_amount = self.config['test']['bag']['letters'][letter]['bag_amount'] if testing \\\n else self.config['bag']['letters'][letter]['bag_amount']\n point_val = self.config['test']['bag']['letters'][letter]['point_val'] if testing \\\n else self.config['bag']['letters'][letter]['point_val']\n count = 0\n\n while count < bag_amount:\n tile = Tile(letter=letter.upper(), point_val=point_val, id=count+1, bag_amount=bag_amount)\n self.contents.append(tile)\n count += 1\n\n # check tile amount\n tile_count = len(self.contents)\n if tile_count < 100 and not testing:\n raise ValueError(\"Letter bag only contains {} tiles. Must contain a total of 100.\".format(\n tile_count))\n\n return None",
"def __init__(self, arr=None):\n self.data = arr.copy() if arr else []",
"def __init__(self):\n self.hashmap = {}\n self.array = []",
"def bag(self):\n return self._bag",
"def __init__(self, arr, n):\n self.BITree = [0] * (n+1)\n self.size = n\n\n for i in range(n):\n self.update(i, arr[i])",
"def __init__(self):\n self.arr = []",
"def __init__(self):\n self.arr = []",
"def __init__(self):\n self.arr = []"
] | [
"0.7149582",
"0.64862925",
"0.64862925",
"0.64862925",
"0.6427819",
"0.6324125",
"0.6016864",
"0.5974724",
"0.58734715",
"0.58185697",
"0.5818552",
"0.5756172",
"0.5755289",
"0.57449216",
"0.5724783",
"0.57152706",
"0.57088006",
"0.57087326",
"0.5704826",
"0.57008696",
"0.5700852",
"0.5654867",
"0.5650215",
"0.5636915",
"0.5635275",
"0.56242645",
"0.5623044",
"0.5620335",
"0.5620335",
"0.5620335"
] | 0.7376073 | 0 |
Empties bag by dereferencing the Dynamic Array | def clear(self) -> None:
self.da = DynamicArray() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def reset_bag(self):",
"def clear(self) -> None:\n # Creates a new, empty bag and assigns self.da to the new, empty bag.\n new_bag = Bag()\n self.da = new_bag.da",
"def clear(self):\n if self.size == 0:\n return\n self.modCount += 1\n self.size = 0\n Arrays.fill(self.keys, None)\n Arrays.fill(self.values, None)",
"def clear(self) -> None:\n self.data = {} # defaultdict fails (T282865)\n self.size = 0",
"def empty_bag(self):\n if self.peds is not None:\n for _, model in self.peds.items():\n model.reset()\n self.drone.reset()\n self.subject.reset()",
"def clear(self):\n length = len(self.data)\n self.data = [[] for j in range(length)]",
"def clear(self):\n self.array = np.zeros(shape=(0, 2))",
"def clear(self):\n self._data = []",
"def clear(self):\n self._data = []",
"def reset(self):\n for k in self.data_keys:\n setattr(self, k, [])\n self.size = 0",
"def clear(self):\r\n\t\tself.free_objects[:] = []",
"def clearValue(self):\n self.data = []",
"def clear(self):\n\n self.size = 0\n\n self.table = [[]] * 100\n\n self.keys_set = set()\n\n self.keys_ref = [[]] * 100",
"def delete_reference_array(self):\r\n del self.pxarray\r\n return",
"def clear(self):\n self.data_.clear()#Beauty of array implementation, we call .clear() and wipe our Deque \n self.size_=0#Deque should now be empty",
"def clear_array():\n while len(lines) != 0:\n lines.pop()\n while len(assembly_lines) != 0:\n assembly_lines.pop()",
"def pop(self):",
"def pop(self):",
"def clear(self):\n self.vars = []",
"def clear(self) -> None:\n self._items = []\n self._size = 0",
"def free_intermediate_arrays(self):\n self._mgx = None\n self._mgy = None\n self._mgz = None\n self._vander = None\n self._bkg_cube = None\n self._bkg_cube_dirty = True",
"def clear(self):\n self.size = 0 # set size to 0 and reinitialize buckets as empty\n self._buckets = []",
"def reset(self):\n self._data = []",
"def clear(self):\n self._length = 0 # \"Erase\" values by ignoring them\n self._resize_arr(1) # Shrink array to original size",
"def remove_gifti_data_array(self, ith):\n self.darrays.pop(ith)",
"def clear(self):\n self._items = []",
"def clear(self):\n self.counts = [0] * len(self.values)\n if HAS_NUMPY:\n self.counts = numpy.array(self.counts)",
"def clear(self):\n self.counts = [{} for _ in range(len(self.counts))]",
"def clear(self):\n dict.clear(self)\n self._sequence = []",
"def untie_everything(self):\r\n self.tied_indices = []"
] | [
"0.69397724",
"0.66944885",
"0.6348022",
"0.62654585",
"0.62618214",
"0.62593454",
"0.62128514",
"0.6195912",
"0.6195912",
"0.61813796",
"0.61733335",
"0.61612505",
"0.61371595",
"0.6111838",
"0.6082362",
"0.6075432",
"0.60725045",
"0.60725045",
"0.6012886",
"0.600536",
"0.5961621",
"0.59276986",
"0.5922068",
"0.5903495",
"0.5882644",
"0.58647066",
"0.58643365",
"0.58325857",
"0.58292526",
"0.5814907"
] | 0.7057705 | 0 |
Submit facebox hits from the training video url table to MTurk for QA | def submit_hits(self):
# query all processed urls (TrainingVideoURL table) for boxes
# create BoxHits for all boxes and submit hits to Mturk
boxes = []
num_hits_submitted = 0
for url in session.query(VideoTrainingURL).filter_by(training_job_id = self.id, processed = False):
wpage = WebPage.by_url(url.url)
if wpage is not None:
# get the video, and set url.processed only if video is updated on its face version
videos = sorted(wpage.active_videos, key=lambda x:x.length, reverse = True)
if len(videos) != 0 :
video = videos[0]
images_in_s3 = video.s3_timestamps()
for b in video.face_boxes:
if b.timestamp in images_in_s3:
boxes.append(b.id)
url.processed = True
else:
url.processed = True
boxes = sorted(set(boxes))
for i in xrange(0, len(boxes), self.NUM_BOXES_PER_HIT):
boxes_per_hit = boxes[i:i+self.NUM_BOXES_PER_HIT]
hit_id = self.evaluator.create_hit(box_ids=boxes_per_hit)
session.flush()
b = BoxHit(hit_id=hit_id, training_job_id=self.id)
num_hits_submitted += 1
for box_id in boxes_per_hit:
if not MTurkBox.query.filter_by(box_id = box_id, label_id = self.label_id).count():
MTurkBox(box_id=box_id, hit=b, label_id=self.label_id)
session.flush()
return BoxHit, num_hits_submitted | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_embed_ok(self):\n self.go200('minus_upload')\n self.formfile('minus_upload', 'file', AUDIO_FILE)\n self.fv('minus_upload', 'id_embed_video', YOUTUBE_URL) \n self.submit200()\n self.notfind(\"Невірний\")\n self.show()\n self.find(\"youtube_video\")\n self.find(\"<object width\")\n self.go200('minus_upload')\n self.formfile('minus_upload', 'file', NOTAGS_FILE)\n self.fv('minus_upload', 'id_embed_video', YOUTUBE_EMBED) \n self.submit200()\n self.notfind(\"Невірний\")\n self.show()\n self.find(\"<object width\")",
"def joke(update: Update, context: CallbackContext) -> None:\n response = scrap_joke()\n update.message.reply_text(response[\"title\"])\n try :\n update.message.reply_video(video=open(\"store/video.webm\", 'rb'), supports_streaming=True)\n except Exception as e :\n pass",
"def video_feed(self):\r\n model.video.link(self.link)\r\n age_net, gender_net = model.video.caffe_models()\r\n return Response(model.video.video_detector(age_net, gender_net),mimetype='multipart/x-mixed-replace; boundary=frame')",
"def post_video(self, url: str, text: str) -> bool:\n return False",
"def request_videos(query, API_KEY, publishedBefore, publishedAfter, maxResults=49, driver_path=\"C:/WebDriver/bin/chromedriver.exe\"):\n \n video_list = yt_search(query, API_KEY, publishedBefore, publishedAfter, maxResults)\n \n # Check if there are no video results\n if not video_list:\n return\n \n for video in video_list:\n video['query'] = query\n return video_list",
"def submit_urls(args):\n params = {\n 'api_key': API_KEY,\n 'url': args.get('url')\n }\n markdown = ''\n r = req('POST', SUB_API + 'samples', params=params)\n res = r.json()['data']\n markdown += tableToMarkdown('Threat Grid - URL Submission', res)\n results = CommandResults(\n readable_output=markdown,\n outputs_prefix='Threatgrid.SearchResult',\n outputs_key_field='Info',\n outputs=res\n )\n return results",
"def post_video(self, comment):\n\t\tpass",
"def test_assigned_video_match(self):\n user1 = get_user_model().objects.get(username='[email protected]')\n self.client.login(username='[email protected]', password='1')\n\n office = OfficeLocation.objects.all()[0]\n org = OrgGroup.objects.filter(parent__isnull=True)[0]\n\n submission1 = Interest()\n submission1.owner = user1\n submission1.video_chat = True\n submission1.save()\n submission1.departments.add(org)\n\n resp = self.client.get(reverse('mystery:mystery'))\n self.assertContains(resp, \"Cancel this\", status_code=200)\n\n user2 = random_user()\n submission2 = Interest()\n submission2.owner = user2\n submission2.is_active = False\n submission2.save()\n submission2.video_chat = True\n submission2.departments.add(org)\n submission2.is_active = True\n submission2.save()\n\n self.assertEqual(submission2.is_active, True)\n\n resp = self.client.get(reverse('mystery:mystery'))\n self.assertContains(resp, \"Success\", status_code=200)",
"def start_video_analysis(event, context): # This is the Lambda handler to kick off Rekognition job\n\n for Record in event['Records']: # There may have been more than one file uploaded\n Bucket = Record['s3']['bucket']['name']\n NewFileUploaded = urllib.parse.unquote_plus(Record['s3']['object']['key'])\n # Remove the \"+\" signs of url encoding and return original whitespaces to the filename\n start_label_detection(Bucket, NewFileUploaded)\n \n return",
"async def download_video(event):\n url = event.pattern_match.group(1)\n rmsg = await event.get_reply_message()\n if not url and rmsg:\n myString = rmsg.text\n url = re.search(\"(?P<url>https?://[^\\s]+)\", myString).group(\"url\")\n if not url:\n return await edit_or_reply(event, \"What I am Supposed to find? Give link\")\n codevent = await edit_or_reply(event, \"`Preparing to download...`\")\n reply_to_id = await reply_id(event)\n ytdl_data = await ytdl_down(codevent, video_opts, url)\n if ytdl_down is None:\n return\n f = pathlib.Path(f\"{ytdl_data['title']}.mp4\".replace(\"|\", \"_\"))\n codthumb = pathlib.Path(f\"{ytdl_data['title']}.jpg\".replace(\"|\", \"_\"))\n if not os.path.exists(codthumb):\n codthumb = pathlib.Path(f\"{ytdl_data['title']}.webp\".replace(\"|\", \"_\"))\n if not os.path.exists(codthumb):\n codthumb = None\n await codevent.edit(\n f\"`Preparing to upload video:`\\\n \\n**{ytdl_data['title']}**\\\n \\nby *{ytdl_data['uploader']}*\"\n )\n ul = io.open(f, \"rb\")\n c_time = time.time()\n attributes, mime_type = await fix_attributes(f, ytdl_data, supports_streaming=True)\n uploaded = await event.client.fast_upload_file(\n file=ul,\n progress_callback=lambda d, t: asyncio.get_event_loop().create_task(\n progress(d, t, codevent, c_time, \"upload\", file_name=f)\n ),\n )\n ul.close()\n media = types.InputMediaUploadedDocument(\n file=uploaded,\n mime_type=mime_type,\n attributes=attributes,\n thumb=await event.client.upload_file(codthumb) if codthumb else None,\n )\n await event.client.send_file(\n event.chat_id,\n file=media,\n reply_to=reply_to_id,\n caption=ytdl_data[\"title\"],\n )\n os.remove(f)\n if codthumb:\n os.remove(codthumb)\n await event.delete()",
"async def igvideo(self, ctx, url):\n response = requests.get(url.replace(\"`\", \"\"), headers={\"Accept-Encoding\": \"utf-8\"})\n tree = html.fromstring(response.content)\n results = tree.xpath('//meta[@content]')\n sources = []\n for result in results:\n try:\n if result.attrib['property'] == \"og:video\":\n sources.append(result.attrib['content'])\n except KeyError:\n pass\n if sources:\n await ctx.send(sources[0])\n self.logger.info(misolog.format_log(ctx, f\"Success\"))\n else:\n await ctx.send(\"Found nothing, sorry!\")\n self.logger.warning(misolog.format_log(ctx, f\"Found nothing\"))",
"def scrape(search_title, search_artist, get_top_result=False):\n search_artist = search_artist.replace(\" \", \"+\").replace(\"&\", \"and\")\n search_title = search_title.replace(\" \", \"+\").replace(\"&\", \"and\")\n\n search_query = search_title + \"+\" + search_artist + \"+\\\"auto-generated+by+youtube\\\"\"\n # youtube_url = \"https://www.youtube.com/results?sp=EgIQAQ%253D%253D&search_query=\" + search_query\n youtube_url = \"https://www.youtube.com/results?search_query=\" + search_query\n header = {'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36'}\n\n try:\n response = requests.get(youtube_url, headers=header)\n except requests.exceptions.ConnectionError:\n return None, None\n\n content = response.content\n soup = BeautifulSoup(content, \"html.parser\")\n title = []\n uploader = []\n ref = []\n all_title_tags = soup.find_all(\"h3\", attrs={\"class\": \"yt-lockup-title\"})\n all_uploader_tags = soup.find_all(\"div\", attrs={\"class\": \"yt-lockup-byline\"})\n\n for h3 in all_title_tags:\n try:\n title.append(h3.find('a').text)\n ref.append(h3.find('a')['href'])\n except TypeError:\n return None, None\n\n for div in all_uploader_tags:\n try:\n uploader.append(div.text)\n except TypeError:\n pass\n\n if get_top_result:\n # Return best matching link and its duration\n best_title = rank_results(title, search_title, search_artist, uploader)#, search_artist)\n # print(\"Best result is: '\"+str(title[best_title])+\"' at index \"+str(best_title))\n final_url = 'https://www.youtube.com'+ref[best_title]\n\n # video_length = get_video_time(final_url)\n # print(\"Video length is \"+str(video_length)+' ms long')\n return str(title[best_title]), ref[best_title]\n\n # if get_top_result:\n # return [title[0], ref[0]]\n return title, ref",
"def extract_tubelets(dname, gpu=-1, redo=False):\n d = GetDataset(dname)\n\n if gpu >= 0:\n caffe.set_mode_gpu()\n caffe.set_device(gpu)\n\n model_dir = os.path.join(os.path.dirname(__file__), '../models/ACT-detector/', dname)\n output_dir = os.path.join(os.path.dirname(__file__), '../results/ACT-detector/', dname)\n \n # load the RGB network\n rgb_proto = os.path.join(model_dir, \"deploy_RGB.prototxt\")\n rgb_model = os.path.join(model_dir, \"../generated_AVA_iter_118662.caffemodel\")\n net_rgb = caffe.Net(rgb_proto, caffe.TEST, weights=rgb_model)\n \n # load the FLOW5 network\n flo_proto = os.path.join(model_dir, \"deploy_FLOW5.prototxt\")\n flo_model = os.path.join(model_dir, \"../generated_AVA_iter_59463.caffemodel\")\n net_flo = caffe.Net(flo_proto, caffe.TEST, weights=flo_model)\n\n vlist = d.test_vlist()\n for iv, v in enumerate(vlist):\n print(\"Processing video {:d}/{:d}: {:s}\".format( iv+1, len(vlist), v))\n h, w = d.resolution(v)\n \n # network output is normalized between 0,1 ; so we will multiply it by the following array\n resolution_array = np.array([w,h,w,h]*K, dtype=np.float32)\n \n # now process each frame\n for i in xrange(1, 1 + d.nframes(v) - K + 1):\n outfile = os.path.join(output_dir, d.frame_format(v,i) + \".pkl\")\n \n # skip if already computed\n if os.path.isfile(outfile) and not redo:\n continue\n \n # read the frames for the forward\n kwargs_rgb = {}\n kwargs_flo = {}\n for j in xrange(K):\n cap = cv2.VideoCapture(d.vidfile(v,0))\n #print(frame)\n #print(int(cap.get(7)))\n cap.set(1,i + j - 1)\n im = cap.read()[1]\n cap.release()\n #im = cv2.imread(d.imfile(v, i + j))\n if im is None:\n print \"Image {:s} does not exist\".format(d.imfile(v, i+j))\n return\n imscale = cv2.resize(im, (IMGSIZE, IMGSIZE), interpolation=cv2.INTER_LINEAR)\n kwargs_rgb['data_stream' + str(j)] = np.transpose(imscale-MEAN, (2, 0, 1))[None, :, :, :]\n imf = [cv2.imread(d.flowfile(v.split(\".\")[0], min(d.nframes(v), i + j + iflow))) for iflow in xrange(NFLOWS)]\n if np.any(imf) is None:\n print \"Flow image {:s} does not exist\".format(d.flowfile(v, i+j))\n return\n imscalef = [cv2.resize(im, (IMGSIZE, IMGSIZE), interpolation=cv2.INTER_LINEAR) for im in imf]\n timscale = [np.transpose(im-MEAN, (2, 0, 1))[None, :, :, :] for im in imscalef]\n kwargs_flo['data_stream' + str(j) + 'flow'] = np.concatenate(timscale, axis=1)\n \n # compute rgb and flow scores\n # two forward passes: one for the rgb and one for the flow \n net_rgb.forward(end=\"mbox_conf_flatten\", **kwargs_rgb) # forward of rgb with confidence and regression\n net_flo.forward(end=\"mbox_conf_flatten\", **kwargs_flo) # forward of flow5 with confidence and regression\n \n # compute late fusion of rgb and flow scores (keep regression from rgb)\n # use net_rgb for standard detections, net_flo for having all boxes\n scores = 0.5 * (net_rgb.blobs['mbox_conf_flatten'].data + net_flo.blobs['mbox_conf_flatten'].data)\n net_rgb.blobs['mbox_conf_flatten'].data[...] = scores\n net_flo.blobs['mbox_conf_flatten'].data[...] = scores\n net_flo.blobs['mbox_loc'].data[...] 
= net_rgb.blobs['mbox_loc'].data\n \n # two forward passes, only for the last layer \n # dets is the detections after per-class NMS and thresholding (stardard)\n # dets_all contains all the scores and regressions for all tubelets \n dets = net_rgb.forward(start='detection_out')['detection_out'][0, 0, :, 1:]\n dets_all = net_flo.forward(start='detection_out_full')['detection_out_full'][0, 0, :, 1:]\n \n # parse detections with per-class NMS\n if dets.shape[0] == 1 and np.all(dets == -1):\n dets = np.empty((0, dets.shape[1]), dtype=np.float32)\n\n dets[:, 2:] *= resolution_array # network output was normalized in [0..1]\n dets[:, 0] -= 1 # label 0 was background, come back to label in [0..nlabels-1]\n dets[:, 2::2] = np.maximum(0, np.minimum(w, dets[:, 2::2]))\n dets[:, 3::2] = np.maximum(0, np.minimum(h, dets[:, 3::2]))\n\n # parse detections with global NMS at 0.7 (top 300)\n # coordinates were normalized in [0..1]\n dets_all[:, 0:4*K] *= resolution_array \n dets_all[:, 0:4*K:2] = np.maximum(0, np.minimum(w, dets_all[:, 0:4*K:2]))\n dets_all[:, 1:4*K:2] = np.maximum(0, np.minimum(h, dets_all[:, 1:4*K:2]))\n idx = nms_tubelets(np.concatenate((dets_all[:, :4*K], np.max(dets_all[:, 4*K+1:], axis=1)[:, None]), axis=1), 0.7, 300)\n dets_all = dets_all[idx, :]\n \n # save file\n if not os.path.isdir(os.path.dirname(outfile)):\n os.system('mkdir -p ' + os.path.dirname(outfile))\n\n with open(outfile, 'wb') as fid:\n pickle.dump((dets, dets_all), fid)",
"def send_tweet_with_video(tweet_text, filename, uuid):\n try:\n query = {} # API call to twitter-service\n query['app_name'] = 'webcamd'\n query['uuid'] = uuid\n query['tweet_text'] = tweet_text\n query['hashtag_arg'] = 'metminiwx' # do not supply the #\n query['lat'] = 51.4151 # Stockcross\n query['lon'] = -1.3776 # Stockcross\n query['video_pathname'] = filename\n\n status_code, response_dict = cumulus_comms.call_rest_api(get_env.get_twitter_service_endpoint() + '/send_video', query)\n\n # print('status_code=' + status_code.__str__())\n # pprint(response_dict)\n # if response_dict['status'] == 'OK' and response_dict['tweet_sent'] == True:\n if response_dict['status'] == 'OK' :\n tweet_len = response_dict['tweet_len'].__str__()\n print('Tweet sent OK, tweet_len=' + tweet_len + ', uuid=' + uuid.__str__())\n else:\n print(response_dict['status'])\n\n except Exception as e:\n print('Error : send_tweet_with_video() : ' + e.__str__())",
"def AnnotateVideo(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)",
"def add_videofacet_image(request):\r\n\r\n if request.method == \"POST\":\r\n add_image_form = AddImageForm(request.POST, request=request)\r\n if add_image_form.is_valid():\r\n videofacet_id = request.POST.get('videofacet')\r\n print \"videoFACETid: \", videofacet_id\r\n videofacet = get_object_or_404(VideoFacet, id=videofacet_id)\r\n images = request.POST.getlist('images')\r\n print \"IMAGES: \", images\r\n for image in images:\r\n img_ins = get_object_or_404(ImageAsset, id=image)\r\n print \"IMGins: \", img_ins\r\n videofacet.image_assets.add(img_ins)\r\n videofacet.save()\r\n return redirect('story_detail', pk=videofacet.story.id)",
"def test_send_course_to_vem_pipeline(self, mock_conn, mock_key):\n files = [{'file_name': 'first.mp4', 'content_type': 'video/mp4'}]\n mock_key_instances = [\n Mock(\n generate_url=Mock(\n return_value='http://example.com/url_{}'.format(file_info['file_name'])\n )\n )\n for file_info in files\n ]\n mock_key.side_effect = mock_key_instances\n\n response = self.client.post(\n self.url,\n json.dumps({'files': files}),\n content_type='application/json'\n )\n\n self.assertEqual(response.status_code, 200)\n mock_conn.return_value.get_bucket.assert_called_once_with(\n settings.VIDEO_UPLOAD_PIPELINE['VEM_S3_BUCKET'], validate=False # pylint: disable=unsubscriptable-object\n )",
"def search_videos_tag(self, video_tag):\n recommendations = []\n\n if not video_tag.startswith(\"#\"):\n print(f\"No search results for {video_tag}\")\n else:\n for video in self.videos_dict:\n #s = self.videos_dict[video]\n #result = re.search(r\"\\[([A-Za-z0-9_]+)\\]\", s)\n #print(result.group(1))\n #tag_string = str(result.group(1))\n #if video_tag in tag_string:\n # recommendations.append(self.videos_dict[video])\n if video_tag in video._tags and not video.flagged:\n recommendations.append(self.videos_dict[video])\n \n recommendations.sort()\n n = len(recommendations)\n\n if n == 0:\n print(f\"No search results for {video_tag}\")\n else:\n print(f\"Here are the results for {video_tag}:\")\n for i in range(n):\n print(f\"{i+1}) {recommendations[i]}\")\n print(\"Would you like to play any of the above? If yes, specify the number of the video.\")\n print(\"If your answer is not a valid number, we will assume it's a no.\")\n\n try:\n response = int(input())\n if response in range(1,n+1):\n wanted_video_info = recommendations[response-1]\n #print(wanted_video_info)\n s = wanted_video_info\n result = re.search(r\"\\(([A-Za-z0-9_]+)\\)\", s)\n #print(result.group(1))\n self.play_video(result.group(1))\n except ValueError:\n pass",
"def upload_videofacet_image(request):\r\n\r\n if request.method == 'POST':\r\n imageform=ImageAssetForm(request.POST, request.FILES)\r\n if imageform.is_valid():\r\n videoimage = imageform.save(commit=False)\r\n # retrieve the videofacet the image should be associated with\r\n videofacet_id = request.POST.get('videofacet')\r\n videofacet = get_object_or_404(VideoFacet, id=videofacet_id)\r\n # set request based attributes\r\n videoimage.owner = request.user\r\n videoimage.organization = request.user.organization\r\n videoimage.save()\r\n # add image asset to videofacet image_assets\r\n videofacet.image_assets.add(videoimage)\r\n videofacet.save()\r\n return redirect('story_detail', pk=videofacet.story.id)",
"def search_videos(self, search_term):\n recommendations = []\n for video in self.videos_dict:\n if not video.flagged and search_term in self.videos_dict[video]:\n recommendations.append(self.videos_dict[video])\n \n recommendations.sort()\n n = len(recommendations)\n\n\n if n == 0:\n print(f\"No search results for {search_term}\")\n else:\n print(f\"Here are the results for {search_term}:\")\n for i in range(n):\n print(f\"{i+1}) {recommendations[i]}\")\n print(\"Would you like to play any of the above? If yes, specify the number of the video.\")\n print(\"If your answer is not a valid number, we will assume it's a no.\")\n\n try:\n response = int(input())\n if response in range(1,n+1):\n wanted_video_info = recommendations[response-1]\n #print(wanted_video_info)\n s = wanted_video_info\n result = re.search(r\"\\(([A-Za-z0-9_]+)\\)\", s)\n #print(result.group(1))\n self.play_video(result.group(1))\n except ValueError:\n pass",
"def search_videos(self, search_term):\n print(\"search_videos needs implementation\")",
"def test_youtube(self):\n\n test = Unfurl()\n test.add_to_queue(\n data_type='url', key=None,\n value='https://www.youtube.com/watch?v=LnhSTZgzKuY&list=PLlFGZ98XmfGfV6RAY9fQSeRfyIuhVGSdm&index=2&t=42s')\n test.parse_queue()\n\n # test number of nodes\n self.assertEqual(len(test.nodes.keys()), 16)\n self.assertEqual(test.total_nodes, 16)\n\n # Test query parsing\n self.assertEqual('Video will start playing at 42 seconds', test.nodes[16].label)\n\n # is processing finished empty\n self.assertTrue(test.queue.empty())\n self.assertEqual(len(test.edges), 0)",
"def submit(id, host):",
"def test_framework_recommendations_post(self):\n pass",
"async def youtube(self, ctx, *, query):\r\n\r\n utub = 'https://youtube.com/results?search_query='\r\n url = utub + query.replace(\" \", \"+\")\r\n r = requests.get(url).text\r\n num1 = r.find('{\"videoRenderer')\r\n num2 = r.find('{\"videoRenderer', num1+1)\r\n # print (num1)\r\n # print (num2)\r\n videoRenderer = (json.loads(r[num1:num2-1])[\"videoRenderer\"])\r\n vid = (videoRenderer[\"videoId\"])\r\n page = (\"https://youtube.com/watch?v=\" + vid)\r\n await ctx.send(page)",
"def test_techtv_detail_embed_url(\n mock_user_moira_lists, user_view_list_data, logged_in_apiclient, url\n):\n client = logged_in_apiclient[0]\n ttv_video = TechTVVideoFactory(video=user_view_list_data.video)\n mock_user_moira_lists.return_value = {user_view_list_data.moira_list.name}\n result = client.get(url.format(ttv_video.ttv_id))\n assert result.status_code == status.HTTP_200_OK\n assert (\n json.loads(result.context_data[\"js_settings_json\"])[\"video\"][\"key\"]\n == user_view_list_data.video.hexkey\n )\n assert isinstance(result.context_data[\"view\"], TechTVEmbed)",
"def save_results(self, instagram_results):",
"def example_video(dataset, num_vids, split, base_data_path, f_name, vid_name, verbose):\n\n with tf.name_scope(\"my_scope\") as scope:\n\n # Initialize model variables\n istraining = False\n\n\n data_path = os.path.join(base_data_path, 'tfrecords_'+dataset, 'Split'+str(split), f_name)\n\n # Setting up tensors for models\n input_data_tensor, labels_tensor, names_tensor = load_dataset_without_preprocessing(data_path, dataset, istraining, vid_name, verbose)\n\n # TF session setup\n config = tf.ConfigProto(allow_soft_placement=True)\n sess = tf.Session(config=config)\n init = (tf.global_variables_initializer(), tf.local_variables_initializer())\n coord = tf.train.Coordinator()\n threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)\n\n # Variables get randomly initialized into tf graph\n sess.run(init)\n\n\n acc = 0\n count = 0\n videos_loaded = 0\n previous_vid_name = ''\n total_pred = []\n\n if verbose:\n print \"Begin Testing\"\n\n # END IF\n\n ########################################## Testing loop block ################################################################\n\n while videos_loaded <= num_vids:\n output, labels, names = sess.run([input_data_tensor, labels_tensor, names_tensor])\n\n import pdb; pdb.set_trace()\n\n for batch_idx in range(len(names)):\n vid_name = names[batch_idx]\n if vid_name != previous_vid_name:\n previous_vid_name = vid_name\n videos_loaded += 1\n if verbose:\n print \"Number of videos loaded: \", videos_loaded\n\n\n # Extract remaining clips from currently loaded video, once it finishes exit while loop\n if videos_loaded > num_vids:\n break\n\n count += 1\n\n\n # END IF\n\n # END WHILE\n\n #########################################################################################################################################################\n\n # END WITH\n\n coord.request_stop()\n coord.join(threads)",
"async def bingvideo(self, *, text):\n settings = loadauth()\n operation = 'videosearch'\n if settings['apikey'] == '' or settings['apikey'] == 'blank':\n return await self.bot.say(\"Missing or incorrect API key. Please \" +\n \"contact the owner to add an API key.\")\n apikey = settings['apikey']\n text, limit = self.limitget(text)\n result = self.getfrombing(apikey, text, limit, operation)\n bottext = self.obtainresult(result, operation)\n return await self.bot.say(bottext)",
"def get_individual_video_link(self):\r\n self.filter_url_portion = '' # ignore the filter option.\r\n\r\n target_search_results_obj = []\r\n # in case we want to search more pages just change this and make a loop\r\n self.page_url_portion = '&page=1'\r\n\r\n # start with forming the search\r\n self.form_search_url()\r\n\r\n # Get the dom object from the search page\r\n search_result_dom = self.get_dom_object(self.target_yt_search_url_str)\r\n\r\n # Get the search results\r\n target_search_results_obj.extend(self.tag_element_results(search_result_dom,\r\n 'div[class=\"yt-lockup-content\"] h3[class=\"yt-lockup-title\"] a'))\r\n\r\n #print 'results len: ', len(target_search_results_obj)\r\n\r\n each_video_link_title_dict = {}\r\n for n in target_search_results_obj:\r\n video_link = n.attributes['href']\r\n ## modified video link\r\n # video_link = re.sub('watch\\?v=',r'v/',video_link)\r\n\r\n video_title = n.attributes['title'] #\"Mix\" in video_title[:4] or \"mix\" i(n video_title[:4] or\r\n ile = video_title.lower()\r\n if \"cover\" in ile or \"live\" in ile or \"acustic\" in ile or \"acoustic\" in ile or \"lesson\" in ile:\r\n print \"found blacklisted term, bypassing song: \" + ile\r\n pass #dont want these\r\n else:\r\n each_video_link_title_dict[video_title] = 'https://www.youtube.com' + video_link\r\n\r\n self.video_link_title_dict.update(each_video_link_title_dict)"
] | [
"0.5325902",
"0.52249277",
"0.5168434",
"0.51396066",
"0.51172984",
"0.51164895",
"0.50506824",
"0.50284046",
"0.49790028",
"0.49683398",
"0.4964888",
"0.49612203",
"0.49269286",
"0.49201757",
"0.4912672",
"0.48896024",
"0.48839724",
"0.48567656",
"0.48537496",
"0.48426962",
"0.4834191",
"0.4823739",
"0.4812845",
"0.48068273",
"0.4802188",
"0.47905228",
"0.47867405",
"0.47806296",
"0.47632328",
"0.4762971"
] | 0.7386757 | 0 |
get status of outstanding Hits and unprocessed URLS for a training job | def _get_job_status(self):
total_hits = session.query(BoxHit).filter_by(training_job_id=self.id).count()
num_hits_left = session.query(BoxHit).filter_by(training_job_id=self.id, outstanding=True).count()
total_urls = self.num_urls
num_urls_left = session.query(VideoTrainingURL).filter_by(job=self, processed=False).count()
faces_obtained = MTurkBox.query.filter_by(label=self.evaluator.target_label, result=True).count()
return '\n'.join([
'------------- Stats for Job ID: %s -------------' % str(self.id) ,
'Job for Label : %s' % self.label.name,
'Total URLs : %d' % total_urls,
'Total HITs : %d' % total_hits,
'unprocessed URLS : %d' % num_urls_left,
'outstanding Hits : %d' % num_hits_left,
'Job Finish Status : %s' % self.finished,
'Faces Obtained : %d' % faces_obtained,
]) + '\n' | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_status(cls):\n for job in cls.query.filter(cls.finished == False):\n num_hits_left = session.query(BoxHit).filter_by(training_job_id = job.id, outstanding=True).count()\n urls_left = session.query(VideoTrainingURL).filter_by(training_job_id=job.id, processed = False)\n dynamo = DynamoIngestionStatusClient()\n num_urls_left = 0\n for url in urls_left:\n dynamo_url = dynamo.get(url.url)\n if dynamo_url is None or dynamo_url['status'] == 'Failed':\n # will never be processed, so ignore for our purposes\n url.processed = True\n else:\n num_urls_left += 1\n if num_hits_left+num_urls_left == 0:\n job.finished = True\n print '*** Job ID: %s is complete ***' % str(job.id)\n\n print '------------- Stats for Job ID: %s -------------' % str(job.id)\n print 'Total URLs : %i' % VideoTrainingURL.query.filter_by(training_job_id = job.id).count()\n print 'Total HITs : %i' % BoxHit.query.filter_by(training_job_id = job.id).count()\n if not job.finished:\n print 'unprocessed URLs: %i' % num_urls_left\n print 'outstanding HITs: %i\\n' % num_hits_left\n session.flush()",
"def job_status(self, job_id):\n url = self.base_url + \"/ml-service/phoenix-ml/job/status?id={0}\".format(job_id)\n # url = \"http://10.1.2.110:8199/phoenix-ml/job/status?id=12\"\n headers = {\"ApiKey\": self.api_key}\n response = requests.get(url=url, headers=headers)\n return response.text",
"def get_status(self):\n url = \"data_request?id=jobstatus&job=%d&plugin=zwave\" % self.id\n return self.vera.get(url)",
"def do_status(self, args):\n status = self._leet.job_status\n\n for job in self.finished_jobs:\n status.append({\"id\" : job.id,\n \"hostname\" : job.machine.hostname,\n \"plugin\": job.plugin_instance.LEET_PG_NAME,\n \"status\" : job.status})\n if status:\n pretty_jobs_status(status)\n else:\n print(\"***No jobs pending\")",
"def get_status(chronos_url, statuses=False):\n if statuses:\n print('Jobs on ' + chronos_url)\n connection = http.client.HTTPConnection(chronos_url)\n connection.request(\"GET\", \"/scheduler/jobs\")\n response_str = connection.getresponse().read().decode(\"utf-8\")\n jobs_dict = json.loads(response_str)\n\n connection.request(\"GET\", \"/scheduler/graph/csv\")\n response_str = connection.getresponse().read().decode(\"utf-8\")\n reader = csv.reader(StringIO(response_str), delimiter=',')\n jobs_csv = {}\n for row in reader:\n if row[0] == 'link':\n continue\n jobs_csv[row[1]] = row\n\n # last_status: ['fresh', 'failure', 'success']\n # state: ['idle', 'queued', 'running']\n\n job_status = {}\n job_status['running'] = []\n job_status['failure'] = []\n job_status['fresh'] = []\n job_status['all'] = []\n for job in jobs_dict:\n jname = job['name']\n if jname not in jobs_csv:\n continue\n nerror = job['errorCount']\n nsuccess = job['successCount']\n #command = job['command']\n if statuses:\n print('\\t'.join([jobs_csv[jname][2], jobs_csv[jname][3], str(nerror),\n str(nsuccess), jname]))\n job_status['all'] = job_status['all'] + [jname]\n if jobs_csv[jname][3] == 'running':\n job_status['running'] = job_status['running'] + [jname]\n elif jobs_csv[jname][2] == 'failure':\n job_status['failure'] = job_status['failure'] + [jname]\n elif jobs_csv[jname][2] == 'fresh':\n job_status['fresh'] = job_status['fresh'] + [jname]\n return job_status",
"def status(self):\n return self.job_proto.status",
"def statuses(ctx, job, page):\n\n def get_experiment_statuses():\n try:\n response = PolyaxonClient().experiment.get_statuses(\n user, project_name, _experiment, page=page)\n except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:\n Printer.print_error('Could get status for experiment `{}`.'.format(_experiment))\n Printer.print_error('Error message `{}`.'.format(e))\n sys.exit(1)\n\n meta = get_meta_response(response)\n if meta:\n Printer.print_header('Statuses for experiment `{}`.'.format(_experiment))\n Printer.print_header('Navigation:')\n dict_tabulate(meta)\n else:\n Printer.print_header('No statuses found for experiment `{}`.'.format(_experiment))\n\n objects = list_dicts_to_tabulate(\n [Printer.add_status_color(o.to_light_dict(humanize_values=True), status_key='status')\n for o in response['results']])\n if objects:\n Printer.print_header(\"Statuses:\")\n objects.pop('experiment', None)\n dict_tabulate(objects, is_list_dict=True)\n\n def get_experiment_job_statuses():\n try:\n response = PolyaxonClient().experiment_job.get_statuses(user,\n project_name,\n _experiment,\n _job,\n page=page)\n except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:\n Printer.print_error('Could not get status for job `{}`.'.format(job))\n Printer.print_error('Error message `{}`.'.format(e))\n sys.exit(1)\n\n meta = get_meta_response(response)\n if meta:\n Printer.print_header('Statuses for Job `{}`.'.format(_job))\n Printer.print_header('Navigation:')\n dict_tabulate(meta)\n else:\n Printer.print_header('No statuses found for job `{}`.'.format(_job))\n\n objects = list_dicts_to_tabulate(\n [Printer.add_status_color(o.to_light_dict(humanize_values=True), status_key='status')\n for o in response['results']])\n if objects:\n Printer.print_header(\"Statuses:\")\n objects.pop('job', None)\n dict_tabulate(objects, is_list_dict=True)\n\n page = page or 1\n\n user, project_name, _experiment = get_project_experiment_or_local(ctx.obj.get('project'),\n ctx.obj.get('experiment'))\n\n if job:\n _job = get_experiment_job_or_local(job)\n get_experiment_job_statuses()\n else:\n get_experiment_statuses()",
"def updater_job_status(self,request):\n\t\t# ----------- DEBUG -----------------\n\t\tMODULE.info(\"updater/installer/status invoked with:\")\n\t\tpp = pprint.PrettyPrinter(indent=4)\n\t\tst = pp.pformat(request.options).split(\"\\n\")\n\t\tfor s in st:\n\t\t\t\tMODULE.info(\" << %s\" % s)\n\t\t# -----------------------------------\n\n\t\t# First check if a job is running. This will update the\n\t\t# internal field self._current_job, or if the job is finished,\n\t\t# it would return an empty string.\n\t\tinst = self.__which_job_is_running()\n\n\t\tjob = request.options.get('job','')\n\t\tresult = {}\n\t\tif job in INSTALLERS:\n\t\t\t# make a copy, not a reference!\n#\t\t\tresult = {}\n#\t\t\tfor arg in INSTALLERS[job]:\n#\t\t\t\tresult[arg] = INSTALLERS[job][arg]\n\t\t\tresult = deepcopy(INSTALLERS[job])\n\n\t\t\tif 'statusfile' in INSTALLERS[job]:\n\t\t\t\ttry:\n\t\t\t\t\tfor line in open(INSTALLERS[job]['statusfile']):\n\t\t\t\t\t\tfields = line.strip().split('=')\n\t\t\t\t\t\tif len(fields) == 2:\n\t\t\t\t\t\t\tresult['_%s_' % fields[0]] = fields[1]\n\t\t\t\texcept:\n\t\t\t\t\tpass\n\t\t\t# if we encounter that the frontend asks about the last job we\n\t\t\t# have executed -> include its properties too.\n\t\t\tif self._current_job:\n\t\t\t\tif self._current_job['job'] == job:\n\t\t\t\t\tfor f in self._current_job:\n\t\t\t\t\t\tresult[f] = self._current_job[f]\n\t\t\t\t\t\tif isinstance(result[f],str) and result[f].isdigit():\n\t\t\t\t\t\t\tresult[f] = int(result[f])\n\t\t\t\tif inst == '':\n\t\t\t\t\tresult['running'] = False\n\t\t\telse:\n\t\t\t\t# no job running but status for release was asked? \n\t\t\t\t# maybe the server restarted after job finished\n\t\t\t\t# and the frontend did not get that information\n\t\t\t\t# Bug #26318\n\t\t\t\tif job == 'release':\n\t\t\t\t\tresult['detail'] = '%s-%s' % (self.ucr.get('version/version'), self.ucr.get('version/patchlevel'))\n\t\t\t\telse:\n\t\t\t\t\tresult['detail'] = _('Unknown')\n\n\t\t\t# -------------- additional fields -----------------\n\n\t\t\t# elapsed time, ready to be displayed. (not seconds, but rather\n\t\t\t# the formatted string)\n\t\t\tif 'time' in result and 'started' in result:\n\t\t\t\telapsed = result['time'] - result['started']\n\t\t\t\tif elapsed < 60:\n\t\t\t\t\tresult['elapsed'] = '%ds' % elapsed\n\t\t\t\telse:\n\t\t\t\t\tmins = int(elapsed/60)\n\t\t\t\t\tsecs = elapsed - (60 * mins)\n\t\t\t\t\tif mins < 60:\n\t\t\t\t\t\tresult['elapsed'] = '%d:%02dm' % (mins,secs)\n\t\t\t\t\telse:\n\t\t\t\t\t\thrs = int(mins/60)\n\t\t\t\t\t\tmins = mins - (60*hrs)\n\t\t\t\t\t\tresult['elapsed'] = '%d:%02d:%02dh' % (hrs,mins,secs)\n\t\t\t# Purpose is now formatted in the language of the client (now that\n\t\t\t# this LANG is properly propagated to us)\n\t\t\tif 'purpose' in result:\n\t\t\t\tif result['purpose'].find('%') != -1:\n\t\t\t\t\t# make sure to not explode (Bug #26318), better show nothing\n\t\t\t\t\tif 'detail' in result:\n\t\t\t\t\t\tresult['label'] = result['purpose'] % result['detail']\n\t\t\t\telse:\n\t\t\t\t\tresult['label'] = result['purpose']\n\t\t\t# Affordance to reboot... 
hopefully this gets set before\n\t\t\t# we stop polling on this job status\n\t\t\tself.ucr.load()\t# make it as current as possible\n\t\t\tresult['reboot'] = self.ucr.is_true('update/reboot/required',False)\n\n\t\t# ----------- DEBUG -----------------\n\t\tMODULE.info(\"updater/installer/status returns:\")\n\t\tpp = pprint.PrettyPrinter(indent=4)\n\t\tst = pp.pformat(result).split(\"\\n\")\n\t\tfor s in st:\n\t\t\t\tMODULE.info(\" >> %s\" % s)\n\t\t# -----------------------------------\n\n\t\tself.finished(request.id,result)",
"def get_status(job_key):\n job = Job.fetch(job_key, connection=conn)\n\n logs_url = \"{}{}/runner/logs/{}\".format(request.url_root, API_VERSION, job_key)\n status_dict = {\"status\": \"\", \"logs_url\": logs_url}\n return_code = 200\n if job.is_finished:\n status_dict['status'] = \"success\"\n return_code = 200\n elif job.is_failed:\n status_dict['status'] = \"terminal\"\n return_code = 400\n else:\n status_dict['status'] = \"running\"\n status_dict['logs_url'] = \"\"\n return_code = 202\n\n return jsonify(status_dict), return_code",
"def refresh_queue_status(self):\n \n # Get the jobid and state for all jobs pending/running/completed for the current user\n qacct_stdout=self.run_grid_command_resubmit([\"qacct\",\"-o\",getpass.getuser(),\"-j\",\"*\"])\n \n # info list should include jobid, state, cpus, time, and maxrss\n info=[]\n job_status=[]\n for line in qacct_stdout.split(\"\\n\"):\n if line.startswith(\"jobnumber\") or line.startswith(\"job_number\"):\n if job_status:\n info.append(job_status)\n job_status=[line.rstrip().split()[-1],\"NA\",\"NA\",\"NA\",\"NA\"]\n # get the states for completed jobs\n elif line.startswith(\"failed\"):\n failed_code = line.rstrip().split()[1]\n if failed_code != \"0\":\n if failed_code in [\"37\",\"100\"]:\n job_status[1]=self.job_code_terminated\n else:\n job_status[1]=self.job_code_error\n elif line.startswith(\"deleted_by\"):\n if line.rstrip().split()[-1] != \"NONE\" and job_status[1] == self.job_code_terminated:\n job_status[1]=self.job_code_deleted\n elif line.startswith(\"exit_status\"):\n # only record if status has not yet been set\n if job_status[1] == \"NA\":\n exit_status = line.rstrip().split()[-1]\n if exit_status == \"0\":\n job_status[1]=self.job_code_completed\n elif exit_status == \"137\":\n job_status[1]=self.job_code_terminated\n else:\n job_status[1]=self.job_code_error\n # get the current state for running jobs\n elif line.startswith(\"job_state\"):\n job_status[1]=line.rstrip().split()[-1]\n elif line.startswith(\"slots\"):\n job_status[2]=line.rstrip().split()[-1]\n elif line.startswith(\"ru_wallclock\"):\n try:\n # get the elapsed time in minutes\n job_status[3]=str(float(line.rstrip().split()[-1])/60.0)\n except ValueError:\n job_status[3]=\"NA\"\n elif line.startswith(\"ru_maxrss\"):\n job_status[4]=line.rstrip().split()[-1]+\"K\"\n \n if job_status:\n info.append(job_status)\n\n return info",
"def status():\n used = get_space_used()\n avail = get_space_available()\n allowed = config.download.space_to_use\n print \"Space used by downloaded files: %.2f GB of %.2f GB (%.2f%%)\" % \\\n (used/1024.0**3, allowed/1024.0**3, 100.0*used/allowed)\n print \"Space available on file system: %.2f GB\" % (avail/1024.0**3)\n\n numwait = jobtracker.query(\"SELECT COUNT(*) FROM requests \" \\\n \"WHERE status='waiting'\", \\\n fetchone=True)\n numfail = jobtracker.query(\"SELECT COUNT(*) FROM requests \" \\\n \"WHERE status='failed'\", \\\n fetchone=True)\n print \"Number of requests waiting: %d\" % numwait\n print \"Number of failed requests: %d\" % numfail\n\n numdlactive = jobtracker.query(\"SELECT COUNT(*) FROM files \" \\\n \"WHERE status='downloading'\", \\\n fetchone=True)\n numdlfail = jobtracker.query(\"SELECT COUNT(*) FROM files \" \\\n \"WHERE status='failed'\", \\\n fetchone=True)\n print \"Number of active downloads: %d\" % numdlactive\n print \"Number of failed downloads: %d\" % numdlfail",
"def _get_job_status(module_status, option='all'):\n\n # find the preceding job (1st is used, should be one job in most cases)\n if option == 'first':\n for job, job_status in module_status.items():\n if job != 'pipeline_index':\n out = job_status\n break\n elif option == 'all':\n out = []\n for job, job_status in module_status.items():\n if job != 'pipeline_index':\n out.append(job_status)\n else:\n raise KeyError('Did not recognize pipeline job status request '\n 'for \"{}\"'.format(option))\n return out",
"def get_games_in_progress(self):\n gip_url = 'scores/json/AreAnyGamesInProgress?key=<key>'\n contents = urllib.request.urlopen(self._base_url + gip_url.replace('<key>', self._ak, 1))\n return contents.getcode(), contents.read().decode(\"utf-8\")",
"def _get_current_job_status(acq_tframes):\n cur_job = dict()\n if acq_tframes:\n cur_job['employer'] = f'{str_sep}'.join(\n {tframe.employer if tframe.category == 'O' else tframe.category for tframe in\n acq_tframes}).replace(',', '')\n cur_job['start'] = [tf.start for tf in acq_tframes][0]\n cur_job['end'] = sorted([tf.end for tf in acq_tframes])[-1]\n return cur_job",
"def get_status(self):\n\t\treturn call_sdk_function('PrlJob_GetStatus', self.handle)",
"def query_job_progress():\n pass",
"def get_status(directory):\n\n os.system(\"squeue -o '%.18i %.9P %.16j %.8u %.8T %.10M %.9l %.6D %R %Z'\"\n \">> all_jobs.txt\")\n lines = open('all_jobs.txt').readlines()\n job_state = None\n for i in range(len(lines)):\n if directory in lines[i]:\n job_state = lines[i][4]\n break\n\n os.system('rm all_jobs.txt')\n\n return job_state",
"def test_status(self):\n\n url = '/%s/jobs/?status=RUNNING' % self.api\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 1)\n self.assertEqual(result['results'][0]['job_type']['id'], self.job1.job_type.id)",
"async def _status():\n # TODO(Deepankar): should we add versions of executors?\n return {\n 'status_code': status.HTTP_200_OK,\n 'jina_version': jina_version\n }",
"def _status_summary(jobs):\n assert type(jobs) == list\n successful = 0\n pending = 0\n running = 0\n coalesced = 0\n\n for job in jobs:\n status = buildapi.query_job_status(job)\n if status == buildapi.PENDING:\n pending += 1\n if status == buildapi.RUNNING:\n running += 1\n if status == buildapi.SUCCESS:\n successful += 1\n if status == buildapi.COALESCED:\n coalesced += 1\n\n return (successful, pending, running, coalesced)",
"def test_get_refresh_job_status(self):\n pass",
"def _get_status(self):\n if self._state in [\"processed\", \"error\"]:\n return self._state\n \n get_resp = requests.get(self.location, cookies={\"session\": self.session})\n\n self._state = get_resp.json()[\"status\"]\n self.slice_time = get_resp.json()[\"slice_time\"]\n \n return self._state",
"def available_statuses(self):\n return self.pipeline.get(self.status, ())",
"def available_statuses(self):\n return self.pipeline.get(self.status, ())",
"def find_job_and_job_status(self):\n\n def find_job_and_job_status_log_history(f):\n rcelog('critical', \"find_job_and_status(): Found job {0} in history. Terminated in error.\".\n format(self.id))\n return f\n\n try:\n return self.__get_job_status_from_queue__()\n except:\n pass\n\n try:\n return find_job_and_job_status_log_history(self.__get_job_status_from_history__())\n except:\n return (None, None)",
"def get_job_status(job_url, build_number, username, password):\n try:\n url = \"{}{}/api/json\".format(job_url, str(build_number))\n res = requests.get(url, auth=(username, password))\n build_status_json = json.loads(res.text)\n return build_status_json[\"result\"]\n\n except requests.exceptions.RequestException as e:\n print (e)\n sys.exit(2)",
"def get_job_status(self):\n if self.worker_thread is None:\n return None\n else:\n return self.worker_thread.get_status()",
"def status(self) -> ExperimentStatus:\n if all(\n len(container) == 0\n for container in [\n self._result_data,\n self._jobs,\n self._job_futures,\n self._analysis_callbacks,\n self._analysis_futures,\n self._figures,\n self._analysis_results,\n ]\n ):\n return ExperimentStatus.EMPTY\n\n # Return job status is job is not DONE\n try:\n return {\n JobStatus.INITIALIZING: ExperimentStatus.INITIALIZING,\n JobStatus.QUEUED: ExperimentStatus.QUEUED,\n JobStatus.VALIDATING: ExperimentStatus.VALIDATING,\n JobStatus.RUNNING: ExperimentStatus.RUNNING,\n JobStatus.CANCELLED: ExperimentStatus.CANCELLED,\n JobStatus.ERROR: ExperimentStatus.ERROR,\n }[self.job_status()]\n except KeyError:\n pass\n\n # Return analysis status if Done, cancelled or error\n try:\n return {\n AnalysisStatus.DONE: ExperimentStatus.DONE,\n AnalysisStatus.CANCELLED: ExperimentStatus.CANCELLED,\n AnalysisStatus.ERROR: ExperimentStatus.ERROR,\n }[self.analysis_status()]\n except KeyError:\n return ExperimentStatus.POST_PROCESSING",
"def get_status(url):\n\n with active_tivos_lock:\n for tivo_tasks in active_tivos:\n with tivo_tasks['lock']:\n for status in tivo_tasks['queue']:\n if status['url'] == url:\n return status, tivo_tasks['lock']\n\n return None, None",
"def launch_status(self):\n print(\n f\"Starting job with {len(self.fe.get_network())} jobs total. \",\n end=\"\\r\",\n )"
] | [
"0.7707184",
"0.63947445",
"0.63776845",
"0.6374783",
"0.6318901",
"0.63123906",
"0.62420017",
"0.6170347",
"0.6152266",
"0.6072089",
"0.6031808",
"0.6019294",
"0.59752125",
"0.59679496",
"0.594853",
"0.59385747",
"0.59290457",
"0.5922479",
"0.5907669",
"0.5902595",
"0.58839726",
"0.58834803",
"0.58809835",
"0.58809835",
"0.5876914",
"0.58465517",
"0.5843674",
"0.58245033",
"0.5814731",
"0.581306"
] | 0.7045921 | 1 |
Sets the allowed_processes of this RuntimeAntiMalwareRule. | def allowed_processes(self, allowed_processes):
self._allowed_processes = allowed_processes | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def denied_processes(self, denied_processes):\n\n self._denied_processes = denied_processes",
"def _set_processes(self, processes: int = 1):\n self.__processes = processes",
"def set_process_limits(): # pragma: no cover\n # Set a new session id so that this process and all its children will be\n # in a new process group, so we can kill them all later if we need to.\n os.setsid()\n\n # No subprocesses.\n resource.setrlimit(resource.RLIMIT_NPROC, (0, 0))\n\n # CPU seconds, not wall clock time.\n cpu = LIMITS[\"CPU\"]\n if cpu:\n resource.setrlimit(resource.RLIMIT_CPU, (cpu, cpu))\n\n # Total process virtual memory.\n vmem = LIMITS[\"VMEM\"]\n if vmem:\n resource.setrlimit(resource.RLIMIT_AS, (vmem, vmem))\n\n # Size of written files. Can be zero (nothing can be written).\n fsize = LIMITS[\"FSIZE\"]\n resource.setrlimit(resource.RLIMIT_FSIZE, (fsize, fsize))",
"def forbidden_processes(self):\n return {name for name, flag in self._required_processes.items() if not flag}",
"def allowed(self, allowed):\n if allowed is None:\n raise ValueError(\"Invalid value for `allowed`, must not be `None`\") # noqa: E501\n\n self._allowed = allowed",
"def allowed_vehicles(self, allowed_vehicles):\n\n self._allowed_vehicles = allowed_vehicles",
"def allowed_vehicles(self, allowed_vehicles):\n\n self._allowed_vehicles = allowed_vehicles",
"def disallowed_vehicles(self, disallowed_vehicles):\n\n self._disallowed_vehicles = disallowed_vehicles",
"def disallowed_vehicles(self, disallowed_vehicles):\n\n self._disallowed_vehicles = disallowed_vehicles",
"def shots_allowed(self, shots_allowed):\n\n self._shots_allowed = shots_allowed",
"def set_par_range(self, mins, maxs, frozen):\n self.parmins = mins\n self.parmaxs = maxs\n self.pars_frozen = frozen\n return",
"def add_worker_processes(self, value, smt_used):\n # nginx workers : vpp used phy workers = 2:1\n if smt_used:\n value = value * 4\n else:\n value = value * 2\n path = [u\"worker_processes\"]\n self.add_config_item(self._nodeconfig, value, path)",
"def _process_threadpool_limits_initializier():\n import numpy # required for loky's autodetection\n from threadpoolctl import threadpool_limits\n\n threadpool_limits(limits=1)",
"def setAllowUpscaling(self, allow):\n self._allow_upscaling = allow\n self.update()",
"def set_blacklist(self):\n\n for name in self.__ipset:\n if self.verbose:\n print(\"Start create: \" + self.__ipset[name]['ipset-name'])\n\n # create ipset\n self.__process(name, self.__parser.create(name))\n\n if self.verbose:\n print('Done')",
"def set_cpu_limit(self, nVmCpuLimit):\n\t\tcall_sdk_function('PrlVmCfg_SetCpuLimit', self.handle, nVmCpuLimit)",
"def SetAntLimit(cls, value=0):\n cls.antLimit = value",
"def setAllowAnnotations(self,value):\n self.PDFreactorConfiguration.in1[\"allowAnnotations\"] = value",
"def SetCannoLinks( self, cannotLinks ):\n\t\tself.cannotLinkConstraints = [ frozenset(constraint) for constraint in cannotLinks ]",
"def filter_safe_user_agents(self, filter_safe_user_agents: ConfigNodePropertyArray):\n\n self._filter_safe_user_agents = filter_safe_user_agents",
"def SetAllowUpscaling(self, allow):\n self._allow_upscaling = allow\n self.Refresh()",
"def set_asset_restrictions(self, restrictions, on_error='fail'):\n control = RestrictedListOrder(on_error, restrictions)\n self.register_trading_control(control)\n self.restrictions |= restrictions",
"def setrestricted(miner: Miner, login, allowsetting):\n commands = get_changeconfigcommands(getminerfilename(miner), 'api-allow', allowsetting)\n sendcommands_and_restart(miner, login, commands)",
"def restrict_robots(self, restrict_robots):\n if restrict_robots is None:\n raise ValueError(\"Invalid value for `restrict_robots`, must not be `None`\")\n\n self._restrict_robots = restrict_robots",
"def filter_enable_safe_user_agents(self, filter_enable_safe_user_agents: ConfigNodePropertyBoolean):\n\n self._filter_enable_safe_user_agents = filter_enable_safe_user_agents",
"def set_allowed_dock_areas(self, dock_areas):\n wx_areas = 0\n for area in dock_areas:\n wx_areas |= _ALLOWED_AREAS_MAP[area]\n self.widget.SetAllowedDockAreas(wx_areas)",
"def custom_launch_address_allowed(self, custom_launch_address_allowed):\n\n self._custom_launch_address_allowed = custom_launch_address_allowed",
"def set_allow_upscaling(self, allow):\n self.widget.SetAllowUpscaling(allow)",
"def set_mem_per_proc(self, mem_mb):\n super(SlurmAdapter, self).set_mem_per_proc(mem_mb)\n self.qparams[\"mem_per_cpu\"] = self.mem_per_proc\n # Remove mem if it's defined.\n #self.qparams.pop(\"mem\", None)",
"def set_allow_upscaling(self, allow):\n self.widget.setAllowUpscaling(allow)"
] | [
"0.6635991",
"0.6162344",
"0.5399079",
"0.52147853",
"0.50356615",
"0.48677087",
"0.48677087",
"0.48211288",
"0.48211288",
"0.4768049",
"0.4627171",
"0.46265787",
"0.45992205",
"0.4593496",
"0.45846242",
"0.45787704",
"0.45422518",
"0.4504017",
"0.45009166",
"0.44671568",
"0.44628266",
"0.44425792",
"0.44396797",
"0.44380534",
"0.44367805",
"0.44236174",
"0.44160378",
"0.44075075",
"0.4402365",
"0.4373221"
] | 0.8159298 | 0 |
Sets the crypto_miner of this RuntimeAntiMalwareRule. | def crypto_miner(self, crypto_miner):
self._crypto_miner = crypto_miner | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def SetMiningMethod(self,miningMethod):\n self.theMiningSystem.miningMethod = miningMethod",
"def setArmor(self, armor):\n self.av = armor",
"def setCryptographicHandlers( self , tkip = None , aes = None ):\n\t\tself.handleTKIP = tkip\n\t\tself.handleAES \t= aes",
"def set_encryption(key):\n global_scope['enc'] = Encryption(key.encode())",
"def mgmt_tool(self, mgmt_tool: MgmtClient):\n self._mgmt_tool = mgmt_tool",
"def set_etacalc(self, etacalc):\n self.__etacalc = etacalc",
"def setrestricted(miner: Miner, login, allowsetting):\n commands = get_changeconfigcommands(getminerfilename(miner), 'api-allow', allowsetting)\n sendcommands_and_restart(miner, login, commands)",
"def server_side_encryption_algorithm(self, server_side_encryption_algorithm):\n\n self._server_side_encryption_algorithm = server_side_encryption_algorithm",
"def algorithm(self, algorithm):\n allowed_values = [\"CG\", \"CR\", \"GCR\", \"GMRES\"] # noqa: E501\n if self.local_vars_configuration.client_side_validation and algorithm not in allowed_values: # noqa: E501\n raise ValueError(\n \"Invalid value for `algorithm` ({0}), must be one of {1}\" # noqa: E501\n .format(algorithm, allowed_values)\n )\n\n self._algorithm = algorithm",
"def _derive_crypto(self, pad_string): # XXX consider secret_seed\n secret = self.mac(pad_string,\n self.initiator_seed + self.responder_seed,\n self.shared_secret)\n return aes.AES_CTR_128(secret[:KEYLEN], secret[KEYLEN:])",
"def load_miner(miner, login):\n #ftp the new miner\n commands = []\n commands.append('cd /usr/bin')\n commands.append('cp bmminer bmminer.original')\n commands.append('cp bmminer880 bmminer')\n commands.append('chmod +x bmminer')\n sendcommands_and_restart(miner, login, commands)",
"def setprivileged(miner: Miner, login, allowsetting):\n commands = get_changeconfigcommands(getminerfilename(miner), 'api-allow', allowsetting)\n sendcommands_and_restart(miner, login, commands)",
"def set_algorithm(self, initmethod = 'pca', algtype = 'batch', neighborhoodmethod = 'gaussian', alfatype = 'inv', alfaini = .5, alfafinal = .005):\n self.initmethod = initmethod\n self.algtype = algtype\n self.alfaini = alfaini\n self.alfafinal = alfafinal\n self.neigh = neighborhoodmethod",
"def enable_encryption(self, output_key: bytes, input_key: bytes) -> None:\n self.chacha = chacha20.Chacha20Cipher(output_key, input_key)\n self.state.has_authenticated = True",
"def set_active_tool(self, tool=None):\n self.active_tool = tool",
"def set_smearing(self, smearing_Ha):\n self.smearing = smearing_Ha\n self.qptanalyzer.smearing = smearing_Ha",
"def SetStrengthThresh(self, strength):\n return _hypre.HypreBoomerAMG_SetStrengthThresh(self, strength)",
"def __init__(__self__, *,\n enabled: pulumi.Input[bool],\n encryption_settings: Optional[pulumi.Input[Sequence[pulumi.Input['EncryptionSettingsElementArgs']]]] = None,\n encryption_settings_version: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"enabled\", enabled)\n if encryption_settings is not None:\n pulumi.set(__self__, \"encryption_settings\", encryption_settings)\n if encryption_settings_version is not None:\n pulumi.set(__self__, \"encryption_settings_version\", encryption_settings_version)",
"def configure_enable_aes_encryption(device, master_key):\n dialog = Dialog(\n [\n Statement(\n pattern=r\".*New\\s*key.*\",\n action=f\"sendline({master_key})\",\n loop_continue=True,\n continue_timer=False,\n ),\n Statement(\n pattern=r\".*Confirm\\s*key.*\",\n action=f\"sendline({master_key})\",\n loop_continue=True,\n continue_timer=False,\n )\n ]\n )\n try:\n device.configure(\"key config-key password-encrypt\", reply=dialog)\n device.configure(\"password encryption aes\")\n except SubCommandFailure as e:\n raise SubCommandFailure(\n \"Could not enables aes password encryption on device {device}.\\nError:\"\n \" {e}\".format(device=device.name, e=str(e))\n )",
"def key_manager(self, key_manager):\n\n self._key_manager = key_manager",
"def medal_tier_hash(self, medal_tier_hash):\n\n self._medal_tier_hash = medal_tier_hash",
"def SetToolPacking(self, packing):\r\n\r\n self._tool_packing = packing",
"def set_algorithm(self, algorithm):\n self._data_dict[self.ALGO_INFO] = {'module' : algorithm.algo_group.module,\n 'class' : algorithm.algo_group.classname,\n 'init_param' : algorithm.algo_group.init_parameter,\n 'identifier' : algorithm.identifier}",
"def algorithm(self, algorithm_):\n\n if algorithm_ is not None and algorithm_ not in checksumsModule.supportedAlgorithms:\n raise ValueError(\"Unsupported checksum algorithm '%s'.\" % algorithm_)\n self.__algorithm = algorithm_",
"def periphery(self, periphery):\n\n self._periphery = periphery",
"def __init__(self, encryption_method: str, encryption_key_size: int = 32, encryption_key: bytes = None,\r\n block_size: int = 32, block_mode: str = BlockMode.ECB):\r\n self.__encryption_method = encryption_method\r\n self.__encryption_key_size = encryption_key_size\r\n self.__encryption_key = encryption_key\r\n self.__block_size = block_size\r\n self.__block_mode = block_mode\r\n\r\n if self.__encryption_key is None:\r\n self.__randomize_key_on_every_encryption = True\r\n else:\r\n self.__randomize_key_on_every_encryption = False\r\n\r\n # Generate the next key to be used\r\n if self.__randomize_key_on_every_encryption:\r\n self.__encryption_key = get_random_bytes(self.__encryption_key_size)",
"def mcs(self, mcs):\n\n self._block.tx_policies[self._lvap.addr].mcs = mcs",
"def __init__(__self__, *,\n algorithm: str,\n protection_level: str):\n pulumi.set(__self__, \"algorithm\", algorithm)\n pulumi.set(__self__, \"protection_level\", protection_level)",
"def home_carrier_network(self, home_carrier_network):\n\n self._home_carrier_network = home_carrier_network",
"def setup_algo_delegate_smart_contract(self):\n if self.app_id == -1:\n raise ValueError('The application has not been created')\n\n algo_delegate_authority_compiled = compileTeal(algo_delegate_authority_logic(app_id=self.app_id),\n mode=Mode.Signature,\n version=self.teal_version)\n\n algo_delegate_authority_bytes = blockchain_utils.compile_program(client=self.client,\n source_code=algo_delegate_authority_compiled)\n\n self.algo_delegate_authority_address = algo_logic.address(algo_delegate_authority_bytes)"
] | [
"0.50891584",
"0.4544723",
"0.45225394",
"0.44707024",
"0.43791777",
"0.4358885",
"0.43324634",
"0.43239263",
"0.429425",
"0.42103016",
"0.4189996",
"0.41426286",
"0.41291493",
"0.41247395",
"0.40786296",
"0.40624338",
"0.40478006",
"0.40322292",
"0.40299213",
"0.4029869",
"0.4016311",
"0.40121064",
"0.4005026",
"0.39897203",
"0.39876863",
"0.3986784",
"0.3986193",
"0.39789766",
"0.3946691",
"0.3911422"
] | 0.7893291 | 0 |
Sets the custom_feed of this RuntimeAntiMalwareRule. | def custom_feed(self, custom_feed):
self._custom_feed = custom_feed | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def intelligence_feed(self, intelligence_feed):\n\n self._intelligence_feed = intelligence_feed",
"def custom(self, custom):\n self._context[\"custom\"] = custom",
"def is_custom(self, is_custom):\n\n self._is_custom = is_custom",
"def is_custom(self, is_custom):\n\n self._is_custom = is_custom",
"def exempt_feed_retroactive(db, c, feed_uid, **kwargs):\n c.execute(\"\"\"update fm_items\n set item_rating=0, item_rule_uid=NULL\n where item_feed_uid=? and item_content!='' and exists (\n select rule_uid from fm_rules\n where rule_feed_uid is null and item_rule_uid=rule_uid\n )\"\"\", [feed_uid])",
"def custom_launch_address_allowed(self, custom_launch_address_allowed):\n\n self._custom_launch_address_allowed = custom_launch_address_allowed",
"def _do_custom(self, custom):\n if custom:\n self.words[-1].custom_tags.update(custom)",
"def __init__(self, main_feed):\n self.main_feed = main_feed",
"def custom_data(self, custom_data):\n\n self._custom_data = custom_data",
"def feed_link_decorator(context, feed):\n for item in feed.items:\n current_link = item['link']\n # print(current_link)\n new_link = current_link + FUD_DEFAULT['parameters']\n item['link'] = new_link\n # print(item)\n return feed",
"def wrap_feed(feed, max_iter=-1, **devtype):\n return FeedMover(FeedLimiter(feed, max_iter), **devtype)",
"def limit(self, custom_limit):\n # NOTE(gibi): this operation needs escalated privileges (e.g. admin)\n # as the owner of the app cannot set its own app's limits. But\n # authorization is out of scope.\n self._limit = custom_limit",
"def feed_id(self, feed_id):\n\n self._feed_id = feed_id",
"def set_custom(self, custom):\n custom = clamp(custom, 1, 12)\n self._state.mode = custom\n self.send_command(Command.SET_CUSTOM, [int(custom)])",
"def parse_feed(self):\n parsed_feed = feedparser.parse(self.rss_url)\n # Check for malformed feed\n if parsed_feed['bozo']:\n raise Exception('malformed rss feed!')\n self.parsed_feed = parsed_feed",
"def __init__(self, googleReader, type):\r\n super(SpecialFeed, self).__init__(\r\n googleReader,\r\n title = type,\r\n id = ReaderUrl.SPECIAL_FEEDS_PART_URL+type,\r\n unread = 0,\r\n categories = [],\r\n )\r\n self.type = type\r\n\r\n self.fetchUrl = ReaderUrl.CONTENT_BASE_URL + Category.urlQuote(self.id)",
"def custom_rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['FirewallPolicyCustomRuleArgs']]]]:\n return pulumi.get(self, \"custom_rules\")",
"def custom_rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['FirewallPolicyCustomRuleArgs']]]]:\n return pulumi.get(self, \"custom_rules\")",
"def is_armed_custom_bypass(self):\n return self == ArmingState.ARMED_CUSTOM_BYPASS",
"def MutateFeedItemSetLinks(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def custom_compliance_standard(self, custom_compliance_standard):\n\n self._custom_compliance_standard = custom_compliance_standard",
"def feed_treat(self):\r\n # TODO\r\n pass",
"def feed_link(self):\n return self.url.replace(\"http://\", \"feed://\")",
"def custom_connector(self) -> Optional[pulumi.Input['FlowSourceFlowConfigSourceConnectorPropertiesCustomConnectorArgs']]:\n return pulumi.get(self, \"custom_connector\")",
"def run_rss(self):\n\n pass",
"def user_custom_data(self, user_custom_data):\n\n self._user_custom_data = user_custom_data",
"def get_feed_for(self, user):\n pass",
"def select_feed(self, feed: None | Feed = None) -> None:\n self.current_feed = feed\n self.refresh()",
"def feed_read(self, feed):\n if feed != self.current_feed:\n return\n self.action_mark_all_read.setDisabled(True)\n for item in self.current_feed.get_items():\n self.update_item(item)",
"def set_CustomWebsiteRedirection(self, value):\n super(PutBucketWebsiteRedirectInputSet, self)._set_input('CustomWebsiteRedirection', value)"
] | [
"0.54938793",
"0.51673585",
"0.5140491",
"0.5140491",
"0.5040152",
"0.49456972",
"0.48383343",
"0.4757906",
"0.47423247",
"0.46796066",
"0.46738562",
"0.46644983",
"0.45845363",
"0.44624054",
"0.44565436",
"0.4375623",
"0.437332",
"0.437332",
"0.43702433",
"0.43596616",
"0.43333727",
"0.43262115",
"0.43150225",
"0.4300738",
"0.42950845",
"0.42717716",
"0.4259351",
"0.424703",
"0.4239216",
"0.42325446"
] | 0.7653512 | 0 |
Sets the denied_processes of this RuntimeAntiMalwareRule. | def denied_processes(self, denied_processes):
self._denied_processes = denied_processes | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def allowed_processes(self, allowed_processes):\n\n self._allowed_processes = allowed_processes",
"def _set_processes(self, processes: int = 1):\n self.__processes = processes",
"def forbidden_processes(self):\n return {name for name, flag in self._required_processes.items() if not flag}",
"def deny(self, deny):\n if self.local_vars_configuration.client_side_validation and deny is None: # noqa: E501\n raise ValueError(\"Invalid value for `deny`, must not be `None`\") # noqa: E501\n\n self._deny = deny",
"def disable_attachments(self, disable_attachments):\n\n self._disable_attachments = disable_attachments",
"def set_process_limits(): # pragma: no cover\n # Set a new session id so that this process and all its children will be\n # in a new process group, so we can kill them all later if we need to.\n os.setsid()\n\n # No subprocesses.\n resource.setrlimit(resource.RLIMIT_NPROC, (0, 0))\n\n # CPU seconds, not wall clock time.\n cpu = LIMITS[\"CPU\"]\n if cpu:\n resource.setrlimit(resource.RLIMIT_CPU, (cpu, cpu))\n\n # Total process virtual memory.\n vmem = LIMITS[\"VMEM\"]\n if vmem:\n resource.setrlimit(resource.RLIMIT_AS, (vmem, vmem))\n\n # Size of written files. Can be zero (nothing can be written).\n fsize = LIMITS[\"FSIZE\"]\n resource.setrlimit(resource.RLIMIT_FSIZE, (fsize, fsize))",
"def set_all_ports_admin_disabled(self):\n pass",
"def outgoing_calls_disabled(self, outgoing_calls_disabled):\n\n self._outgoing_calls_disabled = outgoing_calls_disabled",
"def privileged(self, privileged):\n\n self._privileged = privileged",
"def set_blacklist(self):\n\n for name in self.__ipset:\n if self.verbose:\n print(\"Start create: \" + self.__ipset[name]['ipset-name'])\n\n # create ipset\n self.__process(name, self.__parser.create(name))\n\n if self.verbose:\n print('Done')",
"def security_policy_num_not(self, security_policy_num_not):\n\n self._security_policy_num_not = security_policy_num_not",
"def sms_disabled(self, sms_disabled):\n\n self._sms_disabled = sms_disabled",
"def setDiscardFlags(self, flags):\r\n self.__data.discardFlags = flags",
"def denyMethod(self, verb, resource):\n self._addMethod(\"Deny\", verb, resource, [])",
"def disallowed_vehicles(self, disallowed_vehicles):\n\n self._disallowed_vehicles = disallowed_vehicles",
"def disallowed_vehicles(self, disallowed_vehicles):\n\n self._disallowed_vehicles = disallowed_vehicles",
"def no_negatives_maturity_policy(self, no_negatives_maturity_policy):\n\n self._no_negatives_maturity_policy = no_negatives_maturity_policy",
"def invalid_password_attempts(self, invalid_password_attempts):\n\n self._invalid_password_attempts = invalid_password_attempts",
"def _StopMonitoringProcesses(self):\n # We need to make a copy of the list of pids since we are changing\n # the dict in the loop.\n for pid in list(self._process_information_per_pid.keys()):\n self._RaiseIfNotRegistered(pid)\n process = self._processes_per_pid[pid]\n\n self._StopMonitoringProcess(process)",
"def denyAllMethods(self):\n self._addMethod(\"Deny\", HttpVerb.ALL, \"*\", [])",
"def stopProcesses(*args):\n _stopProcessSet(_running)",
"def keyguard_disabled(self, keyguard_disabled):\n\n self._keyguard_disabled = keyguard_disabled",
"def adb_disabled(self, adb_disabled):\n\n self._adb_disabled = adb_disabled",
"def SetCannoLinks( self, cannotLinks ):\n\t\tself.cannotLinkConstraints = [ frozenset(constraint) for constraint in cannotLinks ]",
"def set_allowed_dock_areas(self, dock_areas):\n wx_areas = 0\n for area in dock_areas:\n wx_areas |= _ALLOWED_AREAS_MAP[area]\n self.widget.SetAllowedDockAreas(wx_areas)",
"def deny(self, role, *permissions):\n\n if not isinstance(role, string_types):\n role = role.id\n\n rec = self.get(Deny, role)\n if rec is None:\n rec = [Deny, role, set()]\n self.append(rec)\n\n if rec[2] is ALL_PERMISSIONS:\n return\n\n if ALL_PERMISSIONS in permissions:\n rec[2] = ALL_PERMISSIONS\n else:\n rec[2].update(permissions)",
"def non_heap_max(self, non_heap_max):\n\n self._non_heap_max = non_heap_max",
"def total_rejected_requests(self, total_rejected_requests):\n\n self._total_rejected_requests = total_rejected_requests",
"def setImpossiblePenultimates(self, impossible_penultimates):\n return self._set(impossiblePenultimates=impossible_penultimates)",
"def disable_bprot(self):\n result = self._lib.NRFJPROG_disable_bprot()\n if result != NrfjprogdllErr.SUCCESS:\n raise APIError(result)"
] | [
"0.67622226",
"0.56745166",
"0.5228589",
"0.49396628",
"0.47714043",
"0.46959263",
"0.46451238",
"0.46282858",
"0.46127596",
"0.45595217",
"0.45097315",
"0.45039085",
"0.44965488",
"0.4474816",
"0.44690263",
"0.44690263",
"0.44617447",
"0.445545",
"0.4427608",
"0.44184902",
"0.44168773",
"0.43556237",
"0.43441352",
"0.43348035",
"0.4329016",
"0.43210304",
"0.4320626",
"0.43138334",
"0.4311246",
"0.43104357"
] | 0.8523715 | 0 |
Sets the detect_compiler_generated_binary of this RuntimeAntiMalwareRule. | def detect_compiler_generated_binary(self, detect_compiler_generated_binary):
self._detect_compiler_generated_binary = detect_compiler_generated_binary | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def compiler(self, target):\n self._check_target(target)\n return target.compiler or self._default_compiler",
"def _set_default_compiler():\n ccompiler = new_compiler()\n customize_compiler(ccompiler)\n # xcrun wrapper must bring all args\n if ccompiler.compiler[0] == 'xcrun':\n ccompiler.compiler[0] = get_config_var(\"CC\")\n ccompiler.compiler_cxx[0] = get_config_var(\"CXX\")\n os.environ.setdefault(\"CC\", ccompiler.compiler[0])\n os.environ.setdefault(\"CXX\", ccompiler.compiler_cxx[0])",
"def customize_compiler(compiler):\n _customize_compiler(compiler)\n\n if compiler.compiler_type == \"unix\":\n linker_exe = \" \".join(compiler.linker_exe)\n if 'LDFLAGS' in os.environ:\n linker_exe += ' ' + os.environ['LDFLAGS']\n if 'CFLAGS' in os.environ:\n linker_exe += ' ' + os.environ['CFLAGS']\n if 'CPPFLAGS' in os.environ:\n linker_exe += ' ' + os.environ['CPPFLAGS']\n\n compiler.set_executable(\"linker_exe\", linker_exe)",
"def customize_compiler_for_nvcc(compiler):\n # adapted from\n # https://stackoverflow.com/questions/10034325/can-python-distutils-compile-cuda-code\n # --- tell the compiler it can processes .cu\n compiler.src_extensions.append('.cu')\n # --- save references to the default compiler_so and _comple methods\n default_compiler_so = compiler.compiler_so\n super = compiler._compile\n\n # --- now redefine the _compile method. This gets executed for each\n # object but distutils doesn't have the ability to change compilers\n # based on source extension: we add it.\n def _compile(obj, src, ext, cc_args, extra_postargs, pp_opts):\n if os.path.splitext(src)[1] == '.cu':\n # use cuda for .cu files\n compiler.set_executable('compiler_so', CUDA['nvcc'])\n # use only a subset of the extra_postargs, which are 1-1\n # translated from the extra_compile_args in the Extension class\n postargs = extra_postargs['nvcc']\n else:\n if isinstance(extra_postargs, dict):\n postargs = extra_postargs['gcc']\n else:\n postargs = extra_postargs\n super(obj, src, ext, cc_args, postargs, pp_opts)\n # reset the default compiler_so, which we might have changed for\n # cuda\n compiler.compiler_so = default_compiler_so\n\n # inject our redefined _compile method into the class\n compiler._compile = _compile\n return compiler",
"def customize_compiler(compiler):\n if compiler.compiler_type == \"unix\":\n if sys.platform == \"darwin\":\n # Perform first-time customization of compiler-related\n # config vars on OS X now that we know we need a compiler.\n # This is primarily to support Pythons from binary\n # installers. The kind and paths to build tools on\n # the user system may vary significantly from the system\n # that Python itself was built on. Also the user OS\n # version and build tools may not support the same set\n # of CPU architectures for universal builds.\n global _config_vars\n # Use get_config_var() to ensure _config_vars is initialized.\n if not get_config_var('CUSTOMIZED_OSX_COMPILER'):\n import _osx_support\n _osx_support.customize_compiler(_config_vars)\n _config_vars['CUSTOMIZED_OSX_COMPILER'] = 'True'\n\n (cc, cxx, opt, cflags, ccshared, ldshared, shlib_suffix, ar, ar_flags) = \\\n get_config_vars('CC', 'CXX', 'OPT', 'CFLAGS',\n 'CCSHARED', 'LDSHARED', 'SHLIB_SUFFIX', 'AR', 'ARFLAGS')\n\n if 'CC' in os.environ:\n newcc = os.environ['CC']\n if (sys.platform == 'darwin'\n and 'LDSHARED' not in os.environ\n and ldshared.startswith(cc)):\n # On OS X, if CC is overridden, use that as the default\n # command for LDSHARED as well\n ldshared = newcc + ldshared[len(cc):]\n cc = newcc\n if 'CXX' in os.environ:\n cxx = os.environ['CXX']\n if 'LDSHARED' in os.environ:\n ldshared = os.environ['LDSHARED']\n if 'CPP' in os.environ:\n cpp = os.environ['CPP']\n else:\n cpp = cc + \" -E\" # not always\n if 'LDFLAGS' in os.environ:\n ldshared = ldshared + ' ' + os.environ['LDFLAGS']\n if 'CFLAGS' in os.environ:\n cflags = opt + ' ' + os.environ['CFLAGS']\n ldshared = ldshared + ' ' + os.environ['CFLAGS']\n if 'CPPFLAGS' in os.environ:\n cpp = cpp + ' ' + os.environ['CPPFLAGS']\n cflags = cflags + ' ' + os.environ['CPPFLAGS']\n ldshared = ldshared + ' ' + os.environ['CPPFLAGS']\n if 'AR' in os.environ:\n ar = os.environ['AR']\n if 'ARFLAGS' in os.environ:\n archiver = ar + ' ' + os.environ['ARFLAGS']\n else:\n archiver = ar + ' ' + ar_flags\n\n cc_cmd = cc + ' ' + cflags\n compiler.set_executables(\n preprocessor=cpp,\n compiler=cc_cmd,\n compiler_so=cc_cmd + ' ' + ccshared,\n compiler_cxx=cxx,\n linker_so=ldshared,\n linker_exe=cc,\n archiver=archiver)\n\n compiler.shared_lib_extension = shlib_suffix",
"def CompiledBinary(\n makefile=None,\n compiler=\"gcc\",\n sources=None,\n binary_name=None,\n is_32_bit=True,\n executable_stack=True,\n no_stack_protector=True,\n aslr=False,\n compiler_flags=None,\n flag_file=None,\n static_flag=None,\n share_source=False,\n remote=False,\n):\n\n if compiler_flags is None:\n compiler_flags = []\n\n if is_32_bit and \"-m32\" not in compiler_flags:\n compiler_flags.append(\"-m32\")\n if executable_stack and \"-zexecstack\" not in compiler_flags:\n compiler_flags.append(\"-zexecstack\")\n if no_stack_protector and \"-fno-stack-protector\" not in compiler_flags:\n compiler_flags.append(\"-fno-stack-protector\")\n if no_stack_protector and \"-D_FORTIFY_SOURCE=0\" not in compiler_flags:\n compiler_flags.append(\"-D_FORTIFY_SOURCE=0\")\n\n if makefile is None and sources is None:\n assert False, \"You must provide either a makefile or a sources list\"\n\n if sources is None:\n assert (\n binary_name is not None\n ), \"You must provide the binary name if you use a makefile\"\n\n if flag_file is None:\n flag_file = \"flag.txt\"\n\n base_classes = [Compiled]\n if remote:\n base_classes.append(Remote)\n\n class Problem(*base_classes):\n files = copy([])\n\n remove_aslr = not aslr\n\n if share_source:\n files = copy([File(source) for source in sources])\n\n if binary_name is not None:\n program_name = binary_name\n else:\n program_name = os.path.splitext(sources[0])[0]\n\n def __init__(self):\n self.makefile = makefile\n self.compiler = compiler\n self.compiler_sources = sources\n self.compiler_flags = compiler_flags\n\n if not os.path.isfile(flag_file):\n with open(flag_file, \"w\") as f:\n f.write(\"{{flag}}\\n\")\n\n if static_flag is not None:\n self.generate_flag = lambda random: static_flag\n\n self.files.append(ProtectedFile(flag_file))\n\n return Problem",
"def set_compile_test(self, value):\n\n self._compileTest = value",
"def generate_linker_flags(self):\n self.__linker_flags = []\n if self.__command_basename.startswith(\"g++\") or self.__command_basename.startswith(\"gcc\"):\n self.__linker_flags += [\"-nostartfiles\", \"-nostdlib\", \"-Xlinker\", \"--strip-all\"]\n elif self.__command_basename.startswith(\"clang\"):\n self.__linker_flags += [\"-nostdlib\", \"-Xlinker\", \"--strip-all\"]\n elif self.__command_basename.startswith(\"ld\"):\n dynamic_linker = str(PlatformVar(\"interp\"))\n if dynamic_linker.startswith(\"\\\"\") and dynamic_linker.endswith(\"\\\"\"):\n dynamic_linker = dynamic_linker[1:-1]\n elif dynamic_linker.startswith(\"0x\"):\n dynamic_linker = \"\"\n self.__linker_flags += [\"-nostdlib\", \"--strip-all\", \"--dynamic-linker=%s\" % (dynamic_linker)]\n else:\n raise RuntimeError(\"compilation not supported with compiler '%s'\" % (op))",
"def customize_compiler(compiler):\n if compiler.compiler_type == \"unix\":\n (cc, cxx, opt, basecflags, ccshared, ldshared, so_ext) = \\\n distutils.sysconfig.get_config_vars('CC', 'CXX', 'OPT', 'BASECFLAGS', 'CCSHARED', 'LDSHARED', 'SO')\n\n if os.environ.has_key('CC'):\n cc = os.environ['CC']\n if os.environ.has_key('CXX'):\n cxx = os.environ['CXX']\n if os.environ.has_key('LDSHARED'):\n ldshared = os.environ['LDSHARED']\n if os.environ.has_key('CPP'):\n cpp = os.environ['CPP']\n else:\n cpp = cc + \" -E\" # not always\n if os.environ.has_key('LDFLAGS'):\n ldshared = ldshared + ' ' + os.environ['LDFLAGS']\n if basecflags:\n opt = basecflags + ' ' + opt\n if os.environ.has_key('CFLAGS'):\n opt = opt + ' ' + os.environ['CFLAGS']\n ldshared = ldshared + ' ' + os.environ['CFLAGS']\n if os.environ.has_key('CPPFLAGS'):\n cpp = cpp + ' ' + os.environ['CPPFLAGS']\n opt = opt + ' ' + os.environ['CPPFLAGS']\n ldshared = ldshared + ' ' + os.environ['CPPFLAGS']\n\n cc_cmd = cc + ' ' + opt\n compiler.set_executables(\n preprocessor=cpp,\n compiler=cc_cmd,\n compiler_so=cc_cmd + ' ' + ccshared,\n compiler_cxx=cxx,\n linker_so=ldshared,\n linker_exe=cc)\n\n compiler.shared_lib_extension = so_ext",
"def detect(self):\n GCCLike.detect(self)\n\n if self._platform != platforms.lumin.NAME:\n err = self.detect_version_on_path_or_env('CPP', 'cpp', False)\n if err:\n return err\n err = self.detect_version_on_path_or_env('CC', 'clang',\n needs_version=self._suffix != '',\n allow_unversioned=not self._suffix)\n if err:\n return err\n err = self.detect_version_on_path_or_env('CXX', 'clang++',\n needs_version=self._suffix != '',\n allow_unversioned=not self._suffix)\n if err:\n return err\n err = self.detect_version_on_path_or_env('AS', 'llvm-as',\n needs_version=self._suffix != '',\n allow_unversioned=not self._suffix)\n if err:\n err = self.detect_version_on_path_or_env('AS', 'as', False)\n if err:\n return err\n err = self.detect_version_on_path_or_env('AR', 'ar', False)\n if err:\n return err\n else:\n err = self.add_cross_toolchain_tool('CPP', 'cpp')\n if err:\n return err\n err = self.add_cross_toolchain_tool('CC', 'clang')\n if err:\n return err\n err = self.add_cross_toolchain_tool('CXX', 'clang++')\n if err:\n return err\n err = self.add_cross_toolchain_tool('AS', 'as')\n if err:\n return err\n err = self.add_cross_toolchain_tool('AR', 'gcc-ar')\n if err:\n return err\n err = self.add_cross_toolchain_tool('OBJCOPY', 'objcopy')\n if err:\n return err\n err = self.add_cross_toolchain_tool('STRIP', 'strip')\n if err:\n return err\n\n return None",
"def set_jit_compile(self, enable):\n self._jit_compile = enable",
"def binary_compile_cmd(self):\n ld = self.nvcc_options_json[\"ld\"]\n objcopy = self.nvcc_options_json[\"objcopy\"]\n cmd = \" \".join([ld, \"-r -b binary -o {target} {src}\"])\n # Support models with >2GB constants on Linux only\n if is_linux():\n cmd += (\n f\" && {objcopy} --rename-section\"\n \" .data=.lrodata,alloc,load,readonly,data,contents\"\n \" {target} {target}\"\n )\n return cmd",
"def patch_for_specialized_compiler():\n if 'distutils' not in globals():\n # The module isn't available to be patched\n return\n\n if unpatched:\n # Already patched\n return\n\n unpatched.update(vars(distutils.msvc9compiler))\n\n distutils.msvc9compiler.find_vcvarsall = find_vcvarsall\n distutils.msvc9compiler.query_vcvarsall = query_vcvarsall",
"def compilePlatformBinary(self, target_platform):\r\n\r\n from ..util import compile_platform_binary\r\n\r\n platforms = supported_platforms(self.filename)\r\n\r\n if target_platform in platforms:\r\n button = QMessageBox.question(self, \"Platform binary already exists\",\r\n f'The FMU already contains a binary for the platform \"{target_platform}\".'\r\n ' Do you want to compile and overwrite the existing binary?')\r\n if button == QMessageBox.No:\r\n return\r\n\r\n if self.modelDescription.fmiVersion == '3.0':\r\n\r\n platform_map = {\r\n 'darwin64': 'x86_64-darwin',\r\n 'linux64': 'x86_64-linux',\r\n 'win32': 'x86-windows',\r\n 'win64': 'x86_64-windows',\r\n }\r\n \r\n target_platform = platform_map[target_platform]\r\n\r\n try:\r\n compile_platform_binary(self.filename, target_platform=target_platform)\r\n except Exception as e:\r\n QMessageBox.critical(self, \"Failed to compile platform binaries\", str(e))\r\n return\r\n\r\n self.load(self.filename)",
"def detect_compilers(self):\n # By users experience it is known which compiler flags are required\n # in some cases. This function tries to detect which compilers are used\n # and sets the flags accordingly\n\n print 'Detecting Fortran compiler...',\n if self.fc_is_intel():\n # The Intel FORTRAN compiler requires -nofor_main flag\n # for the linking and the -mp flag to maintain the\n # floating-point precision\n self.config.fcflags += ' -diag-disable vec -fltconsistency -fp_port'\n self.config.ldflags_c += ' ' # used to link\n self.config.ldflags_fc += ' '\n self.config.ld_fcmain = ' -nofor_main'\n self.config.noopt += ' -mp'\n self.testing = 0; # Cannot compile lintest with fc_main option\n print 'Intel'\n elif self.fc_is_gnu():\n print 'GNU'\n self.config.ld_fcmain = ''\n elif self.fc_is_xlf():\n self.config.fcflags += ' -qstrict -qthreaded'\n self.config.ld_fcmain = ''\n print 'IBM'\n elif self.fc_is_pgi():\n self.config.ldflags_c += ''\n self.config.ldflags_fc += ''\n self.config.ld_fcmain = ' -Mnomain'\n self.testing = 0; # Cannot compile lintest with fc_main option\n else:\n self.config.compiler = \"Unknown\"\n print 'unknown'\n\n print 'Detecting C compiler...',\n if self.cc_is_intel():\n self.config.compiler = \"Intel\"\n self.config.ccflags += ' -diag-disable vec'\n print 'Intel'\n elif self.cc_is_gnu():\n self.config.compiler = \"GNU\"\n print 'GNU'\n elif self.cc_is_xlc():\n self.config.compiler = \"XLC\"\n self.config.ccflags += ' -qstrict -qthreaded'\n print 'IBM'\n elif self.cc_is_pgi():\n self.config.compiler = \"PGI\"\n print 'PGI'\n else:\n print 'unknown'\n\n print 'Selected C compiler flags: '+self.config.ccflags\n print 'Selected Fortran compiler flags: '+self.config.fcflags\n print 'Selected loader flags (C main): '+self.config.ldflags_c\n print 'Selected loader flags (Fortran main): '+self.config.ldflags_fc\n return",
"def is_codegen(self):\r\n return self.has_label('codegen')",
"def dump_compiler(input_bytes):\n return dump_from_release(input_bytes, \"compiler\")",
"def compile(\n self,\n compiler: str,\n compile_dir: str = None,\n overwrite: bool = False,\n compile_options: dict = None\n ) -> str:\n\n # A bunch of ugly logic to check compile directory.\n if compile_dir is None:\n self.compile_dir = self.source_dir.joinpath('Run')\n else:\n self.compile_dir = pathlib.Path(compile_dir).absolute()\n if self.compile_dir.is_dir() is False:\n self.compile_dir.mkdir(parents=True)\n else:\n if self.compile_dir.is_dir() is True and overwrite is True:\n shutil.rmtree(str(self.compile_dir))\n self.compile_dir.mkdir()\n else:\n raise IOError(str(self.compile_dir) + ' directory already exists')\n\n # Add compiler and compile options as attributes and update if needed\n self.git_hash = get_git_revision_hash(self.source_dir)\n self.compiler = compiler\n\n if compile_options is not None:\n self.compile_options.update(compile_options)\n\n # Get directroy for setEnvar\n compile_options_file = self.source_dir.joinpath('compile_options.sh')\n\n # Write setEnvar file\n with open(compile_options_file,'w') as file:\n for option, value in self.compile_options.items():\n file.write(\"export {}={}\\n\".format(option, value))\n\n # Compile\n self.configure_log = subprocess.run(['./configure', compiler],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n cwd=self.source_dir)\n\n self.compile_log = subprocess.run(['./compile_offline_NoahMP.sh',\n str(compile_options_file.absolute())],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n cwd=self.source_dir)\n # Change to back to previous working directory\n\n # Add in unique ID file to match this object to prevent assosciating\n # this directory with another object\n self.object_id = str(uuid.uuid4())\n\n with open(self.compile_dir.joinpath('.uid'),'w') as f:\n f.write(self.object_id)\n\n if self.compile_log.returncode == 0:\n # Open permissions on compiled files\n subprocess.run(['chmod','-R','755',str(self.source_dir.joinpath('Run'))])\n\n # Wrf hydro always puts files in source directory under a new directory called 'Run'\n # Copy files to new directory if its not the same as the source code directory\n if str(self.compile_dir.parent) != str(self.source_dir):\n for file in self.source_dir.joinpath('Run').glob('*.TBL'):\n shutil.copyfile(file,str(self.compile_dir.joinpath(file.name)))\n\n shutil.copyfile(str(self.source_dir.joinpath('Run').joinpath('wrf_hydro.exe')),\n str(self.compile_dir.joinpath('wrf_hydro.exe')))\n\n #Remove old files\n shutil.rmtree(self.source_dir.joinpath('Run'))\n\n # Open permissions on copied compiled files\n subprocess.run(['chmod', '-R', '755', str(self.compile_dir)])\n\n #Get file lists as attributes\n # Get list of table file paths\n self.table_files = list(self.compile_dir.glob('*.TBL'))\n\n # Get wrf_hydro.exe file path\n self.wrf_hydro_exe = self.compile_dir.joinpath('wrf_hydro.exe')\n\n # Save the object out to the compile directory\n with open(self.compile_dir.joinpath('WrfHydroModel.pkl'), 'wb') as f:\n pickle.dump(self, f, 2)\n\n print('Model successfully compiled into ' + str(self.compile_dir))\n else:\n raise ValueError('Model did not successfully compile.')",
"def compile(self):\n return None # pragma: no cover",
"def setAllowAssembly(self,value):\n self.PDFreactorConfiguration.in1[\"allowAssembly\"] = value",
"def test_native_binary_target_no_warning(testdir: Testdir) -> None:\n with temp_env_update({'PRISMA_PY_DEBUG': '0'}):\n result = testdir.generate(options='binaryTargets = [\"native\"]')\n\n stdout = result.stdout.decode('utf-8')\n assert 'Warning' not in stdout\n assert 'binaryTargets option' not in stdout\n assert_no_generator_output(stdout)",
"def virtual_flag(self, value):\n if not isinstance(value, bool):\n raise TypeError(\"virtual_flag must be bool.\")\n self._virtual_flag = value",
"def compile(self, gen_optimizer, disc_optimizer):\n self.gen_optimizer = gen_optimizer\n self.disc_optimizer = disc_optimizer",
"def dbt_compiler(self):\n from dbt.compilation import Compiler as DbtCompiler\n\n self.dbt_compiler = DbtCompiler(self.dbt_config)\n return self.dbt_compiler",
"def compilation_options(self):\n #'-target','i386-pc-linux-gnu','-m32','-O2',\n opts = ['-Wno-implicit-function-declaration','-Wno-incompatible-library-redeclaration','-fno-vectorize',\n '-fno-slp-vectorize','-gline-tables-only','-Xclang','-disable-lifetime-markers','-Rpass=.*','-Rpass-missed=.*',\n '-Rpass-analysis=.*','-mllvm','-inline-threshold=15000','-Dassert=__VERIFIER_assert']\n if self._options.property.undefinedness():\n opts.append('-fsanitize=undefined')\n opts.append('-fno-sanitize=unsigned-integer-overflow')\n elif self._options.property.signedoverflow():\n opts.append('-fsanitize=signed-integer-overflow')\n opts.append('-fsanitize=shift')\n\n return opts",
"def run_compiler(executable):\n\n command = executable + sys.argv[1:]\n logging.debug('compilation: %s', command)\n result = subprocess.call(command)\n logging.debug('compilation exit code: %d', result)\n return result",
"def set_linker_script(self, op):\n self.__linker_script = [\"-T\", op]",
"def set_cb_engine(self, engine):\n if engine in ['numba','cython']:\n self.engine = engine\n else:\n raise ValueError('CB detection engine must be either numba or cython')",
"def _init_armclang(self, version):\n self.compiler = Compiler.ARMCLANG\n self.compiler_version = version\n\n self.c_compiler = 'armclang'\n self.cxx_compiler = 'armclang++'",
"def fix_compile(remove_flags):\n import distutils.ccompiler\n\n def _fix_compile(self, sources, output_dir=None, macros=None, include_dirs=None, debug=0,\n extra_preargs=None, extra_postargs=None, depends=None):\n for flag in remove_flags:\n if flag in self.compiler_so:\n self.compiler_so.remove(flag)\n macros, objects, extra_postargs, pp_opts, build = self._setup_compile(output_dir, macros,\n include_dirs, sources, depends, extra_postargs)\n cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)\n for obj in objects:\n try:\n src, ext = build[obj]\n except KeyError:\n continue\n self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)\n return objects\n\n distutils.ccompiler.CCompiler.compile = _fix_compile"
] | [
"0.530806",
"0.52711153",
"0.50642806",
"0.49096715",
"0.4879422",
"0.47851434",
"0.47082952",
"0.47038898",
"0.46928558",
"0.4621693",
"0.46136343",
"0.45942175",
"0.4587293",
"0.45800072",
"0.45751333",
"0.45120305",
"0.44635862",
"0.44575647",
"0.44558978",
"0.4375184",
"0.43684816",
"0.43550494",
"0.4326995",
"0.42998838",
"0.4269335",
"0.42005667",
"0.41969347",
"0.4195169",
"0.41888937",
"0.4161917"
] | 0.8249588 | 0 |
Sets the encrypted_binaries of this RuntimeAntiMalwareRule. | def encrypted_binaries(self, encrypted_binaries):
self._encrypted_binaries = encrypted_binaries | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def with_binaries(self, *args, **kw):\n for arg in args:\n if isinstance(arg, dict):\n self._binaries.update(arg)\n self._binaries.update(kw)\n return self",
"def with_binaries(self, *args, **kw):\r\n for arg in args:\r\n if isinstance(arg, dict):\r\n self._binaries.update(arg)\r\n self._binaries.update(kw)\r\n return self",
"def _binaries_to_symbolize(self):\n raise NotImplementedError()",
"def _disable_encryption(self):\n # () -> None\n self.encrypt = self._disabled_encrypt\n self.decrypt = self._disabled_decrypt",
"def _executables(self):\n # LOG: change the processing_type property to some other name or include in file_string\n self.set_property('processing_type', 'executable')\n self.make_dangerous('executable')",
"def volume_encryption_keys(self, volume_encryption_keys):\n\n self._volume_encryption_keys = volume_encryption_keys",
"def test_teardown_with_n2n_encryption(self):\n CbServer.use_https = True\n for level in [\"strict\", \"control\", \"all\"]:\n self.x509 = x509main(host=self.master, standard=self.standard,\n encryption_type=self.encryption_type,\n passphrase_type=self.passphrase_type,\n wildcard_dns=self.wildcard_dns)\n self.x509.generate_multiple_x509_certs(servers=self.servers[:self.nodes_init])\n self.x509.upload_root_certs(self.master)\n self.x509.upload_node_certs(servers=self.servers[:self.nodes_init])\n ntonencryptionBase().setup_nton_cluster([self.master], clusterEncryptionLevel=level)\n self.x509.teardown_certs(servers=self.servers)\n ntonencryptionBase().disable_nton_cluster([self.master])",
"def _set_executables(self):\n\n # add path from argument to env\n if self.home_path:\n if self.env:\n self.env += f\":{self.home_path}\"\n else:\n self.env = self.home_path\n\n # set fuzzer_exe \n self.fuzzer_exe = self._search_for_executable(self.fuzzer_exe)\n L.debug(\"Will use %s as fuzzer executable.\", self.fuzzer_exe)\n\n # set compiler_exe\n if self.compiler_exe:\n self.compiler_exe = self._search_for_executable(self.compiler_exe)\n L.debug(\"Will use %s as fuzzer compiler.\", self.compiler_exe)\n\n # set additional executables\n for exe_name, exe_file in self.EXECUTABLES.items():\n self.EXECUTABLES[exe_name] = self._search_for_executable(exe_file)",
"def set_passwords(self, passwords):\n self.passwords = {}\n for user_name in passwords:\n self.passwords[user_name] = sha512_crypt.hash(\n passwords[user_name], rounds=5000)",
"def fill_disable_ssl_verification(self, data):\n disable_ssl_verification = get_optional_value(data, self.DISABLE_SSL, False)\n self.verify_ssl = not bool(disable_ssl_verification)",
"def _disabled_encrypt(self, *args, **kwargs):\n raise NotImplementedError('\"encrypt\" is not supported by the \"{}\" algorithm'.format(self.java_name))",
"def set_programs(self, programs):\n self._programs = programs",
"def static_ips(self, static_ips):\n\n self._static_ips = static_ips",
"def build_passwords(self, ad_hoc_command, runtime_passwords):\n passwords = super(RunAdHocCommand, self).build_passwords(ad_hoc_command, runtime_passwords)\n cred = ad_hoc_command.credential\n if cred:\n for field in ('ssh_key_unlock', 'ssh_password', 'become_password'):\n value = runtime_passwords.get(field, cred.get_input('password' if field == 'ssh_password' else field, default=''))\n if value not in ('', 'ASK'):\n passwords[field] = value\n return passwords",
"def elf_images_none(self, elf_images_none):\n\n self._elf_images_none = elf_images_none",
"def should_check_for_binary_versions(self):\n explicitly_asked_for_binaries_check = 'CHECK_BINARIES_VERSIONS' in config_vars\n update_was_requested = \"__UPDATE_INSTALLED_ITEMS__\" in config_vars.get(\"MAIN_INSTALL_TARGETS\", []).list()\n retVal = explicitly_asked_for_binaries_check or update_was_requested\n return retVal",
"def program_ids(self, program_ids):\n\n self._program_ids = program_ids",
"def elf_images_some(self, elf_images_some):\n\n self._elf_images_some = elf_images_some",
"def __attrs_post_init__(self):\n # () -> None\n if self.java_name == \"AESWrap\":\n self._disable_encryption()",
"def _encrypt(self):\n self._outfile = os.path.join(self.dest, self.encrypted_file)\n self._infile = self.plain_file\n self._log.info(\"Encrypting '%s' to '%s'\", self.plain_file, self._outfile)\n with open(self.plain_file, \"rb\") as plain_file:\n openssl(\n \"enc\",\n \"-aes-256-cbc\",\n \"-pass\",\n \"file:{secret}\".format(secret=self.secret.keyfile),\n _in=plain_file,\n _out=self._outfile,\n )\n self._log.info(\"File '%s' encrypted to '%s'\", self.plain_file, self._outfile)\n return True",
"def setMergeByteArrays(self,value):\n self.PDFreactorConfiguration.in1[\"mergeByteArrays\"] = value",
"def update(self, instance, validated_data):\n\n password = validated_data.get('password', None)\n if password is not None:\n validated_data['password'] = AESCipher(password, self.context['request'].user.password).encrypt()\n return super().update(instance, validated_data)",
"def build_passwords(self, instance, runtime_passwords):\n return {\n 'yes': 'yes',\n 'no': 'no',\n '': '',\n }",
"def __load_encrypted_states(self) -> None:\n for section in config.sections():\n value = config.getstr('encryption', section=section)\n if value and value == self.encryption_short_name:\n self._enabled_tabs[section] = self.encrypt",
"def _encryptDBPass():\n #run encrypt tool on user given password\n controller.CONF[\"ENCRYPTED_DB_PASS\"] = utils.encryptEngineDBPass(password=controller.CONF[\"DB_PASS\"],\n maskList=masked_value_set)",
"def setAllowAssembly(self,value):\n self.PDFreactorConfiguration.in1[\"allowAssembly\"] = value",
"def _binary_app(self):\n self.make_binary()",
"def test_non_ssl_ports_after_enabling_tls(self):\n self.enable_tls_encryption_cli_on_nodes(nodes=[self.cluster.master])\n CbServer.use_https = True\n rest = RestConnection(self.cluster.master)\n for non_ssl_request in self.sample_urls_map.keys():\n api = non_ssl_request % self.cluster.master.ip\n try:\n rest._http_request(api=api, timeout=10)\n except Exception as _:\n ssl_request = self.sample_urls_map[non_ssl_request]\n api = ssl_request % self.cluster.master.ip\n status, content, response = rest._http_request(api=api, timeout=10)\n if not status:\n self.fail(\"{0} failed\".format(api))\n else:\n self.log.error(\"{0} worked\".format(api))\n\n self.disable_n2n_encryption_cli_on_nodes(nodes=[self.cluster.master])\n CbServer.use_https = False\n rest = RestConnection(self.cluster.master)\n for non_ssl_request in self.sample_urls_map.keys():\n api = non_ssl_request % self.cluster.master.ip\n status, content, response = rest._http_request(api=api, timeout=10)\n if not status:\n self.fail(\"{0} api failed with content {1}\".format(api, content))",
"def encrypt(self, plain):\n plain = bytearray(plain)\n key_len = len(self.key)\n env = bytes(c ^ self.key[i % key_len] for i, c in enumerate(plain))\n return env",
"def softwares(self, softwares):\n\n self._softwares = softwares"
] | [
"0.54808533",
"0.54465693",
"0.49112743",
"0.4622234",
"0.44396767",
"0.4362359",
"0.42449024",
"0.42383167",
"0.41990164",
"0.4191991",
"0.4178983",
"0.41608432",
"0.4141621",
"0.41358182",
"0.41355053",
"0.40910557",
"0.40817374",
"0.4080167",
"0.40563214",
"0.40544382",
"0.40445495",
"0.40325555",
"0.40274104",
"0.4022872",
"0.40105918",
"0.40030175",
"0.3997805",
"0.3992444",
"0.39734933",
"0.39684883"
] | 0.8090969 | 0 |
Sets the execution_flow_hijack of this RuntimeAntiMalwareRule. | def execution_flow_hijack(self, execution_flow_hijack):
self._execution_flow_hijack = execution_flow_hijack | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def flow(self, flow):\n\n self._flow = flow",
"def update_flow(self, flow):\r\n self.flow = flow",
"def action_flow(self, action_flow):\n\n self._action_flow = action_flow",
"def set_mass_flow(self):\n self.exh.mdot_exp = self.exh.flow_array * self.exh.rho_array\n self.exh.C = self.exh.mdot_exp * self.exh.c_p_air\n self.exh.enthalpy_flow = self.exh.C * self.exh.T_inlet_array",
"def set_cache(self, eval_hash: str, task_hash: str, args_hash: str, value: Any) -> None:\n self.backend.set_eval_cache(eval_hash, task_hash, args_hash, value, value_hash=None)",
"def set_suppress_flow(self):\n self.suppressed = self.packet_count\n self.fcip.update_one({'hash': self.fcip_hash},\n {'$set': {'suppressed': self.suppressed},})",
"def setFlow(self, edge, value):\r\n self.flow[edge] = value\r\n self.flow[edge[::-1]] = - value",
"def grr_set_no_flow_timeout(line: Text) -> None:\n del line # Unused.\n magics_impl.grr_set_no_flow_timeout_impl()",
"def execution_type(self, execution_type):\n self._execution_type = execution_type",
"def set_cur_flow(self, flow):\n self.cur_flow = flow",
"def grr_set_default_flow_timeout(line: Text) -> None:\n del line # Unused.\n magics_impl.grr_set_default_flow_timeout_impl()",
"def _as_of_flow_mod(self, command):\n of_flow_mod = super()._as_of_flow_mod(command)\n of_flow_mod.cookie_mask = self.cookie_mask\n of_actions = [action.as_of_action() for action in self.actions]\n of_instruction = InstructionApplyAction(actions=of_actions)\n of_flow_mod.instructions = [of_instruction]\n return of_flow_mod",
"def set_assignment_game_frame(self, assignment):\n\n self.frames[\"game\"].set_assignment(assignment)",
"def flow_contents(self, flow_contents):\n if flow_contents is None:\n raise ValueError(\"Invalid value for `flow_contents`, must not be `None`\")\n\n self._flow_contents = flow_contents",
"def update_executionid(self, executionid):\n self.executionid = executionid",
"def grr_set_flow_timeout(line: Text) -> None:\n args = grr_set_flow_timeout.parser.parse_args(shlex.split(line))\n magics_impl.grr_set_flow_timeout_impl(args.timeout)",
"def add_flow(self, dp, priority, match, actions, idle_timeout=0, hard_timeout=0):\r\n ofproto = dp.ofproto\r\n parser = dp.ofproto_parser\r\n inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS, actions)]\r\n mod = parser.OFPFlowMod(datapath=dp, priority=priority,\r\n idle_timeout=idle_timeout,\r\n hard_timeout=hard_timeout,\r\n match=match, instructions=inst)\r\n dp.send_msg(mod)",
"def execution_id(self, execution_id):\n if execution_id is None:\n raise ValueError(\"Invalid value for `execution_id`, must not be `None`\") # noqa: E501\n\n self._execution_id = execution_id",
"def set_flow_corrected(self):\n self.exh.temp_v_press_fit = (\n np.polyfit(self.exh.pressure_drop[0:4],\n self.exh.T_array[0:4], 2) ) \n self.flow_data.T_hx = np.polyval(self.exh.temp_v_press_fit,\n self.flow_data.pressure_drop) \n self.flow_data.flow = ( self.flow_data.flow_trash *\n self.flow_data.T_hx / self.flow_data.T )",
"def set_execution_type(self, type):\n self.execution_type = type",
"def set_matching_rule(self, matching_rule):\n if not ((matching_rule == self.BPQ_MATCHING_RULE_EXACT) or\n (matching_rule == self.BPQ_MATCHING_RULE_TOKENS) or\n (matching_rule == self.BPQ_MATCHING_RULE_NEVER)):\n raise ValueError\n \n self.matching_rule = matching_rule\n return",
"def set_trace(self, frame=None):\n if frame is None:\n frame = sys._getframe().f_back\n self.reset()\n while frame:\n frame.f_trace = self.trace_dispatch\n self.botframe = frame\n frame = frame.f_back\n self.set_step()\n sys.settrace(self.trace_dispatch)",
"def set_flow_control(self, iface, mode):\n pytest.skip(\"Method is not supported by Iperf TG\")",
"def flow_encoding_version(self, flow_encoding_version):\n\n self._flow_encoding_version = flow_encoding_version",
"def power_play_goals(self, power_play_goals):\n\n self._power_play_goals = power_play_goals",
"def data_flow(self, data_flow):\n\n self._data_flow = data_flow",
"def flow_file_expiration(self, flow_file_expiration):\n\n self._flow_file_expiration = flow_file_expiration",
"def _restart_attack(self):\n self._stop_attack()\n self._competing_chain_tip_antipast = set(self._honest_dag._antipast)\n self._currently_attacked_block_gid = self._honest_dag._coloring_tip_gid\n self._virtual_competing_chain_block_parents = \\\n self._get_competing_chain_tip_parents(self._currently_attacked_block_gid,\n self._competing_chain_tip_antipast,\n self[self._honest_dag._coloring_tip_gid].get_parents())",
"def set_trace(self, frame=None):\r\n if frame is None:\r\n frame = sys._getframe().f_back\r\n # See pudb issue #52. If this works well enough we should upstream to\r\n # stdlib bdb.py.\r\n #self.reset()\r\n while frame:\r\n frame.f_trace = self.trace_dispatch\r\n self.botframe = frame\r\n frame = frame.f_back\r\n self.set_step()\r\n sys.settrace(self.trace_dispatch)",
"def workflow_definition(self, workflow_definition):\n\n self._workflow_definition = workflow_definition"
] | [
"0.56771505",
"0.53746784",
"0.5147958",
"0.4890891",
"0.4810104",
"0.48073435",
"0.47732478",
"0.4772554",
"0.47238106",
"0.46238196",
"0.461203",
"0.45992157",
"0.45734128",
"0.45235664",
"0.44804186",
"0.44751072",
"0.44331065",
"0.442998",
"0.44087604",
"0.43752292",
"0.43197134",
"0.4317414",
"0.43074274",
"0.42477927",
"0.42372963",
"0.41931862",
"0.4189511",
"0.41761407",
"0.4162121",
"0.41381925"
] | 0.80146104 | 0 |
Sets the intelligence_feed of this RuntimeAntiMalwareRule. | def intelligence_feed(self, intelligence_feed):
self._intelligence_feed = intelligence_feed | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def microsoft_emerging_threat_feed(self) -> 'outputs.MSTIDataConnectorDataTypesResponseMicrosoftEmergingThreatFeed':\n return pulumi.get(self, \"microsoft_emerging_threat_feed\")",
"def feed(self):\n # or intelligence discard\n pass",
"def exempt_feed_retroactive(db, c, feed_uid, **kwargs):\n c.execute(\"\"\"update fm_items\n set item_rating=0, item_rule_uid=NULL\n where item_feed_uid=? and item_content!='' and exists (\n select rule_uid from fm_rules\n where rule_feed_uid is null and item_rule_uid=rule_uid\n )\"\"\", [feed_uid])",
"def feed_id(self, feed_id):\n\n self._feed_id = feed_id",
"def custom_feed(self, custom_feed):\n\n self._custom_feed = custom_feed",
"def wrap_feed(feed, max_iter=-1, **devtype):\n return FeedMover(FeedLimiter(feed, max_iter), **devtype)",
"def set_is_ai(self, is_ai):\n self.__is_ai = is_ai",
"def parse_feed(self):\n parsed_feed = feedparser.parse(self.rss_url)\n # Check for malformed feed\n if parsed_feed['bozo']:\n raise Exception('malformed rss feed!')\n self.parsed_feed = parsed_feed",
"def enable_forecast(self, enable_forecast):\n\n self._enable_forecast = enable_forecast",
"def enable_forecast(self, enable_forecast):\n\n self._enable_forecast = enable_forecast",
"def feed_treat(self):\r\n # TODO\r\n pass",
"def set_eval(self, eval: bool):\n self.brain.set_eval(eval)",
"def select_feed(self, feed: None | Feed = None) -> None:\n self.current_feed = feed\n self.refresh()",
"def get_feed(data_feeds, data_loader, model_config, is_train):\n input_feed_dict = {}\n excluded_example_count = 0\n\n for data_feed in data_feeds:\n tmp_contexts, tmp_targets, tmp_lines = [], [], []\n tmp_masked_contexts, tmp_masked_words = [], []\n example_in_batch_count = 0\n while example_in_batch_count < model_config.batch_size:\n if model_config.it_train:\n example = next(data_loader.data_it, None)\n else:\n # TODO: not tested\n example = data_loader.get_sample()\n\n # mainly used in evaluation when testset file reaches EOF, create a dummy input to feed model\n if example is None:\n example = {}\n example['contexts'] = [0] * model_config.max_context_len\n example['target'] = {'pos_id': 0,\n 'abbr_id': 0,\n 'abbr': None,\n 'sense_id': 0,\n 'sense': None,\n 'line_id': data_loader.size,\n 'inst_id': 0\n }\n example['line'] = ''\n # sample['def'] = [0] * model_config.max_def_len\n # sample['stype'] = 0\n excluded_example_count += 1 # Assume eval use single GPU\n\n # print(example_in_batch_count)\n # print(excluded_example_count)\n # print(example)\n\n tmp_contexts.append(example['contexts'])\n tmp_targets.append(example['target'])\n tmp_lines.append(example['line'])\n # print('input:\\t%s\\t%s.' % (sample['line'], sample['target']))\n if model_config.lm_mask_rate and 'cur_masked_contexts' in example:\n tmp_masked_contexts.append(example['cur_masked_contexts'])\n tmp_masked_words.append(example['masked_words'])\n\n # print('done one example, current len(batch)=%d' % len(tmp_contexts))\n example_in_batch_count += 1\n\n for step in range(model_config.max_context_len):\n input_feed_dict[data_feed['contexts'][step].name] = [\n tmp_contexts[batch_idx][step]\n for batch_idx in range(model_config.batch_size)]\n\n if model_config.hub_module_embedding:\n input_feed_dict[data_feed['text_input'].name] = [\n tmp_lines[batch_idx]\n for batch_idx in range(model_config.batch_size)]\n\n input_feed_dict[data_feed['abbr_inp'].name] = [\n tmp_targets[batch_idx]['abbr_id']\n for batch_idx in range(model_config.batch_size)\n ]\n\n input_feed_dict[data_feed['sense_inp'].name] = [\n tmp_targets[batch_idx]['sense_id']\n for batch_idx in range(model_config.batch_size)\n ]\n\n if model_config.lm_mask_rate and tmp_masked_contexts:\n i = 0\n while len(tmp_masked_contexts) < model_config.batch_size:\n tmp_masked_contexts.append(tmp_masked_contexts[i % len(tmp_masked_contexts)])\n tmp_masked_words.append(tmp_masked_words[i % len(tmp_masked_contexts)])\n i += 1\n\n for step in range(model_config.max_context_len):\n input_feed_dict[data_feed['masked_contexts'][step].name] = [\n tmp_masked_contexts[batch_idx][step]\n for batch_idx in range(model_config.batch_size)]\n\n for step in range(model_config.max_subword_len):\n input_feed_dict[data_feed['masked_words'][step].name] = [\n tmp_masked_words[batch_idx][1][step]\n for batch_idx in range(model_config.batch_size)]\n\n return input_feed_dict, excluded_example_count, tmp_targets",
"def fit(self, train_set, val_set=None):\n Recommender.fit(self, train_set, val_set)\n\n if self.trainable:\n # user-item interactions\n (rat_uid, rat_iid, rat_val) = train_set.uir_tuple\n\n # item-item affinity network\n map_iid = train_set.item_indices\n (net_iid, net_jid, net_val) = train_set.item_graph.get_train_triplet(\n map_iid, map_iid\n )\n if [self.train_set.min_rating, self.train_set.max_rating] != [0, 1]:\n if self.train_set.min_rating == self.train_set.max_rating:\n rat_val = scale(rat_val, 0.0, 1.0, 0.0, self.train_set.max_rating)\n else:\n rat_val = scale(\n rat_val,\n 0.0,\n 1.0,\n self.train_set.min_rating,\n self.train_set.max_rating,\n )\n\n if [min(net_val), max(net_val)] != [0, 1]:\n if min(net_val) == max(net_val):\n net_val = scale(net_val, 0.0, 1.0, 0.0, max(net_val))\n else:\n net_val = scale(net_val, 0.0, 1.0, min(net_val), max(net_val))\n\n rat_val = np.array(rat_val, dtype=\"float32\")\n rat_uid = np.array(rat_uid, dtype=\"int32\")\n rat_iid = np.array(rat_iid, dtype=\"int32\")\n\n net_val = np.array(net_val, dtype=\"float32\")\n net_iid = np.array(net_iid, dtype=\"int32\")\n net_jid = np.array(net_jid, dtype=\"int32\")\n\n if self.verbose:\n print(\"Learning...\")\n\n from cornac.models.mcf import mcf\n\n res = mcf.mcf(\n rat_uid,\n rat_iid,\n rat_val,\n net_iid,\n net_jid,\n net_val,\n k=self.k,\n n_users=train_set.num_users,\n n_items=train_set.num_items,\n n_ratings=len(rat_val),\n n_edges=len(net_val),\n n_epochs=self.max_iter,\n lamda=self.lamda,\n learning_rate=self.learning_rate,\n gamma=self.gamma,\n init_params={\"U\": self.U, \"V\": self.V, \"Z\": self.Z},\n verbose=self.verbose,\n seed=self.seed,\n )\n\n self.U = np.asarray(res[\"U\"])\n self.V = np.asarray(res[\"V\"])\n self.Z = np.asarray(res[\"Z\"])\n\n if self.verbose:\n print(\"Learning completed\")\n elif self.verbose:\n print(\"%s is trained already (trainable = False)\" % self.name)\n\n return self",
"def change_feed(self) -> Optional[pulumi.Input['ChangeFeedArgs']]:\n return pulumi.get(self, \"change_feed\")",
"def set_train_mode(training, mnet, hnet, hhnet, dis):\n for net in [mnet, hnet, hhnet, dis]:\n if net is not None:\n if training:\n net.train()\n else:\n net.eval()",
"def set_as_feedback(self):\n if self.type == MessageTypes.AGENT:\n raise InvalidMessageTypeError(\n 'Cannot set feedback as True when msg is of type Agent')\n self.feedback = True",
"def set_suppress_flow(self):\n self.suppressed = self.packet_count\n self.fcip.update_one({'hash': self.fcip_hash},\n {'$set': {'suppressed': self.suppressed},})",
"def refresh_feed(self, feed: Feed) -> None:\n self._update_thread.force_refresh_feed(feed)",
"def __init__(__self__, *,\n bing_safety_phishing_url: 'outputs.MSTIDataConnectorDataTypesResponseBingSafetyPhishingURL',\n microsoft_emerging_threat_feed: 'outputs.MSTIDataConnectorDataTypesResponseMicrosoftEmergingThreatFeed'):\n pulumi.set(__self__, \"bing_safety_phishing_url\", bing_safety_phishing_url)\n pulumi.set(__self__, \"microsoft_emerging_threat_feed\", microsoft_emerging_threat_feed)",
"def feed_read(self, feed):\n if feed != self.current_feed:\n return\n self.action_mark_all_read.setDisabled(True)\n for item in self.current_feed.get_items():\n self.update_item(item)",
"def apply_ruleset(self, ruleset):\n updates = [self._get_lexicon_update(ruleset['lexicon'])]\n updates += ruleset['rules']\n self.apply_updates(updates)",
"def torch_learner(self, x):\n self._torch_learner = x",
"def set_atom_intention(self, atom_name, intention):\n source, clone = self._atomdetail_by_name(atom_name, clone=True)\n if source.intention != intention:\n clone.intention = intention\n self._with_connection(self._save_atom_detail, source, clone)",
"def __init__(self, main_feed):\n self.main_feed = main_feed",
"def feed_link(self):\n return self.url.replace(\"http://\", \"feed://\")",
"def setAgility(self, agility):\n self.ag = agility",
"def initialize_ai(self):\n\n self.gid, self.genome = constants.genomes_to_run[self.identifier]\n self.genome.fitness = -1\n self.net = neat.nn.FeedForwardNetwork.create(self.genome, constants.conf)\n # self.net = neat.nn.RecurrentNetwork\n # .create(self.genome, constants.conf)",
"def _set_eval(self):\n\n if self.model.__dict__['training']:\n self.model.eval()"
] | [
"0.5143359",
"0.4976248",
"0.49088368",
"0.4797253",
"0.4771624",
"0.4526682",
"0.44206625",
"0.43195665",
"0.4269245",
"0.4269245",
"0.4240957",
"0.4240386",
"0.41880623",
"0.4182926",
"0.41601658",
"0.41359985",
"0.41315413",
"0.41298312",
"0.4120521",
"0.40738678",
"0.40468523",
"0.4036378",
"0.40243694",
"0.40218073",
"0.40081662",
"0.40072095",
"0.3999669",
"0.3987731",
"0.3958229",
"0.39539987"
] | 0.7596608 | 0 |
Sets the reverse_shell of this RuntimeAntiMalwareRule. | def reverse_shell(self, reverse_shell):
self._reverse_shell = reverse_shell | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_reverse(rev):\n global is_reverse\n is_reverse = rev",
"def reverse(self):\n self.command.append(\"reverse\")\n return self",
"def autodefined_reverse_flag(self) -> pulumi.Input['ResolverConfigAutodefinedReverseFlag']:\n return pulumi.get(self, \"autodefined_reverse_flag\")",
"def autodefined_reverse_flag(self) -> pulumi.Output['ResolverConfigAutodefinedReverseFlag']:\n return pulumi.get(self, \"autodefined_reverse_flag\")",
"def setReversible(self, *args):\n return _libsbml.Reaction_setReversible(self, *args)",
"def reverse(self):\n enabled = self.lib.iperf_get_test_reverse(self._test)\n\n if enabled:\n self._reverse = True\n else:\n self._reverse = False\n\n return self._reverse",
"def to_reverse_rule(self) -> \"ReverseRule\":\n assert (\n self.is_equivalence()\n ), \"reverse rule can only be created for equivalence rules\"\n return ReverseRule(self)",
"def on_reverse(self, callback):\n self._reverse_callback = callback if callable(callback) else _void",
"def web_shell(self, web_shell):\n\n self._web_shell = web_shell",
"def __reversed__(self):\n return reverse(self)",
"def reverse(self) -> str:\n return pulumi.get(self, \"reverse\")",
"def complement_reverse(self):\n self._data.switch_complement(whether=False)\n return self",
"def __neg__(self):\n try:\n return self._reverse\n except AttributeError:\n self._reverse = self.__class__(self.db, self.id,\n reversePath=self)\n return self._reverse",
"def autodefined_reverse(self) -> pulumi.Output['ResolverConfigAutodefinedReverse']:\n return pulumi.get(self, \"autodefined_reverse\")",
"def shell(self, shell):\n\n self._shell = shell",
"def _set_shell_obj(self, obj):\n self._shell_obj = weakref.ref(obj)",
"def __init__(__self__, *,\n autodefined_reverse_flag: pulumi.Input['ResolverConfigAutodefinedReverseFlag'],\n resource_id: pulumi.Input[str]):\n pulumi.set(__self__, \"autodefined_reverse_flag\", autodefined_reverse_flag)\n pulumi.set(__self__, \"resource_id\", resource_id)",
"def reverse(self):\n if self._can_reverse():\n list.reverse(self)",
"def reverse(self, *args, **kwargs):\n return reverse(*args, **kwargs)",
"def invert(self, val):\n self._invert = val\n if val:\n self.write_cmd(self.CMD_SET_DISP_REVERSE)\n else:\n self.write_cmd(self.CMD_SET_DISP_NORMAL)",
"def unsetReversible(self):\n return _libsbml.Reaction_unsetReversible(self)",
"def reverse_edges(self, edges, inplace=True, multiedges=None):\n tempG = self if inplace else copy(self)\n for e in edges:\n tempG.reverse_edge(e,inplace=True,multiedges=multiedges)\n if not inplace:\n return tempG",
"def reverse(self):\n self.left_motor.reverse()\n self.right_motor.reverse()",
"def reversed(self):\n return LINE(*self.elems,**{'reverse':(not self.reverse)})",
"def stack_sw_rev(self, stack_sw_rev):\n self._stack_sw_rev = stack_sw_rev",
"def mirror(self):\n self.__mirror = not self.__mirror",
"def reverse(self):\r\n if self.value == \"=\":\r\n self.values = \"!=\"\r\n elif self.value == \"!=\":\r\n self.values = \"=\"\r\n elif self.value == \"<\":\r\n self.values = \">=\"\r\n elif self.value == \"<=\":\r\n self.values = \">\"\r\n elif self.value == \">\":\r\n self.values = \"<=\"\r\n elif self.value == \">=\":\r\n self.values = \"<\"\r\n elif self.value == \"+\":\r\n self.values = \"-\"\r\n elif self.value == \"-\":\r\n self.values = \"+\"",
"def reverseString(self, s: List[str]) -> None:\n size = len(s)\n for i in range(size//2):\n s[i], s[~i] = s[~i], s[i]\n # s[i], s[size-i-1] = s[size-i-1], s[i]\n\n # s[:] = s[::-1]",
"def reverse(self):\n self._sequence.reverse()",
"def _channel_invoke_shell(self) -> None:\n self._shell = True\n self.channel.shell()"
] | [
"0.539246",
"0.51156425",
"0.50046355",
"0.48777938",
"0.4789494",
"0.4750256",
"0.47339043",
"0.46712825",
"0.46478534",
"0.46473414",
"0.46026647",
"0.45961767",
"0.4544838",
"0.45404893",
"0.45341587",
"0.4533405",
"0.4475226",
"0.44358587",
"0.44093126",
"0.436193",
"0.43000773",
"0.4299309",
"0.4295892",
"0.4291466",
"0.42573762",
"0.4255404",
"0.42452443",
"0.4238603",
"0.4235339",
"0.4226284"
] | 0.8084292 | 0 |
Sets the service_unknown_origin_binary of this RuntimeAntiMalwareRule. | def service_unknown_origin_binary(self, service_unknown_origin_binary):
self._service_unknown_origin_binary = service_unknown_origin_binary | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def user_unknown_origin_binary(self, user_unknown_origin_binary):\n\n self._user_unknown_origin_binary = user_unknown_origin_binary",
"def set_as_not_handled(self):\n if self.type == MessageTypes.AGENT:\n raise InvalidMessageTypeError(\n 'Cannot set not_handled as True when msg is of type Agent')\n self.not_handled = True",
"def set_as_handled(self):\n self.not_handled = False",
"def set_proxy_unsafe(self, proxy_string):\n\t\tself.__proxy = proxy_string",
"def setUnknown(self, key, val):\n # type: (str, tp.Any)->None\n if 'unknown' not in self._ifAttributes:\n self._ifAttributes['unknown'] = {}\n self._ifAttributes['unknown'][key] = val",
"def suspicious_elf_headers(self, suspicious_elf_headers):\n\n self._suspicious_elf_headers = suspicious_elf_headers",
"def run_unknown(self, line):\n pass",
"def unknown(self):\n self.add_file_string('Unknown file')\n self.should_copy = False",
"def set_state_unknown(value):\n\n global STATE_UNKNOWN\n STATE_UNKNOWN = value",
"def set_disable_apicsign(self, bDisableAPIC):\n\t\tcall_sdk_function('PrlVmCfg_SetDisableAPICSign', self.handle, bDisableAPIC)",
"def set_layer_os(layer, origin_str, binary):\n # check if the layer already has an os-release set and if not,\n # guess based on the binary\n if layer.os_guess:\n layer.origins.add_notice_to_origins(origin_str, Notice(\n formats.os_release.format(os_style=layer.os_guess), 'info'))\n else:\n layer.os_guess = command_lib.check_os_guess(binary)\n if layer.os_guess:\n layer.origins.add_notice_to_origins(origin_str, Notice(\n formats.os_style_guess.format(\n package_manager=bin, os_list=layer.os_guess), 'info'))\n else:\n layer.origins.add_notice_to_origins(origin_str, Notice(\n errors.no_etc_release, 'warning'))",
"def elf_images_none(self, elf_images_none):\n\n self._elf_images_none = elf_images_none",
"def _unknown_app(self):\n self.make_unknown()",
"def is_unknown_error(self):\n return self._tag == 'unknown_error'",
"def ignore_local_proxy_environment_variables(self):\n self._ignore_local_proxy = True",
"def __init__(__self__, *,\n other_native_crash: bool):\n pulumi.set(__self__, \"other_native_crash\", other_native_crash)",
"def labelUnknown(self):\n self.satisfiability = Satisfiability.UNKNOWN\n self.model = None\n self.unsatCore = []",
"def __set_unknown_effect(self, hgvs_str):\n unknown_effect_list = ['c.?', '?']\n if hgvs_str.lower() in unknown_effect_list:\n self.unknown_effect = True\n elif hgvs_str.startswith(\"(\"):\n self.unknown_effect = True\n else:\n self.unknown_effect = False",
"def elf_image_num_not(self, elf_image_num_not):\n\n self._elf_image_num_not = elf_image_num_not",
"def ignore_missing_v_net_service_endpoint(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"ignore_missing_v_net_service_endpoint\")",
"def set_remote(self, bRemote):\n\t\tcall_sdk_function('PrlVmDev_SetRemote', self.handle, bRemote)",
"def setAllowAssembly(self,value):\n self.PDFreactorConfiguration.in1[\"allowAssembly\"] = value",
"def testUnknownAgent(self):\n \n p = Parser(dora_domain.split(\"\\n\"))\n dom = domain.Domain.parse(p.root)\n p = Parser(unknownagent.split(\"\\n\"))\n try:\n scen = scenario.MapsimScenario.parse(p.root, dom)\n except ParseError, e:\n self.assertEqual(e.token.string, \"r2d3\")\n self.assertEqual(e.token.line, 10)\n return\n self.fail(\"Unknown agent triggered no error\")",
"def allow_undefined(self, value):\n self.is_allowUndefined = value\n return self",
"def set_unrecognized_field(self, key, value, variant):\n if not isinstance(variant, Variant):\n raise TypeError('Variant type %s is not valid.' % variant)\n self.__unrecognized_fields[key] = value, variant",
"def elf_image_num_not_in(self, elf_image_num_not_in):\n\n self._elf_image_num_not_in = elf_image_num_not_in",
"def initialize_non_bfd(self, architecture=None, machine=None,\n endian=ENDIAN_UNKNOWN):\n\n if None in [architecture, machine, endian]:\n return\n\n self.architecture = architecture\n self.machine = machine\n self.endian = endian",
"def test_no_recognized_root(self):\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"set\", \"wrong_root.agent_name\", \"value\"],\n standalone_mode=False,\n )\n assert result.exit_code == 1\n assert (\n result.exception.message\n == \"The root of the dotted path must be one of: {}\".format(\n ALLOWED_PATH_ROOTS\n )\n )",
"def dd_origin(self, value):\n # type: (Optional[Text]) -> None\n with self._lock:\n if value is None:\n if ORIGIN_KEY in self._meta:\n del self._meta[ORIGIN_KEY]\n return\n self._meta[ORIGIN_KEY] = value",
"def set_mask_bad(self, _=None):\n self.set_mask_type(\"bad\")"
] | [
"0.7573631",
"0.46184886",
"0.46148548",
"0.44912875",
"0.44772303",
"0.44517532",
"0.44046047",
"0.43538424",
"0.43421948",
"0.43409202",
"0.43242526",
"0.43002045",
"0.4295388",
"0.42937005",
"0.42800286",
"0.42621157",
"0.4238971",
"0.4207718",
"0.42001417",
"0.41846082",
"0.4182446",
"0.41578132",
"0.41528863",
"0.41264972",
"0.410398",
"0.40851957",
"0.40726236",
"0.40601626",
"0.40221962",
"0.4008313"
] | 0.86087584 | 0 |
Sets the skip_ssh_tracking of this RuntimeAntiMalwareRule. | def skip_ssh_tracking(self, skip_ssh_tracking):
self._skip_ssh_tracking = skip_ssh_tracking | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_suppress_flow(self):\n self.suppressed = self.packet_count\n self.fcip.update_one({'hash': self.fcip_hash},\n {'$set': {'suppressed': self.suppressed},})",
"def set_skip_current_track(self):\n self.get(COMMAND_CPM, 'SetSkipCurrentTrack')",
"def ssh_tunnel(self, ssh_tunnel):\n\n self._ssh_tunnel = ssh_tunnel",
"def setIgnoreState(self, state):\n self.__ignore_white = state",
"def _set_var_ignore(self):\n self._var_ignore = [k for k in self.__dict__.keys() if k[0] != '_']",
"def scp_disable(task):\n cmd = \"no ip scp server enable\"\n task.run(task=netmiko_send_config, config_commands=cmd)\n task.run(task=netmiko_save_config)\n c_print(f\"*** {task.host}: SCP has been disabled ***\")",
"def skip_source_dest_check(self, skip_source_dest_check):\n self._skip_source_dest_check = skip_source_dest_check",
"def skip_verify(self, skip_verify):\n\n self._skip_verify = skip_verify",
"def killExperiment(self, **kwargs):\n if kwargs['kill']=='YES':\n killRobot.sshKill()",
"def setDiscardFlags(self, flags):\r\n self.__data.discardFlags = flags",
"def setIgnoreCertificateErrors(self, ignore: bool) -> Awaitable[Dict]:\n return self.client.send(\n \"Security.setIgnoreCertificateErrors\", {\"ignore\": ignore}\n )",
"def skip(self):\n self.skip_votes.clear()\n if self.is_playing():\n self.player.stop()",
"def set_ignore_flag(self, reag_item_id: int, do_ignore: bool) -> dict:\n raise NotImplementedError('not implemented')",
"def disable(self):\n\n super().disable()\n self._slo_image_size.disable()\n self._slo_neural_network.disable()\n self._slo_number_of_epochs.disable()\n self._slo_examples_per_batch.disable()",
"async def disable(self, **kwargs) -> None: # pylint: disable=unused-argument\r\n await self.set_ena(False)",
"def DisableByRunIf(self):\n self.run_if = 'False'",
"def setIgnoreTokenIds(self, value):\n return self._set(ignoreTokenIds=value)",
"def setIgnoreTokenIds(self, value):\n return self._set(ignoreTokenIds=value)",
"def setIgnoreTokenIds(self, value):\n return self._set(ignoreTokenIds=value)",
"async def async_turn_off(self):\n path = \"/ip/firewall/nat\"\n param = \".id\"\n value = None\n for uid in self._ctrl.data[\"nat\"]:\n if (\n self._ctrl.data[\"nat\"][uid][\"name\"]\n == f\"{self._data['protocol']}:{self._data['dst-port']}\"\n ):\n value = self._ctrl.data[\"nat\"][uid][\".id\"]\n\n mod_param = \"disabled\"\n mod_value = True\n self._ctrl.set_value(path, param, value, mod_param, mod_value)\n await self._ctrl.async_update()",
"def allow_ssh_only_management_account(self, allow_ssh_only_management_account):\n\n self._allow_ssh_only_management_account = allow_ssh_only_management_account",
"def Ignore(self, relative_file):\n return False",
"def set_skip(self, val):\n self.skip = val\n return self",
"def unset_ip_routing(self):\n os_type = os.getenv('server_os_type', None)\n if self.remote is not True and os_type not in ['Linux']:\n return\n self.log_output('Unsetting IP forwarding and iptables rules on {} host'.format(\n os_type))\n\n command = (\n \"echo '{0}' | sudo -S iptables -F && \"\n \"echo '{0}' | sudo -S iptables -X && \"\n \"echo '{0}' | sudo -S iptables -t nat -F && \"\n \"echo '{0}' | sudo -S iptables -t nat -X && \"\n \"echo '{0}' | sudo -S iptables -t mangle -F && \"\n \"echo '{0}' | sudo -S iptables -t mangle -X && \"\n \"echo '{0}' | sudo -S iptables -P INPUT ACCEPT && \"\n \"echo '{0}' | sudo -S iptables -P FORWARD ACCEPT && \"\n \"echo '{0}' | sudo -S iptables -P OUTPUT ACCEPT && \"\n \"echo '{0}' | sudo -S sysctl -w net.ipv4.ip_forward=0 && \"\n \"echo '{0}' | sudo -S sysctl -w net.ipv6.conf.all.forwarding=0 && \"\n \"echo '{0}' | sudo -S sysctl -w net.ipv4.conf.all.send_redirects=1\"\n )\n self.run_command(command.format(self.ssh_password))",
"def pssh(self, pssh):\n self._pssh = pssh\n return self",
"def ignore_dnt(self, ignore_dnt):\n # type: (bool) -> None\n\n if ignore_dnt is not None:\n if not isinstance(ignore_dnt, bool):\n raise TypeError(\"Invalid type for `ignore_dnt`, type has to be `bool`\")\n\n self._ignore_dnt = ignore_dnt",
"def __init__(__self__, *,\n ssh_access: Optional[pulumi.Input[Union[str, 'AgentPoolSSHAccess']]] = None):\n if ssh_access is not None:\n pulumi.set(__self__, \"ssh_access\", ssh_access)",
"def remote_connections_enabled(self, remote_connections_enabled):\n\n self._remote_connections_enabled = remote_connections_enabled",
"def disable_switch_port(self, mgr, interface):\n confstr = snipp.CMD_NO_SWITCHPORT % (interface)\n confstr = self.create_xml_snippet(confstr)\n LOG.debug(\"NexusDriver: %s\" % confstr)\n mgr.edit_config(target='running', config=confstr)",
"def tethering_disabled(self, tethering_disabled):\n\n self._tethering_disabled = tethering_disabled"
] | [
"0.52679175",
"0.51133686",
"0.50047815",
"0.49796999",
"0.49359134",
"0.48416126",
"0.47192234",
"0.4642725",
"0.46380436",
"0.4610448",
"0.454691",
"0.45164886",
"0.44933212",
"0.44694534",
"0.44682866",
"0.4461396",
"0.44300166",
"0.44300166",
"0.44300166",
"0.4425383",
"0.44230792",
"0.44013318",
"0.4382151",
"0.4382136",
"0.4377405",
"0.43641126",
"0.43616146",
"0.43551597",
"0.43499285",
"0.43357536"
] | 0.83099043 | 0 |
Sets the suspicious_elf_headers of this RuntimeAntiMalwareRule. | def suspicious_elf_headers(self, suspicious_elf_headers):
self._suspicious_elf_headers = suspicious_elf_headers | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def elf_images_some(self, elf_images_some):\n\n self._elf_images_some = elf_images_some",
"def allow_headers(self, allow_headers):\n\n self._allow_headers = allow_headers",
"def _enforce_user_agent(headers: dict) -> dict:\n headers.update(SYNAPSE_USER_AGENT_HEADER)\n return headers",
"def sanitize_headers(headers):\n auth_header = headers.pop(\"Authorization\", None)\n if auth_header:\n _logger.warning(\n f\"Possible fraud: Authorization header was set to {auth_header}\"\n )\n userinfo_header = headers.pop(\"X-Userinfo\", None)\n if userinfo_header:\n _logger.warning(\n f\"Possible fraud: X-Userinfo header was set to {userinfo_header}\"\n )",
"def Headers(self, headers):\n if len(headers) > 0:\n for key in headers:\n if headers[key] == \"%TARGET%\":\n headers[key] = self._target\n self._headers = headers\n else:\n self._headers = None",
"def webhook_headers(self, webhook_headers: \"Dict[str, List[str]]\"):\n self._attrs[\"webhookHeaders\"] = webhook_headers",
"def webhook_headers(self, webhook_headers: \"Dict[str, List[str]]\"):\n self._attrs[\"webhookHeaders\"] = webhook_headers",
"def fill_headers(self, headers):\n self.headers = {h[0]: h[1] for h in headers}",
"def add_custom_headers(self, headers):\n headers_to_remove = [x for x in headers if x.lower() in [y.lower() for y in self.headers]]\n for header in headers_to_remove:\n headers.pop(header, None)\n headers.update(self.headers)",
"def elf_images_none(self, elf_images_none):\n\n self._elf_images_none = elf_images_none",
"def hios_ids(self, hios_ids):\n\n self._hios_ids = hios_ids",
"def set_headers(self, headers: dict) -> None:\n self.headers = headers",
"def set_elf(self, elf):\n\n self.logger.info('set elf ' + elf)\n rc = self.debugger.set_elf(elf)\n\n self.elf_set_status_sig.emit(rc)\n \n # Console message\n if rc is False:\n self.write_console_output_sig.emit(\n \"[%s] Could not set elf target.\" % DEBUG)\n\n return None\n\n self.elf = elf\n self.bp_func = None\n\n self.write_console_output_sig.emit(\n (\"[%s] Elf target set to %s\" % (DEBUG, elf)))\n \n # Debug symbols\n debug_sym = self.debugger.is_compiled_with_debug_symbols()\n self.has_compile_symbols_sig.emit(debug_sym)\n\n self.state = self.ExecStates.START_BP_SET",
"def set_headers(self, headers):\n\n if isinstance(headers, dict):\n headers = headers.items()\n\n # NOTE(kgriffs): We can't use dict.update because we have to\n # normalize the header names.\n _headers = self._headers\n for name, value in headers:\n _headers[name.lower()] = value",
"def set_user_agent(self, user_agent: str) -> None:\n self.headers['User-Agent'] = user_agent",
"def set_headers(self, headers):\n self.headers = headers\n process_headers(self)\n self.character_encoding = self.parsed_headers.get(\n 'content-type', (None, {})\n )[1].get('charset', 'utf-8') # default isn't UTF-8, but oh well",
"def whitelist_file(self, fkey):\n self.whitelist.update([fkey])",
"def _fixHeaderLength(self):\n self.header.seek(0)\n lines = self.header.readlines()\n headlength = len(lines)\n lines[0] = wrapLine(\"NLHEAD_FFI\", self.annotation, self.delimiter, \"%d%s%d\\n\" % (headlength, self.delimiter, self.FFI))\n self.header = StringIO(\"\".join(lines))\n self.header.seek(0)",
"def process_request_headers(request):\n request.headers.setdefault('User-Agent',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) '\n 'AppleWebKit/537.36 (KHTML, like Gecko) '\n 'Chrome/51.0.2704.103 Safari/537.36')\n if 'redirect_urls' not in request.meta:\n request.headers['Referer'] = None",
"def __prepare_headers(elements: list) -> str:\n return f'# ReaxFF_{\"-\".join(elements)} optimized with ff_optimum\\n'",
"def update_ev_whitelist(self, compressed_ev_whitelist):\n manifest = self._read_manifest_json()\n manifest[\"version\"] = str(self.manifest_version() + 1)\n self._write_manifest_json(manifest)\n with open(\n os.path.join(\n self._crx_dir,\n \"_platform_specific\",\n \"all\",\n \"ev_hashes_whitelist.bin\"),\n \"wb\") as hashes_file:\n hashes_file.write(compressed_ev_whitelist)",
"def response_headers(self, response_headers):\n\n self._response_headers = response_headers",
"def set_res_headers(response):\n response.headers[\"Server\"] = \"OurTieba\"\n response.headers[\"X-Content-Type-Options\"] = \"nosniff\"\n response.headers[\"X-Frame-Options\"] = \"sameorigin\"\n if app.config.get(\"ENABLE_CSP\"):\n response.headers[\"Content-Security-Policy\"] = \"script-src \" + \" \".join(WHITELIST) + \"; object-src 'self'\"\n return response",
"def setBlockedCookies(self, list_):\n if not self.__loaded:\n self.__load()\n \n self.__exceptionsBlock = list_[:]\n self.__exceptionsBlock.sort()\n self.__saveTimer.changeOccurred()",
"def authenticity_necessary_lights(self, authenticity_necessary_lights):\n\n self._authenticity_necessary_lights = authenticity_necessary_lights",
"def set_extra_headers(self, path):\n pass",
"def elf_images_every(self, elf_images_every):\n\n self._elf_images_every = elf_images_every",
"def elf_image_num(self, elf_image_num):\n\n self._elf_image_num = elf_image_num",
"def fix_headers(hParams,testMode=False):\n \n \n fileList = glob.glob(hParams['fileList'])\n for oneFile in fileList:\n with fits.open(oneFile,'update') as HDUList_orig:\n if testMode == True:\n print(\"Doing a dry run without modifying headers\")\n HDUList = fits.HDUList([fits.PrimaryHDU(None,header=HDUList_orig[0].header)])\n primHead = HDUList[0].header\n else:\n primHead = HDUList_orig[0].header\n\n colcorner = hParams['COLCORNR'][primHead['SCA_ID']]\n rowcorner = hParams['ROWCORNR'][primHead['SCA_ID']]\n \n detTiming = pynrc.pynrc_core.DetectorOps(detector=481,\n wind_mode=hParams['wind_mode'],\n xpix=hParams['xpix'],\n ypix=hParams['ypix'],\n x0=colcorner-1,\n y0=rowcorner-1,\n nint=hParams['nint'],\n ngroup=hParams['ngroup'],\n nf=hParams['nf'])\n correctHead = detTiming.make_header()\n\n obsId = primHead['OBS_ID']\n if obsId in hParams['expStart'].keys():\n expStart = hParams['expStart'][obsId]\n date, time = expStart.split('T')\n primHead['DATE-OBS'] = date\n primHead['TIME-OBS'] = time\n \n t_expStart = Time(expStart)\n t_expEnd = t_expStart + correctHead['EXPTIME'] * u.second\n expEnd = t_expEnd.fits\n date, time = expEnd.split('T')\n primHead['DATE-END'] = date\n primHead['TIME-END'] = time\n else:\n print(\"Couldn't find exp start for {}\".format(obsId))\n \n\n for oneKey in ['TFRAME','TGROUP','INTTIME','EXPTIME',\n 'TREFROW','BREFROW','LREFCOL','RREFCOL',\n 'COLCORNR','ROWCORNR']:\n primHead[oneKey] = correctHead[oneKey]\n \n if hParams['wind_mode'] == 'WINDOW':\n primHead['HWINMODE'] = 'ENABLE'\n else:\n primHead['HWINMODE'] = 'DISABLE'\n primHead['DETECTOR'] = detectorDict[primHead['SCA_ID']]\n \n primHead['TLDYNEID'] = hParams['teledyneID'][primHead['SCA_ID']]\n if testMode == True:\n pdb.set_trace()",
"def elf_image_num_lte(self, elf_image_num_lte):\n\n self._elf_image_num_lte = elf_image_num_lte"
] | [
"0.484491",
"0.47857866",
"0.4715581",
"0.46925652",
"0.46410558",
"0.46366873",
"0.46366873",
"0.46242186",
"0.46089914",
"0.4551534",
"0.45476323",
"0.45103273",
"0.4486765",
"0.4470622",
"0.43418282",
"0.43411455",
"0.42955545",
"0.42435467",
"0.42423066",
"0.42370942",
"0.42301556",
"0.4222715",
"0.42201573",
"0.4195076",
"0.41940603",
"0.41795188",
"0.41516718",
"0.414462",
"0.4132932",
"0.4121303"
] | 0.86457044 | 0 |
Sets the temp_fs_proc of this RuntimeAntiMalwareRule. | def temp_fs_proc(self, temp_fs_proc):
self._temp_fs_proc = temp_fs_proc | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_temp_file(self):\n\n index = self.filename.rfind('/') + 1\n self.temp_filename = self.filename[:index] + \"tmp_\" + self.filename[index:]",
"def fill_stat(self, fs=None, **options):\n self.filestat = StatTuple(fs=fs, **options)",
"def _register_temporary_file(self):\n _partition_file = self._subarray._partition_file\n _partition_dir = self._subarray._partition_dir\n if _partition_file not in _temporary_files:\n fd, _lock_file = mkstemp(\n prefix=_partition_file + \"_\", dir=_partition_dir\n )\n close(fd)\n _temporary_files[_partition_file] = (\n _partition_dir,\n _lock_file,\n set(),\n )\n else:\n _, _lock_file, _ = _temporary_files[_partition_file]\n\n return _lock_file",
"def FS(self, FS):\n self._FS = FS",
"def testSetExecutable(self):\n with tempfile.NamedTemporaryFile(delete=True) as temp_file:\n utils.SetExecutable(temp_file.name)\n self.assertEqual(os.stat(temp_file.name).st_mode & 0o777, 0o755)",
"def path_tmp(self) -> Path:\n return self.path_supervisor / TMP_DATA",
"def register_tmp_file(self, tmp_file: str):\n self.temp_files.add(pathlib.Path(tmp_file))",
"def processlocal(self, processlocal) :\n\t\ttry :\n\t\t\tself._processlocal = processlocal\n\t\texcept Exception as e:\n\t\t\traise e",
"def _use_temp_directory(self):\n if not self._is_temp_dir:\n self._orig_base_data_dir = self._base_data_dir\n self._orig_base_logs_dir = self._base_logs_dir\n temp_dir = Path(tempfile.mkdtemp())\n self._base_data_dir = temp_dir / \"data\"\n self._base_logs_dir = temp_dir / \"logs\"\n self.db.change_path(\":memory:\")\n self.set_current(\"default\", update=False)\n self._is_temp_dir = True\n return temp_dir",
"def setTmpDir(self):\n\t\tif os.name != 'nt':\n\t\t\t# On unix use /tmp by default\n\t\t\tself.tmpDir = os.environ.get(\"TMPDIR\", \"/tmp\")\n\t\t\tself.tmpDir = os.environ.get(\"TMP\", self.tmpDir)\n\t\telse:\n\t\t\t# On Windows use the current directory\n\t\t\tself.tmpDir = os.environ.get(\"TMPDIR\", \"\")\n\t\t\tself.tmpDir = os.environ.get(\"TMP\", self.tmpDir)\n\t\t\tself.tmpDir = os.environ.get(\"TEMP\", self.tmpDir)\n\t\tif not os.path.isdir(self.tmpDir):\n\t\t\tself.tmpDir = \"\"\n\t\telif not os.access(self.tmpDir, os.F_OK + os.W_OK):\n\t\t\tself.tmpDir = \"\"",
"def set_udfs(self):\n\n flowcell_type = self.process.all_inputs()[0].udf.get('Flowcell Type')\n\n for key, val in self.process_settings[flowcell_type].items():\n self.process.udf[key] = val\n self.process.put()\n\n for art in self.artifacts:\n for key, val in self.artifact_settings[flowcell_type].items():\n art.udf[key] = val\n art.put()",
"def _temp_file(self, val):\n fd, fn = tempfile.mkstemp()\n fp = os.fdopen(fd, \"wb\")\n if val:\n if not isinstance(val, bytes):\n fp.write(val.encode(\"utf-8\", \"surrogateescape\"))\n else:\n fp.write(val)\n fp.close()\n return fn",
"def test_temp_file_lock(tmp_path, monkeypatch):\n monkeypatch.setenv(\"RAY_TMPDIR\", str(tmp_path))\n assert str(tmp_path) in ray._private.utils.get_user_temp_dir()\n with TempFileLock(path=\"abc.txt\"):\n assert RAY_LOCKFILE_DIR in os.listdir(tmp_path)\n assert os.listdir(tmp_path / RAY_LOCKFILE_DIR)",
"def _get_temp_path(self):\n handle, path = tempfile.mkstemp()\n # windows can't write to a file that is already open by another process\n # (tests use pipe redirection to a log file)\n os.close(handle)\n return path",
"def setFastFile(self,fname):\n self.fst_file = fname",
"def temp_staff_speciality(self, temp_staff_speciality):\n\n self._temp_staff_speciality = temp_staff_speciality",
"def gradio_temp_dir(monkeypatch, tmp_path):\n monkeypatch.setenv(\"GRADIO_TEMP_DIR\", str(tmp_path))\n return tmp_path",
"def set_temp_entry(self, val):\n self.temp = val",
"def mark_assembly_as_temporary(self):\n self.assemblyIsTemporary = True",
"def __call__(self):\n for tmp_file in filter(lambda x: x.exists(), self.temp_files):\n tmp_file.unlink()\n\n for proc in self.processes:\n try:\n os.kill(proc, signal.SIGTERM)\n except ProcessLookupError:\n pass",
"def _temp_dir(self):\n tmp_dir = os.path.join(self.output_dir, self.config.find_tune[\"run_dir\"])\n try:\n os.makedirs(tmp_dir)\n except OSError:\n pass\n os.chdir(tmp_dir)\n self.tmp_dir = \"./\"",
"def stopWatchingFileSystem(self) :\n\n self.continueWatchingFS = False",
"def set_file_system( # pylint: disable=too-many-arguments\n self,\n user_open,\n user_close,\n user_read,\n user_seek,\n user_async_read,\n user_async_cancel,\n block_align=-1,\n ):\n self._call_fmod(\n \"FMOD_System_SetFileSystem\",\n FILE_OPEN_CALLBACK(user_open),\n FILE_CLOSE_CALLBACK(user_close),\n FILE_READ_CALLBACK(user_read),\n FILE_SEEK_CALLBACK(user_seek),\n FILE_ASYNCREAD_CALLBACK(user_async_read),\n FILE_ASYNCCANCEL_CALLBACK(user_async_cancel),\n block_align,\n )",
"def _get_tmp_file_path(self):\n return os.path.join(self.tmp_dir, self.hash)",
"def _create_temp_batch_file(self):\n return tempfile.NamedTemporaryFile(delete=False)",
"def setTemperature(self, temp):\n with self.lock:\n self.temp = temp",
"def secure_temp_dir(context):\n tmpd = tempfile.TemporaryDirectory()\n context.tempdir = tmpd",
"def prepare_filesystem(self):\n if self._is_filesystem_prepared:\n return True\n ms = self.getModelObj()\n if ms is None:\n self.warning(\"Could not prepare local filesystem to store macros\")\n return False\n ms_name = ms.getSimpleName().replace('/', '_')\n self._tmp_dir = tempfile.mkdtemp(prefix=ms_name, dir=self._base_tmp_dir)\n self._is_filesystem_prepared = True\n return True",
"def activateLocalFastPath() -> None:\n global _FAST_PATH, _FAST_PATH_IS_TEMPORARY, APP_DATA\n\n # Try to fix pathing issues in Windows.\n if os.name == \"nt\":\n APP_DATA = APP_DATA.replace(\"/\", \"\\\\\")\n\n _FAST_PATH = os.path.join(\n APP_DATA,\n \"{}{}-{}\".format(\n MPI_RANK,\n os.environ.get(\"PYTEST_XDIST_WORKER\", \"\"), # for parallel unit testing,\n datetime.datetime.now().strftime(\"%Y%m%d%H%M%S%f\"),\n ),\n )\n\n _FAST_PATH_IS_TEMPORARY = True",
"def StoreAntirollback(now, ar_filename, kern_f):\n print 'antirollback time now ' + str(now)\n sys.stdout.flush()\n kern_f.write(str(now))\n kern_f.flush()\n tmpdir = os.path.dirname(ar_filename)\n with tempfile.NamedTemporaryFile(mode='w', dir=tmpdir, delete=False) as f:\n f.write(str(now) + '\\n')\n f.flush()\n os.fsync(f.fileno())\n os.rename(f.name, ar_filename)"
] | [
"0.55050707",
"0.48121423",
"0.47581938",
"0.47221127",
"0.46554834",
"0.46197382",
"0.46076837",
"0.4597256",
"0.45485073",
"0.4537833",
"0.44469118",
"0.44351405",
"0.44220942",
"0.43939075",
"0.43752405",
"0.4364896",
"0.4347796",
"0.43418652",
"0.4332066",
"0.4307438",
"0.42888075",
"0.42857555",
"0.42801917",
"0.42564318",
"0.42353573",
"0.42320472",
"0.421839",
"0.41959223",
"0.41920725",
"0.41872355"
] | 0.8137083 | 0 |
Sets the user_unknown_origin_binary of this RuntimeAntiMalwareRule. | def user_unknown_origin_binary(self, user_unknown_origin_binary):
self._user_unknown_origin_binary = user_unknown_origin_binary | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def service_unknown_origin_binary(self, service_unknown_origin_binary):\n\n self._service_unknown_origin_binary = service_unknown_origin_binary",
"def set_unknown_user(self):\n self.objects[self.ids.TOPBAR].setText(\"User: <Unknown card>\")",
"def set_as_not_handled(self):\n if self.type == MessageTypes.AGENT:\n raise InvalidMessageTypeError(\n 'Cannot set not_handled as True when msg is of type Agent')\n self.not_handled = True",
"def set_as_handled(self):\n self.not_handled = False",
"def setUnknown(self, key, val):\n # type: (str, tp.Any)->None\n if 'unknown' not in self._ifAttributes:\n self._ifAttributes['unknown'] = {}\n self._ifAttributes['unknown'][key] = val",
"def suspicious_elf_headers(self, suspicious_elf_headers):\n\n self._suspicious_elf_headers = suspicious_elf_headers",
"def allow_undefined(self, value):\n self.is_allowUndefined = value\n return self",
"def ignoreUnimacroDirectoryInPathIfNotUserDirectory(self):\n key = 'IncludeUnimacroInPythonPath'\n Keys = list(self.userregnl.keys())\n\n if key in Keys:\n print(('clearing variable %s'% key))\n self.userregnl.delete(key)\n else:\n print(('was not set %s'% key))",
"def unknown(self):\n self.add_file_string('Unknown file')\n self.should_copy = False",
"def elf_images_none(self, elf_images_none):\n\n self._elf_images_none = elf_images_none",
"def run_unknown(self, line):\n pass",
"def elf_image_num_not(self, elf_image_num_not):\n\n self._elf_image_num_not = elf_image_num_not",
"def set_proxy_unsafe(self, proxy_string):\n\t\tself.__proxy = proxy_string",
"def set_state_unknown(value):\n\n global STATE_UNKNOWN\n STATE_UNKNOWN = value",
"def malicious(self):\n return self.probably_malicious",
"def elf_image_num_not_in(self, elf_image_num_not_in):\n\n self._elf_image_num_not_in = elf_image_num_not_in",
"def set_mask_bad(self, _=None):\n self.set_mask_type(\"bad\")",
"def ignore_local_proxy_environment_variables(self):\n self._ignore_local_proxy = True",
"def dd_origin(self, value):\n # type: (Optional[Text]) -> None\n with self._lock:\n if value is None:\n if ORIGIN_KEY in self._meta:\n del self._meta[ORIGIN_KEY]\n return\n self._meta[ORIGIN_KEY] = value",
"def _set_origin_value(self, origin):\n self.origin_value = origin",
"def set_remote(self, bRemote):\n\t\tcall_sdk_function('PrlVmDev_SetRemote', self.handle, bRemote)",
"def labelUnknown(self):\n self.satisfiability = Satisfiability.UNKNOWN\n self.model = None\n self.unsatCore = []",
"def __init__(self, ignore_unknown=True):\n self.ignore_unknown = ignore_unknown",
"def check_origin(self, origin):\n return True",
"def check_origin(self, origin):\n return True",
"def check_origin(self, origin):\n return True",
"def check_origin(self, origin):\n return True",
"def allow_unresolved_file_tokens(self):\n return self._allow_unresolved_file_tokens",
"def removeAllUserScripts(self):\n self.PDFreactorConfiguration.in1[\"UserScripts\"] = None",
"def set_unauthorized_mode(unauthorized):\n BonsaiWS._UNAUTHORIZED = unauthorized"
] | [
"0.7047199",
"0.5043389",
"0.47182634",
"0.46471974",
"0.45888796",
"0.45770887",
"0.45130247",
"0.45042887",
"0.4406569",
"0.44012833",
"0.43978664",
"0.43925205",
"0.433955",
"0.43356243",
"0.42788303",
"0.42750442",
"0.42617148",
"0.42207563",
"0.4216584",
"0.4216363",
"0.419537",
"0.41908142",
"0.41877455",
"0.41749758",
"0.41749758",
"0.41749758",
"0.41749758",
"0.41543484",
"0.4145625",
"0.413585"
] | 0.8767857 | 0 |
Sets the web_shell of this RuntimeAntiMalwareRule. | def web_shell(self, web_shell):
self._web_shell = web_shell | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def shell(self, shell):\n\n self._shell = shell",
"def reverse_shell(self, reverse_shell):\n\n self._reverse_shell = reverse_shell",
"def use_shell(self, shell):\n return ShellContext(self, shell)",
"def use_shell(self):\n return self._shell",
"def set(self, shell=None):\n\n # iterate over the env variable objects and set them in the env\n for var in self._vars.itervalues():\n var.set(shell=shell)",
"def item_web_url(self, item_web_url):\n\n self._item_web_url = item_web_url",
"def _set_shell_obj(self, obj):\n self._shell_obj = weakref.ref(obj)",
"def handle_webdisable(bot, ievent):\n cfg.set('webenable', 0)\n cfg.save()\n if plugins.reload('gozerplugs', 'webserver'):\n ievent.reply('done')\n else:\n ievent.reply('error reloading webserver plugin')",
"def fast_web_view(self, fast_web_view):\n\n self._fast_web_view = fast_web_view",
"def web_id(self, web_id):\n\n self._web_id = web_id",
"def set(self, shell=None):\n os.environ[self.name] = str(self.value)\n if shell:\n print shell.set_env_var(self.name, self.value)",
"def get_shell(self, shell):",
"def init_shell(self):\n self.shell = PlayerTerminalInteractiveShell.instance(\n commands=self.commands,\n speed=self.speed,\n parent=self,\n display_banner=False,\n profile_dir=self.profile_dir,\n ipython_dir=self.ipython_dir,\n user_ns=self.user_ns,\n )\n self.shell.configurables.append(self)",
"def _channel_invoke_shell(self) -> None:\n self._shell = True\n self.channel.shell()",
"def do_shell(self, line):\n subprocess.call(line, shell=True)",
"def __init__(self, browser):\n super(Shell, self).__init__()\n self.browser = browser",
"def loginShell(self, shell=None):\n\n\t\tif shell is None:\n\t\t\traise exceptions.BadArgumentError(\n\t\t\t\t_(u'You must specify a shell'))\n\n\t\tif shell not in LMC.configuration.users.shells:\n\t\t\t\traise exceptions.BadArgumentError(_(u'Invalid shell \"{0}\". '\n\t\t\t\t\t'Valid shells are {1}.').format(stylize(ST_BAD, shell),\n\t\t\t\t\t', '.join(stylize(ST_COMMENT, shell)\n\t\t\t\t\t\tfor shell in LMC.configuration.users.shells)))\n\n\t\twith self.lock:\n\t\t\tself.__loginShell = shell\n\t\t\tself.serialize()\n\n\t\t\tLicornEvent('user_loginShell_changed', user=self.proxy).emit(priorities.LOW)\n\n\t\t\tlogging.notice(_(u'Changed user {0} shell to {1}.').format(\n\t\t\t\tstylize(ST_NAME, self.__login), stylize(ST_COMMENT, shell)))",
"def web(self):\n if not self.__web:\n self.__web = Web(self)\n return self.__web",
"def __init__(__self__, *,\n rule: pulumi.Input[str],\n se_linux_options: Optional[pulumi.Input['_core.v1.SELinuxOptionsArgs']] = None):\n pulumi.set(__self__, \"rule\", rule)\n if se_linux_options is not None:\n pulumi.set(__self__, \"se_linux_options\", se_linux_options)",
"def set_webex_para(self,\n webex_dict):\n\n self.__webex_flag__ = 1\n\n # webex teams\n self.__webex_url__ = webex_dict[\"webex_url\"]\n self.__webex_space__ = webex_dict[\"webex_space\"]\n self.__webex_token__ = webex_dict[\"webex_token\"]\n\n print(\"NotifyManager Webex Teams Space=%s\"\n % self.__webex_space__)\n print(\"NotifyManager Webex Teams URL=%s\"\n % self.__webex_url__)\n return",
"def do_shell(self, line):\n eval(line)",
"def web(self) -> Optional[pulumi.Input['ApplicationWebArgs']]:\n return pulumi.get(self, \"web\")",
"def web(self) -> Optional[pulumi.Input['ApplicationWebArgs']]:\n return pulumi.get(self, \"web\")",
"def set(self, subrule):\n self.__rule = subrule",
"def set_webhook(self, webhook):\n self.webhook = webhook\n return",
"def connect_browser_handler(self, wh):\n self.web_handler = wh",
"def web_site_status_enum(self, web_site_status_enum):\n\n self._web_site_status_enum = web_site_status_enum",
"def handle_webdefaultallow(bot, ievent):\n cfg.set('whitelistenable', 0)\n ievent.reply('ok')",
"def hide_web(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"hide_web\")",
"def hide_web(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"hide_web\")"
] | [
"0.5257477",
"0.50713885",
"0.5045325",
"0.50029993",
"0.49303526",
"0.4911177",
"0.47862864",
"0.47516662",
"0.47228616",
"0.47115886",
"0.46809548",
"0.46645325",
"0.46355623",
"0.4607884",
"0.45009336",
"0.44623724",
"0.44550326",
"0.4440212",
"0.44241655",
"0.44138148",
"0.43848598",
"0.43228215",
"0.43228215",
"0.4293564",
"0.42640227",
"0.4256712",
"0.42501205",
"0.42430124",
"0.42201146",
"0.42201146"
] | 0.76740456 | 0 |
Sets the wild_fire_analysis of this RuntimeAntiMalwareRule. | def wild_fire_analysis(self, wild_fire_analysis):
self._wild_fire_analysis = wild_fire_analysis | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def tax_analysis(self, tax_analysis):\n\n self._tax_analysis = tax_analysis",
"def wildtype(self, wildtype):\n self._wildtype = wildtype\n self.add_binary()",
"def set_suppress_flow(self):\n self.suppressed = self.packet_count\n self.fcip.update_one({'hash': self.fcip_hash},\n {'$set': {'suppressed': self.suppressed},})",
"def setAllowAnnotations(self,value):\n self.PDFreactorConfiguration.in1[\"allowAnnotations\"] = value",
"def mime_allowed(self, mime_allowed: ConfigNodePropertyArray):\n\n self._mime_allowed = mime_allowed",
"def set_smearing(self, smearing_Ha):\n self.smearing = smearing_Ha\n self.qptanalyzer.smearing = smearing_Ha",
"def shots_allowed(self, shots_allowed):\n\n self._shots_allowed = shots_allowed",
"def allow_warnings(self, allow_warnings):\n self._allow_warnings = allow_warnings",
"def set_has_fan(self, value: bool = True):\r\n self._logger.info(log_message_formatter(\r\n \"set\", f\"{self}\", \"has_fan\", value))\r\n self._has_fan = value",
"def sanitize_log(self, value: bool):\n self._sanitize = value",
"def allowance(self, allowance):\n\n self._allowance = allowance",
"def setSilent(self) -> None:\n ...",
"def set_mask_bad(self, _=None):\n self.set_mask_type(\"bad\")",
"def setWhitelist(self, w):\n return self._set(whitelist=w)",
"def frequency_reject(self, frequency_reject):\n self._frequency_reject = frequency_reject",
"def set_flammable(self, f):\n self.flammable = f",
"def setAllowUpscaling(self, allow):\n self._allow_upscaling = allow\n self.update()",
"def allow_shadowing(self, allow: bool) -> None:\n if not isinstance(allow, bool):\n raise TypeError(\"expected a boolean\")\n ffi.wasmtime_linker_allow_shadowing(self._ptr, allow)",
"def set_afferents_fr(self,fr):\n\t\t# Iterate over all dictionaries\n\t\tfor muscle in self.cells:\n\t\t\tfor cellName in self.cells[muscle]:\n\t\t\t\tif cellName in self._afferentsNames:\n\t\t\t\t\tfor cell in self.cells[muscle][cellName]:\n\t\t\t\t\t\tcell.set_firing_rate(fr[muscle][cellName])",
"def accepting_medicare_patients(self, accepting_medicare_patients):\n\n self._accepting_medicare_patients = accepting_medicare_patients",
"def update_analysis(self):\n # Read our analysis scripts into an internal structure\n self.analysis_list = Configs.import_analysis_scripts(\n self.analysis_directory)",
"def set_scanning_filter(self, **kwargs):\n warn(\n \"This method will be removed in a future version of Bleak. Use BleakScanner constructor args instead.\",\n FutureWarning,\n stacklevel=2,\n )\n self._backend.set_scanning_filter(**kwargs)",
"def handle_webdefaultdeny(bot, ievent):\n cfg.set('whitelistenable', 1)\n ievent.reply('ok')",
"def allowed(self, allowed):\n if allowed is None:\n raise ValueError(\"Invalid value for `allowed`, must not be `None`\") # noqa: E501\n\n self._allowed = allowed",
"def set_import_filter(self, regex: Union[str, re.Pattern], blacklist: bool = False):\n self._variables['IMPORT_FILTER'] = (\n re.compile(regex) if isinstance(regex, str) else regex,\n bool(blacklist)\n )",
"def setsignals(self, alarmMode):\n self.immediate = self._immediate_by_state[alarmMode].copy()\n self.delayed = self._delayed_by_state[alarmMode].copy()\n self.override = self._override_by_state[alarmMode].copy()\n self.ignored = set(self._allsensors) - (set(self.immediate) | set(self.delayed))",
"def setAllowAssembly(self,value):\n self.PDFreactorConfiguration.in1[\"allowAssembly\"] = value",
"def set_firerate(self, firerate):\n self._firerate = firerate",
"def SetAllowUpscaling(self, allow):\n self._allow_upscaling = allow\n self.Refresh()",
"def set_allow_replace(self, allow_replace):\n self.allow_replace = allow_replace"
] | [
"0.4805896",
"0.47317153",
"0.46334144",
"0.45518073",
"0.45493808",
"0.45445997",
"0.4506532",
"0.4447285",
"0.44388187",
"0.43485162",
"0.43185204",
"0.4318399",
"0.4295372",
"0.42945984",
"0.42902604",
"0.42520434",
"0.42520025",
"0.42445207",
"0.42425737",
"0.4240613",
"0.42331418",
"0.42164958",
"0.42115003",
"0.42077187",
"0.4195197",
"0.41768697",
"0.4174435",
"0.4172374",
"0.41721457",
"0.41687012"
] | 0.8094492 | 0 |
Find and attach everything in the scene which has an oceanHeightHook attribute to the ocean | def connectAllWithOceanHeightAttr(object = ''):
inprogressBar = pbui.ProgressBarUI(title = 'Hooking To Ocean:')
inprogressBar.show()
inprogressBar.updateProgress(percent = 0, doingWhat = 'Hooking everything to ocean now...')
debug(None, method = 'connectAllWithOceanHeightAttr', message = 'Rebuilding Ocean Hooks', verbose = False)
## Find all in scene with oceanHeightHook Attribute
## Setup the progress bar
if not cmds.objExists('Shot_FX_hrc'):
cmds.group(n = 'Shot_FX_hrc', em = True)
if not cmds.objExists('BOAT_OceanLocators_hrc'):
cmds.group(n = 'BOAT_OceanLocators_hrc', em = True)
cmds.parent('BOAT_OceanLocators_hrc', 'Shot_FX_hrc')
cmds.setAttr('BOAT_OceanLocators_hrc.visibility', 0)
## Clean up existing hooks in the scene
inprogressBar.updateProgress(percent = 50, doingWhat = 'Cleaning up now..')
if object.startswith('All'):
object = ''
elif object.startswith('Dock'):
object = 'BLD'
elif object.startswith('Boat'):
object = 'CHAR'
elif object.startswith('Prop'):
object = 'PROP'
cleanUpExisting(filters = [object])
if object == 'BLD':
## Clean-up the ADEF BLD's oceanHook control animatable groups...
[cmds.delete(transform) for transform in cmds.ls(type = 'transform') if transform.endswith('_oceanCtrls_hrc')]
## Rebuild / build the hooks for boats in the scene
inprogressBar.updateProgress(percent = 75, doingWhat = 'performHookBoatsToOcean now..')
startTime = time.time()
performHookBoatsToOcean()
print 'TOTAL TIME FOR performHookBoatsToOcean: %s' % (time.time()-startTime)
## Now make sure cycle check is off because it's a fn pita
cmds.cycleCheck(e = 0)
_finished(inprogressBar) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def filter(self):\n new_nodes_to_update = {}\n nodes_to_update = {}\n\n for agent_id in self.cameras.keys():\n nodes_to_update[agent_id] = []\n new_nodes_to_update[agent_id] = []\n if agent_id not in self.beliefs:\n world_name = self.cameras[agent_id].name.replace(\"-\",\"_\")+\"_beliefs\"\n rospy.logdebug(\"[perspective_filter] create new world <%s>\" % str(world_name))\n self.beliefs[agent_id] = self.ctx.worlds[world_name]\n self.node_mapping[agent_id] = {}\n\n dq = deque()\n dq.append(self.source.scene.rootnode)\n\n while not rospy.is_shutdown() and 0 < len(dq):\n node = dq.pop()\n if node.id != self.source.scene.rootnode.id:\n # Process start here\n if node.id in self.cameras.keys(): # if the node is the agent POV\n nodes_to_update[node.id].append(node) # we add it to his belief\n\n if node.parent in self.cameras.keys() and node.type == MESH: # if the node is part of an agent\n nodes_to_update[node.parent].append(node) # we add it to his belief\n\n for agent_id, visible_nodes in self.visible_nodes.items(): # then we add the visible nodes\n if agent_id in self.cameras.keys():\n if node in visible_nodes:\n nodes_to_update[agent_id].append(node)\n\n # And end here\n for child_id in node.children:\n dq.append(self.source.scene.nodes[child_id])\n\n for agent_id, nodes in nodes_to_update.items():\n if nodes:\n for node in nodes:\n new_node = node.copy()\n if node.id in self.node_mapping[agent_id]:\n new_node.id = self.node_mapping[agent_id][node.id]\n if new_node.id in self.nodes_transform:\n if not numpy.allclose(self.nodes_transform[new_node.id], new_node.transformation):\n new_nodes_to_update[agent_id].append(new_node)\n self.nodes_transform[new_node.id] = new_node.transformation\n else:\n self.nodes_transform[new_node.id] = new_node.transformation\n new_nodes_to_update[agent_id].append(new_node)\n else:\n self.node_mapping[agent_id][node.id] = new_node.id\n new_nodes_to_update[agent_id].append(new_node)\n\n # Finally we update the corresponding beliefs worlds\n for agent_id, nodes in new_nodes_to_update.items():\n for node in nodes:\n node.parent = self.node_mapping[agent_id][node.parent] if node.parent in self.node_mapping[agent_id] \\\n else self.beliefs[agent_id].scene.rootnode.id\n if nodes:\n self.beliefs[agent_id].scene.nodes.update(nodes)",
"def add_heights():\n fac_min = 50\n fac_max = 40\n\n print 'Get lands and oceans'\n t = time.time()\n lands, oceans = get_lands_oceans()\n print 'lands and oceans getted: ', time.time() - t\n\n # TODO: create one def with params: mount_level and other for create heights\n # add default heights\n for coord in lands:\n self[coord] = self.config.land_mount_level[1]\n\n for coord in oceans:\n self[coord] = -self.config.mid_mount_level[1]\n\n # add low heights for lands\n count_land = int(round(len(lands) * config.factor_low_mount / 100.))\n land_coords = []\n\n starts = random.randint(count_land / fac_min, count_land / fac_max)\n for start in xrange(starts):\n start_coord = lands[random.randint(0, len(lands)-1)]\n land_coords.append(start_coord)\n self[start_coord] = random.randint(self.config.low_mount_level[0], self.config.low_mount_level[1])\n\n while count_land > 0:\n # for lands\n if count_land > 0:\n dx = random.randint(-1,1)\n dy = random.randint(-1,1)\n coord = land_coords[random.randint(0, len(land_coords) - 1)]\n coord = coord[0] + dx, coord[1] + dy\n if coord not in land_coords:\n self[coord] = random.randint(self.config.low_mount_level[0], self.config.low_mount_level[1])\n land_coords.append(coord)\n count_land -= 1\n\n\n target_lands = land_coords\n\n # -------------------------------------------------------------------------------\n # add mid heights for lands\n count_land = int(round(len(target_lands) * (config.factor_mid_mount / 100.)))\n land_coords = []\n\n starts = random.randint(count_land / (fac_min * 3), count_land / (fac_max*3))\n for start in xrange(starts):\n start_coord = target_lands[random.randint(0, len(target_lands)-1)]\n land_coords.append(start_coord)\n self[start_coord] = random.randint(self.config.mid_mount_level[0],\n self.config.mid_mount_level[1])\n\n if land_coords == []:\n return\n\n while count_land > 0:\n # for lands\n if count_land > 0:\n dx = random.randint(-1,1)\n dy = random.randint(-1,1)\n coord = land_coords[random.randint(0, len(land_coords) - 1)]\n coord = coord[0] + dx, coord[1] + dy\n #if coord not in land_coords:\n self[coord] = random.randint(self.config.mid_mount_level[0],\n self.config.mid_mount_level[1])\n land_coords.append(coord)\n count_land -= 1\n\n\n target_lands = land_coords\n\n\n # -------------------------------------------------------------------------------\n # add high heights for lands\n count_land = int(round(len(target_lands) * (config.factor_high_mount / 100.)))\n land_coords = []\n\n starts = random.randint(count_land / (fac_min * 4), count_land / (fac_max * 3))\n for start in xrange(starts):\n start_coord = target_lands[random.randint(0, len(target_lands)-1)]\n land_coords.append(start_coord)\n self[start_coord] = random.randint(self.config.high_mount_level[0],\n self.config.high_mount_level[1])\n\n while count_land > 0:\n # for lands\n if count_land > 0:\n dx = random.randint(-1,1)\n dy = random.randint(-1,1)\n try:\n coord = land_coords[random.randint(0, len(land_coords) - 1)]\n except ValueError:\n coord = lands[random.randint(0, len(lands) - 1)]\n coord = coord[0] + dx, coord[1] + dy\n #if coord not in land_coords:\n self[coord] = random.randint(self.config.high_mount_level[0],\n self.config.high_mount_level[1])\n land_coords.append(coord)\n count_land -= 1",
"def add_heights(self, lon_lat):\n lon_lat_msl = self.msl_nn_srtm_interp(lon_lat)\n return self.msl_to_wgs84(lon_lat_msl)",
"def _on_hires_assets(self):\n\n scene_assets = artellapipe.AssetsMgr().get_scene_assets()\n if not scene_assets:\n return\n\n for scene_asset in scene_assets:\n scene_asset.switch_to_hires()",
"def height(self):\n return len(self.mine_map)",
"def height(self) -> int:",
"def height(self) -> int:",
"def height(self) -> int:",
"def regenerate_heightmap(self):\n\n for x in range(16):\n for z in range(16):\n column = x * 16 + z\n for y in range(255, -1, -1):\n if self.get_block((x, y, z)):\n break\n\n self.heightmap[column] = y",
"def performHookBoatsToOcean(oceanShader = CONST.OCEANANIMSHADER, interactiveOceanShader = CONST.OCEANINTERACTIVESHADER):\n\t## cleanup the old build if it exists\n\t_removeLegacyOceanHooks()\n\n\t## Find all hooks in the scene...\n\tboatHookList = getBoatHooks()\n\tdebug(None, method = 'performHookBoatsToOcean', message = 'boatHookList: %s' % boatHookList, verbose = False)\n\n\t## If we have a valid list of hooks in the scene proceed....\n\tif boatHookList:\n\t\tfor eachBoatHook in boatHookList:\n\t\t\tif ':' in eachBoatHook:\n\t\t\t\tboatName = eachBoatHook.split(':')[0]\n\t\t\t\tworldCtrl = '%s:world_ctrl' % boatName\n\t\t\t\toceanLocName = '%s_boatOceanLoc' % boatName\n\t\t\t\tcharOceanLock = '%s:oceanLock' % boatName\n\t\t\telse:\n\t\t\t\tboatName = ''\n\t\t\t\tworldCtrl = 'world_ctrl'\n\t\t\t\toceanLocName = 'temp_boatOceanLoc'\n\t\t\t\tcharOceanLock = 'oceanLock'\n\n\t\t\tdebug(None, method = 'performHookBoatsToOcean', message = 'boatName: %s' % boatName, verbose = False)\n\t\t\tdebug(None, method = 'performHookBoatsToOcean', message = 'worldCtrl: %s' % worldCtrl, verbose = False)\n\t\t\tdebug(None, method = 'performHookBoatsToOcean', message = 'oceanLocName: %s' % oceanLocName, verbose = False)\n\t\t\tdebug(None, method = 'performHookBoatsToOcean', message = 'charOceanLock: %s' % charOceanLock, verbose = False)\n\n\t\t\t###################\n\t\t\t###### START BUILD\n\t\t\t###################\n\t\t\tif not cmds.objExists(oceanLocName):\n\t\t\t\tstartmyLocator = time.time()\n\t\t\t\t_buildMayaBoatOceanLocator()\n\n\t\t\t\t## Now perform a check to find the locator name, because maya has the dumb and doesn't allow us to make this with a name cleanly\n\t\t\t\tmyLocator = [myLoc for myLoc in cmds.ls(type = 'transform') if cmds.objExists('%s.buoyancy' % myLoc) and 'locator' in myLoc]\n\t\t\t\tdebug(None, method = 'performHookBoatsToOcean', message = 'myLocator: %s' % myLocator, verbose = False)\n\n\t\t\t\t## If we have a newly made maya boat locator lets work on it...\n\t\t\t\tif myLocator:\n\t\t\t\t\tcmds.rename(myLocator, oceanLocName)\n\t\t\t\t\tdebug(None, method = 'performHookBoatsToOcean', message = 'myLocator: %s renamed to : %s' % (myLocator, oceanLocName), verbose = False)\n\n\t\t\t\t\t## Add the new attrs tot the world_ctrl\n\t\t\t\t\tif not isAssemblyRef(oceanLocName) and 'ADef' not in oceanLocName:\n\t\t\t\t\t\t_addAttrsToWorldCtrl(worldCtrl)\n\n\t\t\t\t\t## Now rename the expression\n\t\t\t\t\tstart = time.time()\n\t\t\t\t\tconnectedExpressions = [exp for exp in cmds.listConnections(oceanLocName, source =True) if cmds.nodeType(exp) == 'expression']\n\t\t\t\t\tif connectedExpressions:\n\t\t\t\t\t\texp = list(set(connectedExpressions))\n\t\t\t\t\t\t## DELETE THE EXPRESSION AND BUILD A NEW ONE WITH THE RIGHT FCKN NAMES IN IT!!!!\n\t\t\t\t\t\tcmds.delete(exp)\n\n\t\t\t\t\t\t## Define the expression:\n\t\t\t\t\t\tlocShapeName = '%sShape' % oceanLocName\n\t\t\t\t\t\tdebug(None, method = 'performHookBoatsToOcean', message = 'locShapeName: %s' % locShapeName, verbose = False)\n\n\t\t\t\t\t\t## Now build the expression for the locator\n\t\t\t\t\t\tdebug(None, method = 'performHookBoatsToOcean', message = 'Now build the expression for the locator...', verbose = False)\n\t\t\t\t\t\tif not isAssemblyRef(oceanLocName) and 'ADef' not in oceanLocName:\n\t\t\t\t\t\t\t_buildExpression(locShapeName, oceanLocName, oceanShader, boatName)\n\t\t\t\t\t\t\tdebug(None, method = 'performHookBoatsToOcean', message = '_buildExpression success..', verbose = False)\n\t\t\t\t\tprint 'TIME to process 
connected expressions: %s' % (time.time()-start)\n\n\t\t\t\t\t## Now parent it to the right group\n\t\t\t\t\tstart = time.time()\n\t\t\t\t\ttry:\n\t\t\t\t\t\tcmds.parent(oceanLocName, 'BOAT_OceanLocators_hrc')\n\t\t\t\t\t\tdebug(None, method = 'performHookBoatsToOcean', message = 'parent to BOAT_OceanLocators_hrc success...', verbose = False)\n\t\t\t\t\texcept:\n\t\t\t\t\t\tpass\n\t\t\t\t\tprint 'TIME to parent: %s' % (time.time()-start)\n\n\t\t\t\tprint 'Total time to build default ocean boat locator: %s' % (time.time()-startmyLocator)\n\n\t\t\t\tstart = time.time()\n\t\t\t\t## Delete the current legacy expressions in the Y axis of the charOceanLocator setup\n\t\t\t\tconnectedExpressions = [exp for exp in cmds.listConnections(charOceanLock, source =True) if cmds.nodeType(exp) == 'expression']\n\t\t\t\tdebug(None, method = 'performHookBoatsToOcean', message = 'connectedExpressions:%s' % connectedExpressions, verbose = False)\n\t\t\t\tif connectedExpressions:\n\t\t\t\t\texp = list(set(connectedExpressions))\n\t\t\t\t\tcmds.delete(exp)\n\t\t\t\t\tdebug(None, method = 'performHookBoatsToOcean', message = 'Deleted ... %s' % exp , verbose = False)\n\n\t\t\t\tif not isAssemblyRef(oceanLocName) and 'ADef' not in oceanLocName:\n\t\t\t\t\tdebug(None, method = 'performHookBoatsToOcean', message = 'Point Constraint now for %s' % oceanLocName , verbose = False)\n\t\t\t\t\t## Now connect the oceanLocName locator X and Z to the world ctrl so that this locator follows the world ctrl around\n\t\t\t\t\t## This locator drives the Up and Down of the world ctrl.\n\t\t\t\t\tcmds.pointConstraint(worldCtrl, oceanLocName, skip = ['y'], mo = False, n = '%s_%s_PointConstraint' % (worldCtrl, oceanLocName))\n\t\t\t\t\tdebug(None, method = 'performHookBoatsToOcean', message = 'Point Constraint Success...' 
, verbose = False)\n\t\t\t\telse:\n\t\t\t\t\t## Ensure oceanLock translation Y is reset back to 0 because it will cause some minor offset Y value between the 2 locators\n\t\t\t\t\tcmds.setAttr('%s.translateY' % eachBoatHook, 0) # This is essential line for both locators to match 100%\n\n\t\t\t\t\t## Create a ctrl grp for the BLD\n\t\t\t\t\tassRefCtrlGrp = '%s_oceanCtrls_hrc' % boatName\n\t\t\t\t\tif not cmds.objExists(assRefCtrlGrp):\n\t\t\t\t\t\tassRefCtrlGrp = cmds.group(n = assRefCtrlGrp, em = True)\n\t\t\t\t\t\tcmds.parent(assRefCtrlGrp, 'BOAT_OceanLocators_hrc')\n\n\t\t\t\t\t## Now add the attrs to it.\n\t\t\t\t\tfor key, var in _getAttrsToAdd().items():\n\t\t\t\t\t\tif not cmds.objExists('%s.%s' % (assRefCtrlGrp, key)):\n\t\t\t\t\t\t\tdebug(None, method = 'performHookBoatsToOcean', message = 'Adding attr: %s.%s' % (assRefCtrlGrp, key), verbose = False)\n\t\t\t\t\t\t\tcmds.addAttr(assRefCtrlGrp, ln = key, at = 'double', min = var[0], max = var[1], dv = var[2])\n\t\t\t\t\t\t\tcmds.setAttr('%s.%s' % (assRefCtrlGrp, key), keyable = True)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tdebug(None, method = 'performHookBoatsToOcean', message = 'Skipping attr %s.%s exists already' % (assRefCtrlGrp, key), verbose = False)\n\n\t\t\t\t\t### Put the darn locator for this thing in the right world space\n\t\t\t\t\tgetLocation = cmds.xform(eachBoatHook, query = True, pivots = True, ws = True)\n\t\t\t\t\tcmds.setAttr('%s.translateX' % oceanLocName, getLocation[0])\n\t\t\t\t\tcmds.setAttr('%s.translateY' % oceanLocName, getLocation[1])\n\t\t\t\t\tcmds.setAttr('%s.translateZ' % oceanLocName, getLocation[2])\n\t\t\t\t\tcmds.makeIdentity(oceanLocName, apply = True, t = 1, r = 1, s = 1, n = 0)\n\n\t\t\t\t\t## Now build the expression for the assembly Ref loc\n\t\t\t\t\t_buildExpression(locShapeName, oceanLocName, oceanShader, boatName)\n\t\t\t\t### Now connect the referenced charName:oceanLock.translateY to the new ocean locator which will in turn move the world_ctrl accordingly as a result\n\t\t\t\tcmds.connectAttr('%s.translateY' % oceanLocName, '%s.translateY' % charOceanLock, f = True)\n\n\t\t\t\t## Now connect the orientation for oceanLocName locator to the charOceanLock locator which will in turn move the world_ctrl accordingly as a result\n\t\t\t\ttry:\n\t\t\t\t\tcmds.connectAttr('%s.rotateX' % oceanLocName, '%s.rotateX' % charOceanLock, f = True)\n\t\t\t\texcept RuntimeError:\n\t\t\t\t\tpass\n\t\t\t\ttry:\n\t\t\t\t\tcmds.connectAttr('%s.rotateY' % oceanLocName, '%s.rotateY' % charOceanLock, f = True)\n\t\t\t\texcept RuntimeError:\n\t\t\t\t\tpass\n\t\t\t\ttry:\n\t\t\t\t\tcmds.connectAttr('%s.rotateZ' % oceanLocName, '%s.rotateZ' % charOceanLock, f = True)\n\t\t\t\texcept RuntimeError:\n\t\t\t\t\tpass\n\n\t\t\t\t### Now connect the world_ctrl attrs to the locators\n\t\t\t\tfor key, var in _getAttrsToAdd().items():\n\t\t\t\t\ttry:\n\t\t\t\t\t\tif not isAssemblyRef(oceanLocName) and 'ADef' not in oceanLocName:\n\t\t\t\t\t\t\tcmds.connectAttr('%s.%s' % (worldCtrl, key), '%s.%s' % (oceanLocName, key), f = True)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tcmds.connectAttr('%s.%s' % ('%s_oceanCtrls_hrc' % boatName, key), '%sShape.%s' % (oceanLocName, key), f = True)\n\t\t\t\t\texcept:\n\t\t\t\t\t\tpass\n\n\t\t\t\t## Get / Set transform limit of oceanLock\n\t\t\t\tfor x in ['translationX', 'translationY', 'translationZ', 'rotationX', 'rotationY', 'rotationZ']:\n\t\t\t\t\ttransformEnabler = eval( 'cmds.transformLimits(\"%s\", enable%s%s = True, q = True)' %(eachBoatHook, x[0].upper(), x[1:]) )\n\t\t\t\t\ttransformValue = eval( 
'cmds.transformLimits(\"%s\", %s = True, q = True)' %(eachBoatHook, x) )\n\t\t\t\t\teval( 'cmds.transformLimits(\"%s\", enable%s%s = %s, %s = %s)' %(oceanLocName, x[0].upper(), x[1:], transformEnabler, x, transformValue) )\n\t\t\t\t\teval( 'cmds.transformLimits(\"%s\", enable%s%s = %s, %s = %s)' %(oceanLocName, x[0].upper(), x[1:], transformEnabler, x, transformValue) )\n\t\t\t\tprint 'Time to cleanup expressions and prop and assembly ref locs: %s' % (time.time()-start)\n\t\t\telse:\n\t\t\t\tprint \"Skipping %s already in scene...\" % oceanLocName",
"def add_altitude():\n\n doc = Metashape.app.document\n if not len(doc.chunks):\n raise Exception(\"No chunks!\")\n\n # alt = Metashape.app.getFloat(\"Please specify the height to be added:\", 100)\n alt = float(sys.argv[1])\n\n\n chunk = doc.chunk\n\n for camera in chunk.cameras:\n if camera.reference.location:\n coord = camera.reference.location\n camera.reference.location = Metashape.Vector([coord.x, coord.y, coord.z + alt])\n print(\"Add : \"+str(sys.argv[1]))",
"def on_body_height_add(self, val):\n val = max(0, int(val))\n self.mdl.cmp.s_add_height = val\n self.refresh_svg_canvas()",
"def analyze(self, event):\n jets = Collection(event, \"Jet\")\n\n BTagWeightN = 1.0\n BTagWeightN_up = 1.0\n BTagWeightN_down = 1.0\n BTagWeightN_FS = 1.0\n BTagWeightN_up_FS = 1.0\n BTagWeightN_down_FS = 1.0\n BTagWeightD = 1.0\n BTagWeightNHeavy = 1.0\n BTagWeightNHeavy_up = 1.0\n BTagWeightNHeavy_down = 1.0\n BTagWeightNHeavy_FS = 1.0\n BTagWeightNHeavy_up_FS = 1.0\n BTagWeightNHeavy_down_FS = 1.0\n BTagWeightDHeavy = 1.0\n BTagWeightNLight = 1.0\n BTagWeightNLight_FS = 1.0\n BTagWeightNLight_up = 1.0\n BTagWeightNLight_up_FS= 1.0\n BTagWeightNLight_down = 1.0\n BTagWeightNLight_down_FS = 1.0\n BTagWeightDLight = 1.0\n\n for jet in jets:\n pt = jet.pt\n eta = abs(jet.eta)\n flavor = jet.hadronFlavour\n\n if not ( pt > self.jetPtMin and eta < self.jetEtaMax): continue\n\n if flavor == 5:\n pt_bin = self.h_eff_b.GetXaxis().FindBin(pt); \n if pt_bin > self.h_eff_b.GetXaxis().GetNbins():\n pt_bin = self.h_eff_b.GetXaxis().GetNbins(); \n eta_bin = self.h_eff_b.GetYaxis().FindBin(eta); \n if eta_bin > self.h_eff_b.GetYaxis().GetNbins():\n eta_bin = self.h_eff_b.GetYaxis().GetNbins();\n\n eff = self.h_eff_b.GetBinContent(pt_bin, eta_bin);\n\n elif flavor == 4:\n pt_bin = self.h_eff_c.GetXaxis().FindBin(pt); \n if pt_bin > self.h_eff_c.GetXaxis().GetNbins():\n pt_bin = self.h_eff_c.GetXaxis().GetNbins(); \n eta_bin = self.h_eff_c.GetYaxis().FindBin(eta); \n if eta_bin > self.h_eff_c.GetYaxis().GetNbins():\n eta_bin = self.h_eff_c.GetYaxis().GetNbins();\n\n eff = self.h_eff_c.GetBinContent(pt_bin, eta_bin);\n\n else:\n pt_bin = self.h_eff_udsg.GetXaxis().FindBin(pt); \n if pt_bin > self.h_eff_udsg.GetXaxis().GetNbins():\n pt_bin = self.h_eff_udsg.GetXaxis().GetNbins(); \n eta_bin = self.h_eff_udsg.GetYaxis().FindBin(eta); \n if eta_bin > self.h_eff_udsg.GetYaxis().GetNbins():\n eta_bin = self.h_eff_udsg.GetYaxis().GetNbins();\n\n eff = self.h_eff_udsg.GetBinContent(pt_bin, eta_bin);\n \n if self.FastSim:\n btagSF = jet.btagSF\n btagSF_FS=jet.btagSF_FS\n btagSF_up_FS = jet.btagSF_FS_up\n btagSF_down_FS = jet.btagSF_FS_down\n btagSF_down = jet.btagSF_down\n btagSF_up = jet.btagSF_up\n else:\n btagSF = jet.btagSF\n btagSF_FS= 1.0\n btagSF_up = jet.btagSF_up\n btagSF_down = jet.btagSF_down\n btagSF_up_FS = 1.0\n btagSF_down_FS = 1.0\n \n if jet.btagDeepB > self.bDiscCut:\n #check if eff is zero\n if eff < 0.001:\n eff = 0.001\n \n BTagWeightN *= btagSF * eff\n BTagWeightN_FS *= btagSF_FS * eff\n BTagWeightN_up *= btagSF_up * eff\n BTagWeightN_down *= btagSF_down * eff\n BTagWeightN_up_FS *= btagSF_up_FS * eff\n BTagWeightN_down_FS *= btagSF_down_FS * eff\n\n if abs(flavor) == 5:\n BTagWeightNHeavy *= btagSF * eff\n BTagWeightNHeavy_FS *= btagSF_FS * eff\n BTagWeightNHeavy_up *= btagSF_up * eff\n BTagWeightNHeavy_down *= btagSF_down * eff\n BTagWeightNHeavy_up_FS *= btagSF_up_FS * eff\n BTagWeightNHeavy_down_FS *= btagSF_down_FS * eff\n BTagWeightDHeavy *= eff\n else:\n BTagWeightNLight *= btagSF * eff\n BTagWeightNLight_FS *= btagSF_FS * eff\n BTagWeightNLight_up *= btagSF_up * eff\n BTagWeightNLight_down *= btagSF_down * eff\n BTagWeightNLight_up_FS *= btagSF_up_FS * eff\n BTagWeightNLight_down_FS *= btagSF_down_FS * eff\n BTagWeightDLight *= eff\n\n BTagWeightD *= eff\n else:\n #check if eff is 1.0\n if eff > 0.999:\n eff = 0.999\n\n BTagWeightN *= 1 - btagSF * eff\n BTagWeightN_FS *= 1 - btagSF_FS * eff\n BTagWeightN_up *= 1 - btagSF_up * eff\n BTagWeightN_down *= 1 - btagSF_down * eff\n BTagWeightN_up_FS *= 1 - btagSF_up_FS * eff\n BTagWeightN_down_FS *= 1 - btagSF_down_FS * eff\n\n if 
abs(flavor) == 5:\n BTagWeightNHeavy *= 1 - btagSF * eff\n BTagWeightNHeavy_FS *= 1 - btagSF_FS * eff\n BTagWeightNHeavy_up *= 1 - btagSF_up * eff\n BTagWeightNHeavy_down *= 1 - btagSF_down * eff\n BTagWeightNHeavy_up_FS *= 1 - btagSF_up_FS * eff\n BTagWeightNHeavy_down_FS *= 1 - btagSF_down_FS * eff\n BTagWeightDHeavy *= 1 - eff\n else:\n BTagWeightNLight *= 1 - btagSF * eff\n BTagWeightNLight_FS *= 1 - btagSF_FS * eff\n BTagWeightNLight_up *= 1 - btagSF_up * eff\n BTagWeightNLight_up_FS *= 1 - btagSF_up_FS * eff\n BTagWeightNLight_down *= 1 - btagSF_down * eff\n BTagWeightNLight_down_FS *= 1 - btagSF_down_FS * eff\n BTagWeightDLight *= 1 - eff\n\n BTagWeightD *= 1 - eff\n \n if self.FastSim:\n self.out.fillBranch(\"BTagWeight_FS\", BTagWeightN_FS / BTagWeightD)\n self.out.fillBranch(\"BTagWeight_Up_FS\", BTagWeightN_up_FS / BTagWeightD)\n self.out.fillBranch(\"BTagWeight_Down_FS\", BTagWeightN_down_FS / BTagWeightD)\n self.out.fillBranch(\"BTagWeightHeavy_FS\", BTagWeightNHeavy_FS / BTagWeightDHeavy)\n self.out.fillBranch(\"BTagWeightHeavy_Up_FS\", BTagWeightNHeavy_up_FS / BTagWeightDHeavy)\n self.out.fillBranch(\"BTagWeightHeavy_Down_FS\", BTagWeightNHeavy_down_FS / BTagWeightDHeavy)\n self.out.fillBranch(\"BTagWeightLight_FS\", BTagWeightNLight_FS / BTagWeightDLight)\n self.out.fillBranch(\"BTagWeightLight_Up_FS\", BTagWeightNLight_up_FS / BTagWeightDLight)\n self.out.fillBranch(\"BTagWeightLight_Down_FS\", BTagWeightNLight_down_FS / BTagWeightDLight)\n self.out.fillBranch(\"BTagWeight\", BTagWeightN / BTagWeightD)\n self.out.fillBranch(\"BTagWeight_Up\", BTagWeightN_up / BTagWeightD)\n self.out.fillBranch(\"BTagWeight_Down\", BTagWeightN_down / BTagWeightD)\n self.out.fillBranch(\"BTagWeightHeavy\", BTagWeightNHeavy / BTagWeightDHeavy)\n self.out.fillBranch(\"BTagWeightHeavy_Up\", BTagWeightNHeavy_up / BTagWeightDHeavy)\n self.out.fillBranch(\"BTagWeightHeavy_Down\", BTagWeightNHeavy_down / BTagWeightDHeavy)\n self.out.fillBranch(\"BTagWeightLight\", BTagWeightNLight / BTagWeightDLight)\n self.out.fillBranch(\"BTagWeightLight_Up\", BTagWeightNLight_up / BTagWeightDLight)\n self.out.fillBranch(\"BTagWeightLight_Down\", BTagWeightNLight_down / BTagWeightDLight)\n return True",
"def _setup_children(self):\n\n for i in xrange(self._nhalos):\n self._halos[i + 1].properties['children'] = []\n\n for i in xrange(self._nhalos):\n host = self._halos[i + 1].properties.get('hostHalo', -2)\n if host > -1:\n try:\n self._halos[host + 1].properties['children'].append(i + 1)\n except KeyError:\n pass",
"def setup_terrain(self):\r\n self.terrain_scale = LVector3(512, 512, 100)\r\n self.terrain_pos = LVector3(-256, -256, -70)\r\n # sample values for a 4096 x 4096px heightmap.\r\n #self.terrain_scale = LVector3(4096, 4096, 1000)\r\n #self.terrain_pos = LVector3(-2048, -2048, -70)\r\n \"\"\"\r\n Diamond_subdivision is an alternating triangulation scheme and may\r\n produce better results.\r\n \"\"\"\r\n use_diamond_subdivision = True\r\n \r\n \"\"\"\r\n Construct the terrain\r\n Without scaling, any ShaderTerrainMesh is 1x1x1 units.\r\n \"\"\"\r\n self.terrain_node = ShaderTerrainMesh()\r\n \"\"\"\r\n Set a heightfield, the heightfield should be a 16-bit png and\r\n have a quadratic size of a power of two.\r\n \"\"\"\r\n heightfield = Texture()\r\n heightfield.read(self.heightfield_fn)\r\n heightfield.set_keep_ram_image(True) \r\n self.terrain_node.heightfield = heightfield\r\n \r\n # Display characteristic values of the heightfield texture\r\n #minpoint, maxpoint, avg = LPoint3(), LPoint3(), LPoint3()\r\n #heightfield.calc_min_max(minpoint, maxpoint)\r\n #heightfield.calc_average_point(avg, 0.5, 0.5, 0.5)\r\n #print(\"avg: {} min: {} max: {}\".format(avg.x, minpoint.x, maxpoint.x))\r\n\r\n \"\"\"\r\n Set the target triangle width. For a value of 10.0 for example,\r\n the ShaderTerrainMesh will attempt to make every triangle 10 pixels\r\n wide on screen.\r\n \"\"\"\r\n self.terrain_node.target_triangle_width = 10.0\r\n if use_diamond_subdivision:\r\n \"\"\"\r\n This has to be specified before calling .generate()\r\n The default is false.\r\n \"\"\"\r\n load_prc_file_data(\"\", \"stm-use-hexagonal-layout true\")\r\n \r\n self.terrain_node.generate()\r\n \"\"\"\r\n Attach the terrain to the main scene and set its scale. With no scale\r\n set, the terrain ranges from (0, 0, 0) to (1, 1, 1)\r\n \"\"\"\r\n self.terrain = self.render.attach_new_node(self.terrain_node)\r\n self.terrain.set_scale(self.terrain_scale)\r\n self.terrain.set_pos(self.terrain_pos)\r\n \"\"\"\r\n Set a vertex and a fragment shader on the terrain. The\r\n ShaderTerrainMesh only works with an applied shader.\r\n \"\"\"\r\n terrain_shader = Shader.load(Shader.SL_GLSL, \r\n \"samples/shader-terrain/terrain.vert.glsl\", \r\n \"samples/shader-terrain/terrain.frag.glsl\")\r\n self.terrain.set_shader(terrain_shader)\r\n self.terrain.set_shader_input(\"camera\", base.camera)\r\n # Set some texture on the terrain\r\n grass_tex = self.loader.load_texture(\r\n \"samples/shader-terrain/textures/grass.png\")\r\n grass_tex.set_minfilter(SamplerState.FT_linear_mipmap_linear)\r\n grass_tex.set_anisotropic_degree(16)\r\n self.terrain.set_texture(grass_tex)\r\n\r\n \"\"\"\r\n Set up the DynamicHeightfield (it's a type of PfmFile). 
We load the\r\n same heightfield image as with ShaderTerrainMesh.\r\n \"\"\"\r\n self.DHF = DynamicHeightfield()\r\n self.DHF.read(self.heightfield_fn)\r\n \"\"\"\r\n Set up empty PfmFiles to prepare stuff in that is going to\r\n dynamically modify our terrain.\r\n \"\"\"\r\n self.StagingPFM = PfmFile()\r\n self.RotorPFM = PfmFile()\r\n \r\n \"\"\"\r\n Set up the BulletHeightfieldShape (=collision terrain) and give it\r\n some sensible physical properties.\r\n \"\"\"\r\n self.HFS = BulletHeightfieldShape(self.DHF, self.terrain_scale.z,\r\n STM=True)\r\n if use_diamond_subdivision:\r\n self.HFS.set_use_diamond_subdivision(True)\r\n HFS_rigidbody = BulletRigidBodyNode(\"BulletTerrain\")\r\n HFS_rigidbody.set_static(True)\r\n friction = 2.0\r\n HFS_rigidbody.set_anisotropic_friction(\r\n LVector3(friction, friction, friction/1.3))\r\n HFS_rigidbody.set_restitution(0.3)\r\n HFS_rigidbody.add_shape(self.HFS)\r\n self.world.attach(HFS_rigidbody)\r\n \r\n HFS_NP = NodePath(HFS_rigidbody)\r\n HFS_NP.reparent_to(self.worldNP)\r\n \"\"\"\r\n This aligns the Bullet terrain with the ShaderTerrainMesh rendered\r\n terrain. It will be exact as long as the terrain vertex shader from\r\n the STM sample is used and no additional tessellation shader.\r\n For Bullet (as for other physics engines) the origin of objects is at\r\n the center.\r\n \"\"\"\r\n HFS_NP.set_pos(self.terrain_pos + self.terrain_scale/2)\r\n HFS_NP.set_sx(self.terrain_scale.x / heightfield.get_x_size())\r\n HFS_NP.set_sy(self.terrain_scale.y / heightfield.get_y_size())\r\n \r\n # Disables Bullet debug rendering for the terrain, because it is slow.\r\n #HFS_NP.node().set_debug_enabled(False)\r\n \r\n \"\"\"\r\n Finally, link the ShaderTerrainMesh and the BulletHeightfieldShape to\r\n the DynamicHeightfield. From now on changes to the DynamicHeightfield\r\n will propagate to the (visible) ShaderTerrainMesh and the (collidable)\r\n BulletHeightfieldShape.\r\n \"\"\"\r\n self.HFS.set_dynamic_heightfield(self.DHF)\r\n self.terrain_node.set_dynamic_heightfield(self.DHF)",
"def ship_container(self):",
"def _setup_children(self):\n\n for i in xrange(self._nhalos):\n self._halos[i+1].properties['children'] = []\n\n for i in xrange(self._nhalos):\n host = self._halos[i+1].properties.get('hostHalo', -2)\n if host > -1:\n try:\n self._halos[host+1].properties['children'].append(i+1)\n except KeyError:\n pass",
"def afterLoadSceneObject(self):\n\t\tpass",
"def footprint_height():",
"def Pool2DOptionsAddFilterHeight(builder, filterHeight):\n return AddFilterHeight(builder, filterHeight)",
"def setup(self):\n self.debug(\"Setup ..\")\n\n if self.pipeline.settings.useHardwarePCF:\n self.error(\n \"Global Illumination does not work in combination with PCF!\")\n import sys\n sys.exit(0)\n return\n\n self.settings = VoxelSettingsManager()\n self.settings.loadFromFile(join(self.sceneRoot, \"voxels.ini\"))\n\n self.debug(\n \"Loaded voxels, grid resolution is\", self.settings.GridResolution)\n\n self.gridScale = self.settings.GridEnd - self.settings.GridStart\n self.voxelSize = self.gridScale / float(self.settings.GridResolution)\n self.entrySize = Vec2(\n 1.0 / float(self.settings.StackSizeX), 1.0 / float(self.settings.StackSizeY))\n self.frameIndex = 0\n\n invVoxelSize = Vec3(\n 1.0 / self.voxelSize.x, 1.0 / self.voxelSize.y, 1.0 / self.voxelSize.z)\n invVoxelSize.normalize()\n self.normalizationFactor = invVoxelSize / \\\n float(self.settings.GridResolution)\n\n # Debugging of voxels, VERY slow\n self.debugVoxels = False\n\n if self.debugVoxels:\n self.createVoxelDebugBox()\n\n # Load packed voxels\n packedVoxels = Globals.loader.loadTexture(\n join(self.sceneRoot, \"voxels.png\"))\n packedVoxels.setFormat(Texture.FRgba8)\n packedVoxels.setComponentType(Texture.TUnsignedByte)\n # packedVoxels.setKeepRamImage(False)\n\n # Create 3D Texture to store unpacked voxels\n self.unpackedVoxels = Texture(\"Unpacked voxels\")\n self.unpackedVoxels.setup3dTexture(self.settings.GridResolution, self.settings.GridResolution, self.settings.GridResolution,\n Texture.TFloat, Texture.FRgba8)\n self.unpackedVoxels.setMinfilter(Texture.FTLinearMipmapLinear)\n self.unpackedVoxels.setMagfilter(Texture.FTLinear)\n\n self.unpackVoxels = NodePath(\"unpackVoxels\")\n self.unpackVoxels.setShader(\n BetterShader.loadCompute(\"Shader/GI/UnpackVoxels.compute\"))\n\n print \"setting inputs ..\"\n self.unpackVoxels.setShaderInput(\"packedVoxels\", packedVoxels)\n print \"setting inputs ..\"\n self.unpackVoxels.setShaderInput(\n \"stackSizeX\", LVecBase3i(self.settings.StackSizeX))\n print \"setting inputs ..\"\n self.unpackVoxels.setShaderInput(\n \"gridSize\", LVecBase3i(self.settings.GridResolution))\n print \"setting inputs ..\"\n self.unpackVoxels.setShaderInput(\"destination\", self.unpackedVoxels)\n print \"executing shader ..\"\n self._executeShader(\n self.unpackVoxels, self.settings.GridResolution / 8, self.settings.GridResolution / 8, self.settings.GridResolution / 8)\n\n print \"creating direct radiance texture ..\"\n # Create 3D Texture to store direct radiance\n self.directRadianceCache = Texture(\"Direct radiance cache\")\n self.directRadianceCache.setup3dTexture(self.settings.GridResolution, self.settings.GridResolution, self.settings.GridResolution,\n Texture.TInt, Texture.FR32i)\n\n self.directRadiance = Texture(\"Direct radiance\")\n self.directRadiance.setup3dTexture(self.settings.GridResolution, self.settings.GridResolution, self.settings.GridResolution,\n Texture.TFloat, Texture.FRgba16)\n\n print \"setting texture states ..\"\n for prepare in [self.directRadiance, self.unpackedVoxels]:\n prepare.setMagfilter(Texture.FTLinear)\n prepare.setMinfilter(Texture.FTLinearMipmapLinear)\n prepare.setWrapU(Texture.WMBorderColor)\n prepare.setWrapV(Texture.WMBorderColor)\n prepare.setWrapW(Texture.WMBorderColor)\n prepare.setBorderColor(Vec4(0,0,0,1))\n\n self.unpackedVoxels.setBorderColor(Vec4(0))\n # self.directRadiance.setBorderColor(Vec4(0))\n\n self.populateVPLNode = NodePath(\"PopulateVPLs\")\n self.clearTextureNode = NodePath(\"ClearTexture\")\n self.copyTextureNode = 
NodePath(\"CopyTexture\")\n self.generateMipmapsNode = NodePath(\"GenerateMipmaps\")\n self.convertGridNode = NodePath(\"ConvertGrid\")\n\n\n if False:\n surroundingBox = Globals.loader.loadModel(\n \"Models/CubeFix/Model.egg\")\n surroundingBox.setPos(self.settings.GridStart)\n surroundingBox.setScale(self.gridScale)\n\n # surroundingBox.setTwoSided(True)\n surroundingBox.flattenStrong()\n surroundingBox.reparentTo(Globals.render)\n\n self.bindTo(self.populateVPLNode, \"giData\")\n self.reloadShader()\n\n self._generateMipmaps(self.unpackedVoxels)",
"def setup(self):\n\n # Set up the Cameras\n viewport = (0, 0, self.window.width, self.window.height)\n self.camera = arcade.SimpleCamera(viewport=viewport)\n self.gui_camera = arcade.SimpleCamera(viewport=viewport)\n\n # Map name\n map_name = \":resources:tiled_maps/map_with_ladders.json\"\n\n # Layer Specific Options for the Tilemap\n layer_options = {\n LAYER_NAME_PLATFORMS: {\n \"use_spatial_hash\": True,\n },\n LAYER_NAME_MOVING_PLATFORMS: {\n \"use_spatial_hash\": False,\n },\n LAYER_NAME_LADDERS: {\n \"use_spatial_hash\": True,\n },\n LAYER_NAME_COINS: {\n \"use_spatial_hash\": True,\n },\n }\n\n # Load in TileMap\n self.tile_map = arcade.load_tilemap(map_name, TILE_SCALING, layer_options)\n\n # Initiate New Scene with our TileMap, this will automatically add all layers\n # from the map as SpriteLists in the scene in the proper order.\n self.scene = arcade.Scene.from_tilemap(self.tile_map)\n\n # Keep track of the score\n self.score = 0\n\n # Shooting mechanics\n self.can_shoot = True\n self.shoot_timer = 0\n\n # Set up the player, specifically placing it at these coordinates.\n self.player_sprite = PlayerCharacter()\n self.player_sprite.center_x = (\n self.tile_map.tile_width * TILE_SCALING * PLAYER_START_X\n )\n self.player_sprite.center_y = (\n self.tile_map.tile_height * TILE_SCALING * PLAYER_START_Y\n )\n self.scene.add_sprite(LAYER_NAME_PLAYER, self.player_sprite)\n\n # Calculate the right edge of the my_map in pixels\n self.end_of_map = self.tile_map.width * GRID_PIXEL_SIZE\n\n # -- Enemies\n enemies_layer = self.tile_map.object_lists[LAYER_NAME_ENEMIES]\n\n for my_object in enemies_layer:\n cartesian = self.tile_map.get_cartesian(\n my_object.shape[0], my_object.shape[1]\n )\n enemy_type = my_object.properties[\"type\"]\n if enemy_type == \"robot\":\n enemy = RobotEnemy()\n elif enemy_type == \"zombie\":\n enemy = ZombieEnemy()\n enemy.center_x = math.floor(\n cartesian[0] * TILE_SCALING * self.tile_map.tile_width\n )\n enemy.center_y = math.floor(\n (cartesian[1] + 1) * (self.tile_map.tile_height * TILE_SCALING)\n )\n if \"boundary_left\" in my_object.properties:\n enemy.boundary_left = my_object.properties[\"boundary_left\"]\n if \"boundary_right\" in my_object.properties:\n enemy.boundary_right = my_object.properties[\"boundary_right\"]\n if \"change_x\" in my_object.properties:\n enemy.change_x = my_object.properties[\"change_x\"]\n self.scene.add_sprite(LAYER_NAME_ENEMIES, enemy)\n\n # Add bullet spritelist to Scene\n self.scene.add_sprite_list(LAYER_NAME_BULLETS)\n\n # --- Other stuff\n # Set the background color\n if self.tile_map.background_color:\n self.window.background_color = self.tile_map.background_color\n\n # Create the 'physics engine'\n self.physics_engine = arcade.PhysicsEnginePlatformer(\n self.player_sprite,\n platforms=self.scene[LAYER_NAME_MOVING_PLATFORMS],\n gravity_constant=GRAVITY,\n ladders=self.scene[LAYER_NAME_LADDERS],\n walls=self.scene[LAYER_NAME_PLATFORMS],\n )",
"def set_hooks(self):\n for layer_idx, layer in enumerate(self.vgg_features):\n if layer_idx in self.layers_to_watch:\n layer.register_forward_hook(self.layer_watch_hooks(layer_idx))",
"def on_enter(self):\n\n super(BaseScene, self).on_enter()\n\n self.load_map()\n self.load_players()\n self.load_enemies()\n self.load_status_bar()\n\n self.enemies_layer.next_wave()",
"def properties(self):\n return self.world.find(self.ehandle)",
"def update_world(self):\n pass",
"def _findBottom(self,col):\n min = GAME_HEIGHT\n mpos = 0\n for x in range(self.getLengthAlien()):\n if self._aliens[x][col] != None and self._aliens[x][col].y < min:\n min = self._aliens[x][col].y\n mpos = x\n return mpos",
"def add_altitude(chunk, flightHeightFile): \n # Get the flight height\n try:\n # flightHeightFile = \"/SNOWDATA/SnowDrones-Processing/LDP/01-31-2020/RGB/100MEDIA/FlightHeight.txt\"\n with open(flightHeightFile , 'r') as myfile:\n data = myfile.read()\n alt = int(data)\n except:\n alt = int(55)\n\n # Update flight altitudes\n for camera in chunk.cameras:\n if camera.reference.location:\n coord = camera.reference.location\n camera.reference.location = PhotoScan.Vector([coord.x, coord.y, alt])",
"def populate_blocks_with_blockheights(self):\n for (height, block) in enumerate(self.blocks):\n block[\"height\"] = height",
"def getHeight(*args):"
] | [
"0.49975762",
"0.49543297",
"0.4924213",
"0.4898339",
"0.47883928",
"0.4763674",
"0.4763674",
"0.4763674",
"0.47636473",
"0.47518608",
"0.46925193",
"0.4639078",
"0.45988846",
"0.45873675",
"0.4581882",
"0.45816454",
"0.45418066",
"0.45392635",
"0.4534433",
"0.45265785",
"0.45071945",
"0.4505746",
"0.4505142",
"0.45029265",
"0.4498078",
"0.44939062",
"0.44901785",
"0.44839904",
"0.44812497",
"0.44748774"
] | 0.6640332 | 0 |
Tries to cast a value to a Python timedelta object. It supports datetime, time and nicely formatted strings. It will return timedelta(0) in case it fails. | def to_timedelta(value) -> timedelta:
# For values >=24hrs, Pandas converts them to a datetime object.
# For values <24hrs, Pandas converts them to time object.
if isinstance(value, timedelta):
return value
elif isinstance(value, datetime):
return value - datetime(1900, 1, 1) + timedelta(hours=24)
elif isinstance(value, time):
return datetime.combine(date.min, value) - datetime.min
elif isinstance(value, str):
duration_regex = re.compile(
r"^(?P<sign>-?)(?P<hours>[0-9]+?):(?P<minutes>[0-9]{2})$"
)
parts = duration_regex.match(value.strip())
if parts is not None:
sign = parts.group("sign")
hours = float(parts.group("hours"))
minutes = float(parts.group("minutes"))
if sign == "-":
hours = hours * (-1)
minutes = minutes * (-1)
return timedelta(hours=hours, minutes=minutes)
else:
logging.warning(
"Could not convert overtime value to timedelta "
"object. "
f"Values was {value} and type was {type(value)}."
)
else:
logging.warning(
"Could not convert overtime value to timedelta object. "
f"Value was {value} and type was {type(value)}."
)
return timedelta(0) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def to_timedelta(obj: \"Any\") -> \"timedelta\":\n if obj is None:\n raise ValueError(\"obj cannot be None\")\n if isinstance(obj, timedelta):\n return obj\n elif isinstance(obj, (int, float)):\n return timedelta(seconds=obj)\n elif isinstance(obj, Decimal):\n return timedelta(seconds=float(obj))\n elif isinstance(obj, str):\n return timedelta(seconds=float(obj))\n else:\n raise TypeError(\"could not convert {obj!r} to timedelta\")",
"def parse_timedelta(value: Optional[str]):\n if not value:\n return None\n unit = value[-1]\n amount = int(value[0:-1])\n if unit == \"h\":\n return timedelta(hours=amount)\n elif unit == \"m\":\n return timedelta(minutes=amount)\n elif unit == \"d\":\n return timedelta(days=amount)\n else:\n raise ValueError(f\"Invalid time unit: {value}\")",
"def handle(self, value, context: typing.MutableMapping):\n if isinstance(value, timedelta):\n return value\n elif isinstance(value, int):\n return timedelta(milliseconds=int(value * self.resolution))\n try:\n return timedelta(\n milliseconds=int(Decimal(value) * self.resolution))\n except (ValueError, InvalidOperation):\n pass\n\n match = self.duration_re.match(value)\n if not match:\n self.report(value, context)\n return None\n\n params = {\n key: int(value)\n for key, value in match.groupdict().items()\n if value\n }\n return timedelta(**params)",
"def dehydrate_timedelta(value):\n months = 0\n days = value.days\n seconds = value.seconds\n nanoseconds = 1000 * value.microseconds\n return Structure(ord(b\"E\"), months, days, seconds, nanoseconds)",
"def _convert_to_timedelta(time_diff):\n return timedelta(seconds=time_diff)",
"def timedelta_to_duration(obj: \"timedelta\") -> \"Duration\":\n d = Duration()\n d.seconds = int(obj.total_seconds())\n d.nanos = obj.microseconds * 1000\n return d",
"def convert_string_to_timedelta(string):\n # type: (str) -> datetime.timedelta\n if is_none_or_empty(string):\n raise ValueError('{} is not a valid timedelta string'.format(string))\n # get days\n tmp = string.split('.')\n if len(tmp) == 2:\n days = int(tmp[0])\n tmp = tmp[1]\n elif len(tmp) == 1:\n days = 0\n tmp = tmp[0]\n else:\n raise ValueError('{} is not a valid timedelta string'.format(string))\n # get total seconds\n tmp = tmp.split(':')\n if len(tmp) != 3:\n raise ValueError('{} is not a valid timedelta string'.format(string))\n totsec = int(tmp[2]) + int(tmp[1]) * 60 + int(tmp[0]) * 3600\n return datetime.timedelta(days, totsec)",
"def parse_timedelta(time_str):\n regex = re.compile(r'^((?P<days>[\\.\\d]+?)d)?((?P<hours>[\\.\\d]+?)h)?((?P<minutes>[\\.\\d]+?)m)?((?P<seconds>[\\.\\d]+?)s)?$')\n time_str=replace(time_str,{\n 'sec':'s',\n 'second': 's',\n 'seconds': 's',\n 'minute':'m',\n 'minutes':'m',\n 'min':'m',\n 'mn':'m',\n 'days':'d',\n 'day':'d',\n 'hours':'h',\n 'hour':'h'})\n parts = regex.match(time_str)\n if parts is None: raise ValueError(\"Could not parse any time information from '{}'. Examples of valid strings: '8h', '2d8h5m20s', '2m4s'\".format(time_str))\n time_params = {name: float(param) for name, param in parts.groupdict().items() if param}\n return timedelta(**time_params)",
"def parseTimeDelta(s):\n if s is None:\n return None\n\n d = re.match(r'((?P<days>\\d+) days, )?(?P<hours>\\d+):' r'(?P<minutes>\\d+):(?P<seconds>\\d+)', s).groupdict(0)\n return datetime.timedelta(**dict(((key, int(value)) for key, value in d.items())))",
"def _convert_to_timedelta(time_diff):\n return timedelta(microseconds=time_diff / _NANO_TO_MICRO)",
"def parse_delta(delta):\n match = TIMEDELTA_PATTERN.match(delta)\n if match:\n parts = {k: int(v) for k, v in match.groupdict().items() if v}\n return datetime.timedelta(**parts)",
"def str_to_timedelta(time_str):\n\n err_message = (\"Wrong format of the time string '\" + time_str +\n \"'. Example - 1w2d3h4m5s means 1 week, 2 days, 3 hours, \"\n \"4 minutes and 5 seconds. Note that order is important, '5s1w' \"\n \"is not valid as weeks may not precede seconds\")\n\n regex = re.compile(\n r'^((?P<weeks>\\d+?)w)?((?P<days>\\d+?)d)?((?P<hours>\\d+?)h)?((?P<minutes>\\d+?)m)?((?P<seconds>\\d+?)s)?$')\n parts = regex.search(time_str)\n if not parts:\n raise ValueError(err_message)\n\n parts = parts.groupdict()\n if len(filter(lambda x: parts[x] is not None, parts.keys())) == 0:\n raise ValueError(err_message)\n\n time_params = {}\n for (name, param) in parts.iteritems():\n if param:\n time_params[name] = int(param)\n return timedelta(**time_params)",
"def clean(self, value):\n super(DurationField, self).clean(value)\n try:\n return str_to_timedelta(value)\n except ValueError:\n raise ValidationError(self.default_error_messages['invalid'])",
"def string_to_timedelta(time_string: str) -> relativedelta:\n\n replace_dict = {\"years\": \"yrs\",\n \"yrs\": \"y\",\n \"months\": \"mon\",\n \"mon\": \"m\",\n \"days\": \"d\",\n \"hours\": \"H\",\n \"h\": \"H\",\n \"minutes\": \"min\",\n \"min\": \"M\",\n \"seconds\": \"sec\",\n \"sec\": \"S\",\n \"s\": \"S\",\n \" \": \"\"}\n\n for old in replace_dict.keys():\n new = replace_dict[old]\n time_string = time_string.replace(old, new)\n\n time_units = {\"y\": 0, \"m\": 0, \"d\": 0, \"H\": 0, \"M\": 0, \"S\": 0}\n\n # Extract all different time units from string\n for char in time_string:\n if char not in list(time_units):\n if not char.isdigit():\n raise ValueError(\"Invalid character in timedelta string.\")\n continue\n\n char_idx = time_string.find(char)\n time_units[char] = int(time_string[:char_idx])\n\n target_substring = time_string[:char_idx + 1]\n time_string = time_string.replace(target_substring, \"\")\n\n timedelta = relativedelta(years=time_units[\"y\"],\n months=time_units[\"m\"],\n days=time_units[\"d\"],\n hours=time_units[\"H\"],\n minutes=time_units[\"M\"],\n seconds=time_units[\"S\"])\n return timedelta",
"def timedelta(self, *a, **kw):\n from datetime import timedelta\n return timedelta(*a, **kw)",
"def test_int_to_timedelta(self):\n @converters.wrap\n def inner_test(param: datetime.timedelta):\n \"\"\"Make sure the parameter was converted correctly.\"\"\"\n self.assertEqual(param, datetime.timedelta(\n days=3, hours=2, minutes=5, seconds=43\n ))\n inner_test(param=266743)",
"def convert_timedelta(item):\r\n if isinstance(item, timedelta):\r\n seconds = int(item.total_seconds())\r\n hours, remainder = divmod(seconds, 3600)\r\n minutes, seconds = divmod(remainder, 60)\r\n formated = '{}h {}m {}s'.format(hours, minutes, seconds)\r\n else:\r\n raise TypeError(item, 'is not timedelta object')\r\n return formated",
"def timedelta_parse(string):\n string = string.strip()\n if not string:\n raise TypeError(f'{string!r} is not a valid time interval')\n # This is the format we get from sometimes PostgreSQL, sqlite,\n # and from serialization.\n d = re.match(\n r'^((?P<days>[-+]?\\d+) days?,? )?(?P<sign>[-+]?)(?P<hours>\\d+):'\n r'(?P<minutes>\\d+)(:(?P<seconds>\\d+(\\.\\d+)?))?$',\n string\n )\n if d:\n d = d.groupdict(0)\n if d['sign'] == '-':\n for k in 'hours', 'minutes', 'seconds':\n d[k] = '-' + d[k]\n d.pop('sign', None)\n else:\n # This is the more flexible format.\n d = re.match(\n r'^((?P<weeks>-?((\\d*\\.\\d+)|\\d+))\\W*w((ee)?(k(s)?)?)(,)?\\W*)?'\n r'((?P<days>-?((\\d*\\.\\d+)|\\d+))\\W*d(ay(s)?)?(,)?\\W*)?'\n r'((?P<hours>-?((\\d*\\.\\d+)|\\d+))\\W*h(ou)?(r(s)?)?(,)?\\W*)?'\n r'((?P<minutes>-?((\\d*\\.\\d+)|\\d+))\\W*m(in(ute)?(s)?)?(,)?\\W*)?'\n r'((?P<seconds>-?((\\d*\\.\\d+)|\\d+))\\W*s(ec(ond)?(s)?)?)?\\W*$',\n string\n )\n if not d:\n raise TypeError(f'{string!r} is not a valid time interval')\n d = d.groupdict(0)\n return datetime.timedelta(**{k: float(v) for k, v in d.items()})",
"def readable_timedelta(timedeltaobj):\n # stolen from https://stackoverflow.com/a/46928226/8207\n if not timedeltaobj:\n return '---'\n secs = timedeltaobj.total_seconds()\n timetot = \"\"\n if secs > 86400: # 60sec * 60min * 24hrs\n days = secs // 86400\n timetot += \"{} days\".format(int(days))\n secs = secs - days * 86400\n\n if secs > 3600:\n hrs = secs // 3600\n timetot += \" {} hours\".format(int(hrs))\n secs = secs - hrs * 3600\n\n if secs > 60:\n mins = secs // 60\n timetot += \" {} minutes\".format(int(mins))\n secs = secs - mins * 60\n\n if secs > 0:\n timetot += \" {} seconds\".format(int(secs))\n return timetot",
"def str_to_timedelta(time):\n (hours, minutes) = time.split(':')\n return datetime.timedelta(hours=int(hours), minutes=int(minutes))",
"def datetime_to_epoch_timedelta(obj: \"datetime\") -> \"timedelta\":\n if obj.tzinfo is not None and obj.tzinfo.utcoffset(obj) is not None:\n # aware time; translate to UTC\n obj = obj.astimezone(timezone.utc)\n obj = obj.replace(tzinfo=None)\n return obj - datetime(1970, 1, 1, 0, 0, 0)",
"def strptimedelta(timelapse):\n parser = re.compile(\n r\"(\"\n \"((?P<days>\\d+)d|day|days)?\"\n \"((?P<hours>\\d+)hr?)?\"\n \"((?P<min>\\d+)m(?!s))?\"\n \"((?P<sec>(\\d+.\\d+)|(\\d+))s)?\"\n \"((?P<ms>\\d+)ms)?\"\n \"((?P<us>\\d+)us)?\"\n \")?$\"\n )\n\n match = parser.match(s)\n if match is None:\n raise ValueError(\"'%s' time delta string has invalid format\" % timelapse)\n match = match.groupdict()\n\n args = {}\n for name, value in match.items():\n if not value:\n continue\n if name == \"min\":\n args[\"minutes\"] = int(value)\n elif name == \"sec\":\n args[\"seconds\"] = float(value)\n elif name == \"ms\":\n args[\"seconds\"] = args.get(\"seconds\", 0) + int(value) / 1000.0\n elif name == \"us\":\n args[\"seconds\"] = args.get(\"seconds\", 0) + int(value) / 1000000.0\n else:\n args[name] = int(value)\n\n return timedelta(**args)",
"def from_str(duration):\n\n if duration in (\"0\", \"+0\", \"-0\"):\n return datetime.timedelta()\n\n pattern = re.compile('([\\d\\.]+)([a-zµμ]+)')\n total = 0\n sign = -1 if duration[0] == '-' else 1\n matches = pattern.findall(duration)\n\n if not len(matches):\n raise Exception(\"Invalid duration {}\".format(duration))\n\n for (value, unit) in matches:\n if unit not in units:\n raise Exception(\n \"Unknown unit {} in duration {}\".format(unit, duration))\n try:\n total += float(value) * units[unit]\n except:\n raise Exception(\n \"Invalid value {} in duration {}\".format(value, duration))\n\n microseconds = total / _microsecond_size\n return datetime.timedelta(microseconds=sign * microseconds)",
"def timedelta(self) -> datetime.timedelta:\n factor = -1 if self.negative else 1\n return datetime.timedelta(\n hours=factor * self.hours, minutes=factor * self.minutes\n )",
"def create_timedelta():\n # timedelta(days, seconds, microseconds, milliseconds, minutes, hours, weeks)\n td = datetime.timedelta(microseconds=-1)\n # Why is this (-1, 86399, 999999)?\n # Because -1 days + (86,400 - 1) seconds = -1 second, and -1,000,000 microseconds + 999,999 microseconds = -1 microsecond\n print(td.days, td.seconds, td.microseconds) # (-1, 86399, 999999)",
"def dt_to_pytimedelta(self):\n return DateTimeDefault.register(pandas.Series.dt.to_pytimedelta)(self)",
"def freq_to_timedelta(freq):\n # Add '1' to freq that doesn't have any digit\n if isinstance(freq, str) and not bool(re.search(r\"\\d\", freq)):\n freq = \"1{}\".format(freq)\n\n # Convert str to datetime.timedelta\n return pd.to_timedelta(freq)",
"def parse_duration(duration: str) -> timedelta:\n match = DURATION_REGEX.match(duration)\n if not match:\n raise ValueError(f'Unable to convert \"{duration}\" to a timedelta.')\n sign = _parse_sign(match.group(1))\n years = _parse_int(match.group(2))\n months = _parse_int(match.group(3))\n days_or_weeks = _parse_int(match.group(4))\n is_weeks = match.group(5) == 'W'\n hours = _parse_int(match.group(6))\n minutes = _parse_int(match.group(7))\n seconds = _parse_int(match.group(8))\n\n total_days = days_or_weeks * 7 if is_weeks else days_or_weeks\n total_days += months * DAYS_IN_MONTH\n total_days += years * DAYS_IN_YEAR\n\n total_seconds = seconds\n total_seconds += minutes * SECONDS_IN_MINUTE\n total_seconds += hours * SECONDS_IN_HOUR\n total_seconds += total_days * SECONDS_IN_DAY\n\n total_seconds *= sign\n\n return timedelta(seconds=total_seconds)",
"def traverse(self, traverser, **kwargs):\n return traverser.timedelta(self, **kwargs)",
"def _parse_duration(\n duration_str: Optional[str]) -> Optional[datetime.timedelta]:\n if not duration_str:\n return None\n pattern = re.compile(r'(\\d+)(\\w)*')\n match = pattern.match(duration_str)\n if (not match or len(match.groups()) != 2 or\n match.group(2) not in {None, 's', 'm', 'h', 'd'}):\n raise ValueError(f'Unable to parse string duration `{duration_str}`.')\n int_value = int(match.group(1))\n if match.group(2) is None or match.group(2) == 's':\n pass\n elif match.group(2) == 'm':\n int_value *= 60\n elif match.group(2) == 'h':\n int_value *= 3600\n elif match.group(2) == 'd':\n int_value *= 86400\n else:\n raise ValueError(f'Unable to parse string duration `{duration_str}`.')\n return datetime.timedelta(seconds=int_value)"
] | [
"0.7840131",
"0.7054532",
"0.67763466",
"0.677437",
"0.67289245",
"0.66318434",
"0.65796596",
"0.638643",
"0.63777906",
"0.62535816",
"0.62382054",
"0.6190961",
"0.6165697",
"0.6086364",
"0.60375947",
"0.5968157",
"0.595131",
"0.593763",
"0.5933334",
"0.5932459",
"0.58310056",
"0.5774092",
"0.5760186",
"0.5751318",
"0.5746162",
"0.5733245",
"0.5690108",
"0.5631705",
"0.56090206",
"0.56002927"
] | 0.74394506 | 1 |
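A minimal usage sketch for the to_timedelta entry above, assuming that function is in scope; only the standard datetime module is used, and the expected values follow directly from the branches in the code.

from datetime import datetime, time, timedelta

# All assertions assume to_timedelta() from the entry above is importable.
assert to_timedelta(timedelta(hours=2)) == timedelta(hours=2)          # passthrough
assert to_timedelta(time(8, 30)) == timedelta(hours=8, minutes=30)     # time -> duration
assert to_timedelta("1:30") == timedelta(hours=1, minutes=30)          # "H:MM" string
assert to_timedelta("-1:30") == timedelta(hours=-1, minutes=-30)       # signed string
# Pandas represents durations >= 24h as datetimes anchored at 1900-01-01:
assert to_timedelta(datetime(1900, 1, 1, 2, 0)) == timedelta(hours=26)
assert to_timedelta("garbage") == timedelta(0)                         # fallback, logs a warning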
Sets the microseconds of a timedelta object to 0. | def td_remove_microseconds(td: timedelta) -> timedelta:
try:
return td - timedelta(microseconds=td.microseconds)
except ValueError:
logging.warning(
"There was an error removing the microseconds from a"
f" timedelta object. The object was {td}."
)
return timedelta(0) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _from_microseconds(value):\n return _UTC_EPOCH + datetime.timedelta(microseconds=value)",
"def changenonetotimedeltazero(s): \r\n if s=='None' or s is None: \r\n return pd.Timedelta(0)\r\n else:\r\n return s",
"def clear_time_override():\r\n utcnow.override_time = None",
"def zero(self):\n real_dt = self.clock.tick(self.framerate)\n #self.notify(GameClock.REAL_TIME_ADVANCE, real_dt)",
"def zero_timings(self):\r\n self.step = 0\r\n self.current_T = 0.0",
"def set_utc(date_time):\n utc = datetime.timezone(datetime.timedelta(0))\n date_time = date_time.replace(tzinfo=utc)\n return date_time",
"def reset_timer(self):\r\n self.time_minutes = 0\r\n self.time_seconds = 0",
"def __init__(self):\n super(FakeTime, self).__init__()\n # Note that time.time() and divmod return floating point values.\n timestamp, fraction_of_second = divmod(time.time(), 1)\n self._microseconds = int(fraction_of_second * 1000000)\n self._number_of_seconds = int(timestamp)\n self.precision = definitions.PRECISION_1_MICROSECOND",
"def __init__(self, days=0, seconds=0, microseconds=0, milliseconds=0, minutes=0, hours=0, weeks=0):\n # Between 0 and 86399 inclusive \n self.seconds = 0\n # Between -999999999 and 999999999 inclusive \n self.days = 0\n # Between 0 and 999999 inclusive \n self.microseconds = 0",
"def set_time(self, value: float):\n super().set_time(value)\n self.music.set_time(value)",
"def set_time(self, value: float):\n if value < 0:\n value = 0\n\n self.player.seek(value)",
"def set_time(self, value: float):\n raise NotImplementedError()",
"def setInitialTime(self, T0):\n raise \"use method setInitialTime of class ReactorNet\"\n #_cantera.reactor_setInitialTime(self.__reactor_id, T0)",
"def reset(self):\n\n self.elapsed_time = 0",
"def zero(self, value):\n raise NotImplementedError",
"def unsetTimeUnits(self):\n return _libsbml.Model_unsetTimeUnits(self)",
"def unsetTimeUnits(self):\n return _libsbml.Event_unsetTimeUnits(self)",
"def advance_time_seconds(seconds):\r\n advance_time_delta(datetime.timedelta(0, seconds))",
"def unsetTimeUnits(self):\n return _libsbml.KineticLaw_unsetTimeUnits(self)",
"def reset_sync_time(self):\n self._sync_timestamp = 0",
"def create_timedelta():\n # timedelta(days, seconds, microseconds, milliseconds, minutes, hours, weeks)\n td = datetime.timedelta(microseconds=-1)\n # Why is this (-1, 86399, 999999)?\n # Because -1 days + (86,400 - 1) seconds = -1 second, and -1,000,000 microseconds + 999,999 microseconds = -1 microsecond\n print(td.days, td.seconds, td.microseconds) # (-1, 86399, 999999)",
"def unsetTimeConversionFactor(self):\n return _libsbml.Submodel_unsetTimeConversionFactor(self)",
"def changenattotimedeltazero(s):\r\n #if type(s)==pd.tslib.NaTType:\r\n if pd.isnull(s):\r\n return pd.Timedelta(0)\r\n else:\r\n return s",
"def test_without_milliseconds(self):\n for seconds, expected_output in [(0, '0:00:00'), (1, '0:00:01'), (1.337, '0:00:01'), (2.9, '0:00:02'),\n (123456.78901234, '1 day, 10:17:36')]:\n with self.subTest(seconds=seconds):\n self.assertEqual(format_seconds(seconds, with_milliseconds=False), expected_output)",
"def microsecond(self, microsecond):\n\n self._microsecond = microsecond",
"def setInitialTime(self, t0):\n _cantera.reactornet_setInitialTime(self.__reactornet_id, t0)",
"def reset_timer():\n resetTimer = time.time()\n target_time.clear()\n target_time.append(resetTimer)",
"def set_umeastime(self, time):\n self.utime = time",
"def reset(self):\n self.cumtime = 0\n self.start_time = self.time()",
"def setStartTime(self, t0):\n self._simulator_.update(t0=t0)\n return"
] | [
"0.60122955",
"0.58582336",
"0.5800418",
"0.5604118",
"0.55773675",
"0.5552772",
"0.54199773",
"0.5383407",
"0.53719836",
"0.52956426",
"0.5292495",
"0.52892756",
"0.5240672",
"0.521666",
"0.5192984",
"0.5188158",
"0.51750684",
"0.5170097",
"0.5154471",
"0.5137825",
"0.5137553",
"0.5116093",
"0.5089402",
"0.5082809",
"0.507469",
"0.50440836",
"0.50364214",
"0.5028465",
"0.50222033",
"0.5010343"
] | 0.6468977 | 0 |
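A short check for the td_remove_microseconds entry above, again assuming that helper is in scope; the note about negative durations follows from how Python normalizes timedelta fields.

from datetime import timedelta

# Assumes td_remove_microseconds() from the entry above is in scope.
td = timedelta(hours=1, minutes=2, seconds=3, microseconds=456789)
assert td_remove_microseconds(td) == timedelta(hours=1, minutes=2, seconds=3)
assert td_remove_microseconds(timedelta(0)) == timedelta(0)
# Because .microseconds is always non-negative, negative durations are
# effectively floored toward negative infinity:
assert td_remove_microseconds(timedelta(microseconds=-1)) == timedelta(seconds=-1)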
Permutate a mesh, recording the maximum amount it deviates from the original mesh and the resulting value of an identifier function. | def permutations(mesh,
function=lambda x: x.identifier,
displacement_max=1e-8,
count=1000,
subdivisions=2,
cutoff=3600):
identifiers = []
start = time.time()
# do subdivisions
divided = [mesh.copy()]
for _j in range(subdivisions - 1):
divided.append(divided[-1].copy().subdivide())
for i, _displacement in enumerate(np.linspace(0.0,
displacement_max / mesh.scale,
count)):
# get one of the subdivided meshes
current = np.random.choice(divided).copy()
if i > (count / 10):
# run first bunch without tessellation permutation
current = current.permutate.tessellation()
# after the first few displace it a lot
transformed = trimesh.permutate.transform(current)
# noisy = trimesh.permutate.noise(transformed, displacement)
identifier = function(transformed)
identifiers.append(identifier)
if (time.time() - start) > cutoff:
log.debug('bailing for time:{} count:{}'.format(
time.time() - start,
i))
return np.array(identifiers)
return np.array(identifiers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def identifier_simple(mesh):\n # verify the cache once\n mesh._cache.verify()\n\n # don't check hashes during identifier as we aren't\n # changing any data values of the mesh inside block\n # if we did change values in cache block things would break\n with mesh._cache:\n # pre-allocate identifier so indexes of values can't move around\n # like they might if we used hstack or something else\n identifier = np.zeros(7, dtype=np.float64)\n # avoid thrashing the cache unnecessarily\n mesh_area = mesh.area\n # start with properties that are valid regardless of watertightness\n # note that we're going to try to make all parameters relative\n # to area so other values don't get blown up at weird scales\n identifier[0] = mesh_area\n # avoid divide-by-zero later\n if mesh_area < tol.merge:\n mesh_area = 1.0\n # topological constant and the only thing we can really\n # trust in this fallen world\n identifier[1] = mesh.euler_number\n\n # if we have a watertight mesh include volume and inertia\n if mesh.is_volume:\n # side length of a cube ratio\n # 1.0 for cubes, different values for other things\n identifier[2] = (((mesh_area / 6.0) ** (1.0 / 2.0)) /\n (mesh.volume ** (1.0 / 3.0)))\n # save vertices for radius calculation\n vertices = mesh.vertices - mesh.center_mass\n # we are going to special case radially symmetric meshes\n # to replace their surface area with ratio of their\n # surface area to a primitive sphere or cylinder surface area\n # this is because tessellated curved surfaces are really rough\n # to reliably hash as they are very sensitive to floating point\n # and tessellation error. By making area proportionate to a fit\n # primitive area we are able to reliably hash at more sigfigs\n if mesh.symmetry == 'radial':\n # cylinder height\n h = np.dot(vertices, mesh.symmetry_axis).ptp()\n # section radius summed per row then overall max\n R2 = np.dot((np.dot(vertices, mesh.symmetry_section.T)\n ** 2), [1, 1]).max()\n # area of a cylinder primitive\n area = (2 * np.pi * (R2**.5) * h) + (2 * np.pi * R2)\n # replace area in this case with area ratio\n identifier[0] = mesh_area / area\n elif mesh.symmetry == 'spherical':\n # handle a spherically symmetric mesh\n R2 = np.dot((vertices ** 2), [1, 1, 1]).max()\n area = 4 * np.pi * R2\n identifier[0] = mesh_area / area\n else:\n # if we don't have a watertight mesh add information about the\n # convex hull which is slow to compute and unreliable\n try:\n # get the hull area and volume\n hull = mesh.convex_hull\n hull_area = hull.area\n hull_volume = hull.volume\n except BaseException:\n # in-plane or single point geometry has no hull\n hull_area = 6.0\n hull_volume = 1.0\n # just what we're looking for in a hash but hey\n identifier[3] = mesh_area / hull_area\n # cube side length ratio for the hull\n identifier[4] = (((hull_area / 6.0) ** (1.0 / 2.0)) /\n (hull_volume ** (1.0 / 3.0)))\n # calculate maximum mesh radius\n vertices = mesh.vertices - mesh.centroid\n # add in max radius^2 to area ratio\n R2 = np.dot((vertices ** 2), [1, 1, 1]).max()\n identifier[5] = R2 / mesh_area\n\n # mirrored meshes will look identical in terms of\n # area, volume, etc: use a count of relative edge\n # lengths to differentiate identical but mirrored meshes\n # this doesn't work well on meshes with a small number of faces\n # TODO : compare with \"cross product of 2 orthogonal metrics\"\n # for a more principled way to detect mirrored meshes\n if len(mesh.faces) > 50:\n count = face_ordering(mesh).sum()\n sign = float(count) / len(mesh.faces)\n if abs(count) > 10 and 
abs(sign) > 0.02:\n identifier[6] = sign\n return identifier",
"def max(self):\n if self._mesh.is_1d():\n ind = 1\n elif self._mesh.is_2d():\n ind = 2\n else:\n if self._logger:\n self._logger.error(\"mesh dimension not implemented\")\n raise NotImplementedError(\"mesh dimension not implemented\")\n\n def __map(m):\n return m[ind]\n\n return self.data.map(\n __map\n ).max()",
"def _write_mesh_change(self, writer):\n bloc = \"\"\n grps = self._grp_names_for_mesh\n model = writer.get(Modelisation)\n self.model_give_dim = model.give_dim()\n #if grps and model: \n # lines = WC.Lines()\n # cmd = \"MAIL=MODI_MAILLAGE(\"\n # lines.add(cmd + \"reuse=MAIL,\")\n # lines.init_idt = \" \" * len(cmd)\n # lines.add(\"MAILLAGE=MAIL,\")\n # opt = \"ORIE_PEAU_%sD=_F(GROUP_MA=%s,),\"\n # #lines.add(opt % (model.give_dim(), tuple(grps)))\n # lines.add(opt % (self.model_give_dim, tuple(grps)))\n # lines.add(\");\")\n # bloc = lines.build_part()\n writer.subs(\"mesh_change_key\", bloc)",
"def _write_mesh_change(self, writer):\n bloc = \"\"\n grps = self._grp_names_for_mesh\n model = writer.get(Modelisation)\n self.model_give_dim = model.give_dim()\n #if grps and model: \n # lines = WC.Lines()\n # cmd = \"MAIL=MODI_MAILLAGE(\"\n # lines.add(cmd + \"reuse=MAIL,\")\n # lines.init_idt = \" \" * len(cmd)\n # lines.add(\"MAILLAGE=MAIL,\")\n # opt = \"ORIE_PEAU_%sD=_F(GROUP_MA=%s,),\"\n # #lines.add(opt % (model.give_dim(), tuple(grps)))\n # lines.add(opt % (self.model_give_dim, tuple(grps)))\n # lines.add(\");\")\n # bloc = lines.build_part()\n writer.subs(\"mesh_change_key\", bloc)",
"def _write_mesh_change(self, writer):\n bloc = \"\"\n grps = self._grp_names_for_mesh\n model = writer.get(Modelisation)\n self.model_give_dim = model.give_dim()\n #if grps and model: \n # lines = WC.Lines()\n # cmd = \"MAIL=MODI_MAILLAGE(\"\n # lines.add(cmd + \"reuse=MAIL,\")\n # lines.init_idt = \" \" * len(cmd)\n # lines.add(\"MAILLAGE=MAIL,\")\n # opt = \"ORIE_PEAU_%sD=_F(GROUP_MA=%s,),\"\n # #lines.add(opt % (model.give_dim(), tuple(grps)))\n # lines.add(opt % (self.model_give_dim, tuple(grps)))\n # lines.add(\");\")\n # bloc = lines.build_part()\n writer.subs(\"mesh_change_key\", bloc)",
"def _write_mesh_change(self, writer):\n bloc = \"\"\n grps = self._grp_names_for_mesh\n model = writer.get(Modelisation)\n if grps and model: \n lines = WC.Lines()\n cmd = \"MAIL=MODI_MAILLAGE(\"\n lines.add(cmd + \"reuse=MAIL,\")\n lines.init_idt = \" \" * len(cmd)\n lines.add(\"MAILLAGE=MAIL,\")\n opt = \"ORIE_PEAU_%sD=_F(GROUP_MA=%s,),\"\n lines.add(opt % (model.give_dim(), tuple(grps)))\n lines.add(\");\")\n bloc = lines.build_part()\n writer.subs(\"mesh_change_key\", bloc)",
"def mesh_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"mesh_id\")",
"def new_mesh_set(self, all_meshes):\n if isinstance(all_meshes, Mesh):\n mesh_tp = []\n mesh_tp.append(all_meshes)\n all_meshes = mesh_tp\n\n if not isinstance(all_meshes, list):\n raise TypeError(\"Please send a list of mesh to update_mesh\")\n self.all_meshes = all_meshes\n\n # Remove previous actors from the scene\n for actor in self.mesh_actors:\n self.parent_window.ren.RemoveActor(actor)\n self.mesh_actors = list()\n\n # Create the geometry of a point (the coordinate) points = vtkPoints()\n for i, mesh in enumerate(self.all_meshes):\n if mesh.time.size != 1:\n raise IndexError(\"Mesh should be from one frame only\")\n\n points = vtkPoints()\n for j in range(mesh.channel.size):\n # points.InsertNextPoint([0, 0, 0])\n points.InsertNextPoint(mesh.data[:3, j, 0].tolist())\n\n # Create an array for each triangle\n draw_patch = not mesh.automatic_triangles and not self.force_wireframe\n if draw_patch:\n poly_type = vtkPolygon\n n_ids = 3\n color = self.patch_color[i]\n else:\n poly_type = vtkPolyLine\n n_ids = 4\n color = self.mesh_color\n cells = vtkCellArray()\n\n # Create the polygons\n for j in range(mesh.triangles.shape[1]):\n poly = poly_type()\n poly.GetPointIds().SetNumberOfIds(n_ids) # make a tri\n for k in range(len(mesh.triangles[:, j])):\n poly.GetPointIds().SetId(k, mesh.triangles[k, j])\n if not draw_patch:\n poly.GetPointIds().SetId(3, mesh.triangles[0, j]) # Close the triangle\n cells.InsertNextCell(poly)\n\n poly_data = vtkPolyData()\n poly_data.SetPoints(points)\n if draw_patch:\n poly_data.SetPolys(cells)\n else:\n poly_data.SetLines(cells)\n\n mapper = vtkPolyDataMapper()\n mapper.SetInputData(poly_data)\n\n # Create an actor\n self.mesh_actors.append(vtkActor())\n self.mesh_actors[i].SetMapper(mapper)\n self.mesh_actors[i].GetProperty().SetColor(color)\n self.mesh_actors[i].GetProperty().SetOpacity(self.mesh_opacity)\n\n self.parent_window.ren.AddActor(self.mesh_actors[i])\n\n # Update marker position\n self.update_mesh(self.all_meshes)",
"def mesh_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"mesh_id\")",
"def MeshMachine(main):\n\n # oDesign definition\n oDesign = main['ANSYS']['oDesign']\n\n # Data for the rotor mesh\n RotorName = main['ANSYS']['Rotor&Magnets']['Name'][0]\n RotorNumMaxElem = main['ANSYS']['Mesh']['Rotor']['NumMaxElem']\n RotorMaxLength = main['ANSYS']['Mesh']['Rotor']['MaxLength']\n\n # Data for the magnets mesh\n PMNames = main['ANSYS']['Rotor&Magnets']['PMNames']\n PMNumMaxElem = main['ANSYS']['Mesh']['Magnets']['NumMaxElem']\n PMMaxLength = main['ANSYS']['Mesh']['Magnets']['MaxLength']\n\n # Data for the Stator mesh\n StatorName = main['ANSYS']['Stator']['Name']\n StatorNormalDev = main['ANSYS']['Mesh']['Stator']['NormalDev']\n StatorAspectRatio = main['ANSYS']['Mesh']['Stator']['AspectRatio']\n\n # Data for the Stator mesh\n CoilNames = main['ANSYS']['Winding']['CoilNames']\n WindingNumMaxElem = main['ANSYS']['Mesh']['Winding']['NumMaxElem']\n WindingMaxLength = main['ANSYS']['Mesh']['Winding']['MaxLength']\n\n WindingName = []\n for phase in CoilNames:\n for direction in phase:\n WindingName += direction\n\n # Creating meshes\n oModule = oDesign.GetModule(\"MeshSetup\")\n\n # Rotor meshes\n oModule.AssignLengthOp(\n [\n \"NAME:Rotor\",\n \"RefineInside:=\", True,\n \"Enabled:=\", True,\n \"Objects:=\", [RotorName],\n \"RestrictElem:=\", False,\n \"NumMaxElem:=\", str(RotorNumMaxElem),\n \"RestrictLength:=\", True,\n \"MaxLength:=\", str(RotorMaxLength)+\"mm\"\n ]\n )\n # Magnet meshes\n oModule.AssignLengthOp(\n [\n \"NAME:Magnets\",\n \"RefineInside:=\", True,\n \"Enabled:=\", True,\n \"Objects:=\", PMNames,\n \"RestrictElem:=\", False,\n \"NumMaxElem:=\", str(PMNumMaxElem),\n \"RestrictLength:=\", True,\n \"MaxLength:=\", str(PMMaxLength)+\"mm\"\n ]\n )\n # Stator meshes\n oModule.AssignTrueSurfOp(\n [\n \"NAME:Stator\",\n \"Objects:=\", [StatorName],\n \"CurvedSurfaceApproxChoice:=\", \"ManualSettings\",\n \"SurfDevChoice:=\", 0,\n \"NormalDevChoice:=\", 2,\n \"NormalDev:=\", str(StatorNormalDev) + \"deg\",\n \"AspectRatioChoice:=\", 2,\n \"AspectRatio:=\", str(StatorAspectRatio)\n ]\n )\n\n # Coil meshes\n oModule.AssignLengthOp(\n [\n \"NAME:Coils\",\n \"RefineInside:=\"\t, True,\n \"Enabled:=\"\t\t, True,\n \"Objects:=\"\t\t, WindingName,\n \"RestrictElem:=\"\t, False,\n \"NumMaxElem:=\"\t\t, str(WindingNumMaxElem),\n \"RestrictLength:=\"\t, True,\n \"MaxLength:=\"\t\t, str(WindingMaxLength) +\"mm\"\n ]\n )\n\n return main",
"def parallel_nelder_mead_worker(obj_func, simplex, f_vals, j, P, opt_params):\n # Unpack the input parameters\n alpha = opt_params[0] # reflection parameter\n beta = opt_params[1] # contraction parameter\n gamma = opt_params[2] # expansion parameter\n my_point = simplex[j, :] # vertex to update\n my_val = f_vals[j] # value at the vertex to update\n best_val = f_vals[0] # best value in the vertex\n next_val = f_vals[j - 1] # next best point in the simplex\n evals = 0\n\n # Calculate the centroid of the \"good\" simplex points\n N = simplex.shape[0] # number of points in simplex\n centroid = np.mean(simplex[0 : (N - P), :], axis=0)\n\n # Calculate the reflection point and its function value\n r_point = centroid + alpha * (centroid - my_point)\n r_val = obj_func(r_point)\n evals += 1\n\n # Case 1: the reflection point is better than best point\n if r_val < best_val:\n e_point = r_point + gamma * (r_point - centroid)\n e_val = obj_func(e_point) # Calculate expansion point\n evals += 1\n if e_val < r_val:\n new_point = e_point\n new_val = e_val\n else:\n new_point = r_point\n new_val = r_val\n # Case 2: the reflection point is better than the next best point\n elif r_val < next_val:\n new_point = r_point # Report reflection point\n new_val = r_val\n # Case 3: the reflection point is worse than the next best point\n else:\n if r_val < my_val:\n temp_point = r_point # Check whether reflection or original point\n temp_val = r_val # is better and use it temporarily\n else:\n temp_point = my_point\n temp_val = my_val\n c_point = temp_point + beta * (centroid - temp_point)\n c_val = obj_func(c_point) # Calculate contraction point\n evals += 1\n if c_val < temp_val:\n new_point = c_point\n new_val = c_val # Check whether the contraction point is better\n else:\n new_point = temp_point\n new_val = temp_val\n\n # Return the outputs\n return new_point, new_val, evals",
"def mesh_hook(mesh, mode):\n if mode == 'read':\n mesh = gen_block_mesh(dims, shape, [0, 0], name='user_block',\n verbose=False)\n return mesh\n\n elif mode == 'write':\n pass",
"def new_mesh_set(self, all_meshes):\n if isinstance(all_meshes, Mesh):\n mesh_tp = MeshCollection()\n mesh_tp.append(all_meshes)\n all_meshes = mesh_tp\n\n if all_meshes.get_num_frames() is not 1:\n raise IndexError(\"Mesh should be from one frame only\")\n\n if not isinstance(all_meshes, MeshCollection):\n raise TypeError(\"Please send a list of mesh to update_mesh\")\n self.all_meshes = all_meshes\n\n # Remove previous actors from the scene\n for actor in self.mesh_actors:\n self.parent_window.ren.RemoveActor(actor)\n self.mesh_actors = list()\n\n # Create the geometry of a point (the coordinate) points = vtkPoints()\n for (i, mesh) in enumerate(self.all_meshes):\n points = vtkPoints()\n for j in range(mesh.get_num_vertex()):\n points.InsertNextPoint([0, 0, 0])\n\n # Create an array for each triangle\n cell = vtkCellArray()\n for j in range(mesh.get_num_triangles()): # For each triangle\n line = vtkPolyLine()\n line.GetPointIds().SetNumberOfIds(4)\n for k in range(len(mesh.triangles[j])): # For each index\n line.GetPointIds().SetId(k, mesh.triangles[j, k])\n line.GetPointIds().SetId(3, mesh.triangles[j, 0]) # Close the triangle\n cell.InsertNextCell(line)\n poly_line = vtkPolyData()\n poly_line.SetPoints(points)\n poly_line.SetLines(cell)\n\n # Create a mapper\n mapper = vtkPolyDataMapper()\n mapper.SetInputData(poly_line)\n\n # Create an actor\n self.mesh_actors.append(vtkActor())\n self.mesh_actors[i].SetMapper(mapper)\n\n self.parent_window.ren.AddActor(self.mesh_actors[i])\n self.parent_window.ren.ResetCamera()\n\n # Update marker position\n self.update_mesh(self.all_meshes)",
"def update_mesh(remote, objectid):\n cmd1 = mmapi.StoredCommands()\n key1 = cmd1.AppendSceneCommand_UpdateMesh(objectid)\n remote.runCommand(cmd1)",
"def compute_PPmax_Gompertz(particle, fieldset, time):\n if particle.active == 1:\n PPnorm = fieldset.c * particle.M\n particle.PPmax = PPnorm * fieldset.P0",
"def _final_mesh(self):\n assert (\n \"final_mesh\" in self.__dict__.keys()\n ), \"Final Mesh does not exist yet - please run multi-view optimization before getting\"\n return self.final_mesh",
"def max_varEff(df):\n if isinstance(df, str):\n df = pd.read_csv(df, index_col=0)\n\n ref_list = ['mmsplice_ref_acceptorIntron',\n 'mmsplice_ref_acceptor',\n 'mmsplice_ref_exon',\n 'mmsplice_ref_donor',\n 'mmsplice_ref_donorIntron']\n alt_list = ['mmsplice_alt_acceptorIntron',\n 'mmsplice_alt_acceptor',\n 'mmsplice_alt_exon',\n 'mmsplice_alt_donor',\n 'mmsplice_alt_donorIntron']\n\n if 'mmsplice_dlogitPsi' not in df.columns:\n X = df[alt_list].values - df[ref_list].values\n X = transform(X)\n df['mmsplice_dlogitPsi'] = LINEAR_MODEL.predict(X)\n\n dfMax = df.groupby(['ID'], as_index=False).agg(\n {'mmsplice_dlogitPsi': lambda x: max(x, key=abs)})\n\n dfMax = dfMax.merge(df, how='left', on=['ID', 'mmsplice_dlogitPsi'])\n dfMax = dfMax.drop_duplicates(subset=['ID', 'mmsplice_dlogitPsi'])\n # dfMax = dfMax.drop(\"mmsplice_dlogitPsi\", axis=1)\n return dfMax",
"def WriteGmsh(self, filename, write_surface_info=False):\n\n self.__do_essential_memebers_exist__()\n\n mesh = deepcopy(self)\n p = self.InferPolynomialDegree()\n\n # if p > 1:\n # mesh = self.GetLinearMesh(remap=True)\n\n element_type = mesh.element_type\n edim = mesh.InferElementalDimension()\n\n # THESE TAGS ARE DIFFERENT FROM THE GMSH READER TAGS\n bel = -1\n if element_type == \"line\":\n el = 1\n elif element_type == \"tri\":\n if p == 1:\n el = 2\n bel = 1\n elif p == 2:\n el = 9\n bel = 8\n elif p == 3:\n el = 21\n bel = 26\n elif p == 4:\n el = 23\n bel = 27\n elif element_type == \"quad\":\n if p == 1:\n el = 3\n bel = 1\n elif p == 2:\n el = 10\n bel = 8\n elif p == 3:\n el = 36\n bel = 26\n elif p == 4:\n el = 37\n bel = 27\n elif element_type == \"tet\":\n if p == 1:\n el = 4\n bel = 2\n elif p == 2:\n el = 11\n bel = 9\n elif element_type == \"hex\":\n if p == 1:\n el = 5\n bel = 3\n else:\n el = 12\n bel = 10\n else:\n raise ValueError(\"Element type not understood\")\n\n\n elements = np.copy(mesh.elements).astype(np.int64)\n points = mesh.points[np.unique(elements),:]\n\n # TRI6\n if el == 9:\n elements = elements[:,[0, 1, 2, 3, 5, 4]]\n # TRI10\n elif el == 21:\n elements = elements[:,[0, 1, 2, 3, 4, 7, 9, 8, 5, 6]]\n # TRI15\n elif el == 23:\n elements = elements[:,[0, 1, 2, 3, 4, 5, 9, 12, 14, 13, 10, 6, 7, 8, 11]]\n # QUAD9\n elif el == 10:\n elements = elements[:,[0, 1, 2, 3, 4, 7, 8, 5, 6]]\n # QUAD16\n elif el == 36:\n elements = elements[:,[0, 1, 2, 3, 4, 5, 9, 13, 15, 14, 10, 6, 7, 8, 12, 11]]\n # QUAD25\n elif el == 37:\n elements = elements[:,[0, 1, 2, 3, 4, 5, 6, 11, 16, 21, 24, 23, 22, 17, 12, 7, 8, 9, 10, 15, 20, 19, 18, 13, 14]]\n # TET10\n elif el == 11:\n # Tet 2\n elements = elements[:,[0, 1, 2, 3, 4, 6, 5, 7, 9, 8]]\n # HEX27\n elif el == 12:\n elements = elements[:,[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 13, 11, 14, 12, 15, 16, 22, 23, 25, 26, 10, 17, 18, 20, 21, 24, 19]]\n\n # Take care of a corner case where nnode != points.shape[0]\n if mesh.nnode != points.shape[0]:\n mesh.nnode = points.shape[0]\n\n if points.shape[1] == 2:\n points = np.hstack((points,np.zeros((points.shape[0],1))))\n\n points_repr = np.zeros((points.shape[0],points.shape[1]+1), dtype=object)\n points_repr[:,0] = np.arange(mesh.nnode) + 1\n points_repr[:,1:] = points\n\n elements_repr = np.zeros((elements.shape[0],elements.shape[1]+5), dtype=object)\n elements_repr[:,0] = np.arange(mesh.nelem) + 1\n elements_repr[:,1] = el\n elements_repr[:,2] = 2\n elements_repr[:,3] = 0\n elements_repr[:,4] = 1\n elements_repr[:,5:] = elements + 1\n\n if write_surface_info:\n\n if edim == 3:\n boundary = np.copy(mesh.faces).astype(np.int64)\n elif edim == 2:\n boundary = np.copy(mesh.edges).astype(np.int64)\n\n boundary_repr = np.zeros((boundary.shape[0],boundary.shape[1]+5), dtype=object)\n boundary_repr[:,0] = np.arange(boundary.shape[0]) + 1\n boundary_repr[:,1] = bel\n boundary_repr[:,2] = 2\n boundary_repr[:,3] = 0\n boundary_repr[:,4] = 1\n boundary_repr[:,5:] = boundary + 1\n\n elements_repr[:,0] += boundary.shape[0]\n\n gmsh_nelem = mesh.nelem + boundary.shape[0]\n else:\n gmsh_nelem = mesh.nelem\n\n with open(filename, 'w') as f:\n f.write(\"$MeshFormat\\n\")\n f.write(\"2.2 0 8\\n\")\n f.write(\"$EndMeshFormat\\n\")\n f.write(\"$Nodes\\n\")\n f.write(str(mesh.nnode) + \"\\n\")\n\n np.savetxt(f, points_repr, fmt=\"%s\")\n\n f.write(\"$EndNodes\\n\")\n f.write(\"$Elements\\n\")\n f.write(str(gmsh_nelem) + \"\\n\")\n\n if write_surface_info:\n np.savetxt(f, boundary_repr, fmt=\"%s\")\n\n 
np.savetxt(f, elements_repr, fmt=\"%s\")\n\n f.write(\"$EndElements\\n\")",
"def rescale_and_translate_mesh(data, meshname,\n mesh_new_origin=np.array([0.,0.,0.]),\n mesh_new_size=np.array([1.,1.,1.])):\n # get mesh nodes as hdf5 node (directly in memory data)\n # and rescale mesh\n nodes = data.get_mesh_nodes(meshname, as_numpy=False)\n # Translate mesh to origin 0,0,0\n nodes[:,0] = nodes[:,0] - nodes[:,0].min()\n nodes[:,1] = nodes[:,1] - nodes[:,1].min()\n if nodes.shape[1] == 3:\n nodes[:,2] = nodes[:,2] - nodes[:,2].min()\n # Get mesh new X,Y and Z sizes\n X_new_size = mesh_new_size[0]\n Y_new_size = mesh_new_size[1]\n if nodes.shape[1] == 3:\n Z_new_size = mesh_new_size[2]\n # Compute old mesh corrdinate range for each dimension\n X_size = nodes[:,0].max() - nodes[:,0].min()\n Y_size = nodes[:,1].max() - nodes[:,1].min()\n if nodes.shape[1] == 3:\n Z_size = nodes[:,2].max() - nodes[:,2].min()\n # Dilate mesh along each dimension to new mesh size\n nodes[:,0] = (nodes[:,0]/X_size)*X_new_size\n nodes[:,1] = (nodes[:,1]/Y_size)*Y_new_size\n if nodes.shape[1] == 3:\n nodes[:,2] = (nodes[:,2]/Z_size)*Z_new_size\n # Translate mesh\n nodes[:,0] = nodes[:,0] + mesh_new_origin[0]\n nodes[:,1] = nodes[:,1] + mesh_new_origin[1]\n if nodes.shape[1] == 3:\n nodes[:,2] = nodes[:,2] + mesh_new_origin[2]\n # flush new nodes in memory\n nodes.flush()\n return",
"def WriteMesh(fileW, mesh, exportMatrix, materials, labels, isCollision = False):\n from . import fileWriter, enums\n global DO\n\n debug(\" Writing BASIC:\", mesh.name)\n start = fileW.tell()\n\n # the verts and normals in pairs and a list that translates between original id and distinct id\n distVertNrm = VertNrmPairs(mesh.vertices, exportMatrix) \n\n #creating a bounding box and updating it while writing vertices\n bounds = BoundingBox()\n\n #writing vertices\n verticesAddress = fileW.tell()\n for v in distVertNrm:\n v[0].write(fileW)\n bounds.checkUpdate(v[0])\n\n bounds.calcCenter()\n bounds.calcRadius(mesh.vertices)\n\n #writing normals\n normalsAddress = fileW.tell()\n for v in distVertNrm:\n v[1].write(fileW)\n\n # creating the loops (as an index list)\n\n if isCollision:\n polyVs = PolyVert.collisionFromLoops(mesh)\n else:\n polyVs = PolyVert.fromLoops(mesh)\n\n # making them strips, each set is for one mesh set\n materialLength = len(materials)\n if materialLength < 2:\n polyT = list()\n for p in polyVs:\n polyT.extend(p)\n polyVs = [polyT]\n\n\n polyStrips = PolyVert.toStrips(polyVs)\n\n if DO:\n for i,s in enumerate(polyStrips):\n if s is not None:\n if s[0] == enums.PolyType.Strips:\n print(\" strips\", str(i)+\":\", len(s) - 1)\n else:\n print(\" tris\", str(i)+\":\", (len(s) - 1) / 3)\n\n #writing the mesh data and getting the mesh sets\n meshSets = list() #[None] * len(polyStrips)\n\n if materialLength == 0 or isCollision:\n for i, p in enumerate(polyStrips):\n if p == None:\n continue\n meshSets.append(PolyVert.write(fileW, mesh, 0, i, p, isCollision))\n else:\n for i, p in enumerate(polyStrips):\n if p == None:\n continue\n matID = 0\n try:\n for mid, m in enumerate(materials):\n if m.name == mesh.materials[i].name:\n matID = mid\n break\n except ValueError:\n debug(\" material\", mesh.materials[i].name, \"not found\")\n meshSets.append(PolyVert.write(fileW, mesh, matID, i, p))\n\n # writing the mesh sets\n meshSetAddress = fileW.tell()\n\n for m in meshSets:\n m.write(fileW, labels)\n\n #adding mesh address to the labels\n labels[\"bsc_\" + mesh.name] = fileW.tell()\n #labels[mesh.name] = fileW.tell()\n\n #writing addresses\n\n labels[\"bsc_\" + mesh.name + \"_v\"] = verticesAddress\n fileW.wUInt(verticesAddress)\n labels[\"bsc_\" + mesh.name + \"_nrm\"] = normalsAddress\n fileW.wUInt(normalsAddress)\n fileW.wUInt(len(distVertNrm))\n labels[\"bsc_\" + mesh.name + \"_ml\"] = meshSetAddress # ml = \"mesh list\"\n fileW.wUInt(meshSetAddress)\n fileW.wUInt(0x00000010) # material address is always the same (at least the way this addon exports the format)\n fileW.wUShort(len(meshSets))\n fileW.wUShort(materialLength) # material count\n bounds.write(fileW)\n fileW.wUInt(0) #sa1 gap\n\n if DO:\n print(\" vert addr:\", '{:08x}'.format(verticesAddress))\n print(\" nrm addr:\", '{:08x}'.format(normalsAddress))\n print(\" vertices:\", len(distVertNrm))\n print(\" set addr:\", '{:08x}'.format(meshSetAddress))\n print(\" sets:\", len(meshSets))\n print(\" mats:\", materialLength)\n print(\" BASIC length:\", (fileW.tell() - start))\n print(\"----- \\n\")\n\n fileW.align(4)",
"def set_mesh(\n self,\n edge_pt_min: int = 15,\n edge_pt_max=20,\n mesh_elements: str = \"Quad\",\n global_mesh_size: float = 0.25,\n max_surf_offset: float = 0.01,\n max_dihedral_angle: float = 15,\n ):\n if self.comm.rank == 0:\n self._aim.input.Edge_Point_Min = edge_pt_min\n self._aim.input.Edge_Point_Max = edge_pt_max\n self._aim.input.Mesh_Elements = mesh_elements\n self._aim.input.Tess_Params = [\n global_mesh_size,\n max_surf_offset,\n max_dihedral_angle,\n ]\n self._is_setup = True\n return self",
"def compute_mesh_area_numpy(mesh):\n pass",
"def _update_max_value(k, mi, by_gene):\n # Update the max mutual info.\n if mi is not None:\n by_gene[k] = max(by_gene.get(k, 0), mi)",
"def callback(mesh):\n shrunk = mesh.shrink(0.9)\n mesh.overwrite(shrunk) # must operate \"in-place\" by overwrite",
"def subdivision(mesh):\n\t\n\t\n\t# 1. generate new nodes in the centre of quad\n\t# 1/4 o-------o 1/4 o: existing vertices\n\t# | | *: newly-generated vertices\n\t# | * |\n\t# | |\n\t# 1/4 o-------o 1/4\n\n\tnew_coor = mesh.give_nodes().give_coor()\n\t\n\tfor face_index in range(mesh.give_model_inf()[2]): \n\t\tnew_x, new_y, new_z = (0, 0, 0)\n\t\tfor vertex_index in range(4):\n\t\t\tmesh.give_faces()\n\t\t\tnode_index = mesh.give_faces().give_node_list(face_index)[vertex_index]\n\n\t\t\tnew_x += 0.25*mesh.give_nodes().give_coor(node_index)[0]\n\t\t\tnew_y += 0.25*mesh.give_nodes().give_coor(node_index)[1]\n\t\t\tnew_z += 0.25*mesh.give_nodes().give_coor(node_index)[2]\n\t\t\t\n\t\tnew_coor.append((new_x, new_y, new_z))\n\t\t\n\t# generating new nodes on the edge\n\t# figure out one edge is shared by how many surfaces\n\tedge_shared_by_faces_list = helper.find_edge_shared_by_which_faces(mesh.give_edges(), mesh.give_faces())\n\t\n\tfor edge_index in range(mesh.give_model_inf()[1]):\n\n\t\tnew_x, new_y, new_z = (0., 0., 0.)\n\t\t\n\t# 2. generate new node on boundary edge\n\t# o: existing vertices\n\t# 1/2 o---*---o 1/2 *: newly-generated vertices\n\t# \n\n\t\tnew_coor = mesh.give_nodes().give_coor()\n\t\tif len(edge_shared_by_faces_list[edge_index]) == 1:\t\n\t\t\tnew_x, new_y, new_z = (0., 0., 0.)\n\t\t\tfor vertex_index in range(2):\n\t\t\t\tthis_node = mesh.give_edges().give_node(edge_index)[vertex_index]\n\t\t\t\tnew_x += 0.5*mesh.give_nodes().give_coor()[this_node][0]\n\t\t\t\tnew_y += 0.5*mesh.give_nodes().give_coor()[this_node][1]\n\t\t\t\tnew_z += 0.5*mesh.give_nodes().give_coor()[this_node][2]\n\t\t\t\t\n\t\t\tnew_coor.append((new_x, new_y, new_z))\n\t\t\t\t\n\t# 3. generate new node on interior edge\n\t# 1/16 o-------o 1/16 o: existing vertices\n\t# | | *: newly-generated vertices\n\t# 3/8 o---*---o 3/8\n\t# | |\n\t# 1/16 o-------o 1/16\n\n\t\telse:\n\t\t\tnew_x, new_y, new_z = (0., 0., 0.)\n\t\t\tconsidered_node = []\n\t\t\tfor vertex_index in range(2):\n\t\t\t\tthis_node = mesh.give_edges().give_node(edge_index)[vertex_index]\n\t\t\t\tconsidered_node.append(this_node)\n\t\t\t\tnew_x += 3./8.*mesh.give_nodes().give_coor()[this_node][0]\n\t\t\t\tnew_y += 3./8.*mesh.give_nodes().give_coor()[this_node][1]\n\t\t\t\tnew_z += 3./8.*mesh.give_nodes().give_coor()[this_node][2]\n\t\t\t\n\t\t\t# faces contain this node\n\t\t\tpotential_node = []\n\t\t\tfor face_index in edge_shared_by_faces_list[edge_index]:\t\t\n\t\t\t\tfor vertex_index in range(4):\n\t\t\t\t\t\tpotential_node.append(mesh.give_faces().give_node_list(face_index)[vertex_index])\n\t\t\t\n\t\t\touter_node = []\n\t\t\tfor node in potential_node:\n\t\t\t\tif (node not in considered_node) & (node not in outer_node):\n\t\t\t\t\touter_node.append(node)\n\t\t\t\t\t\n\t\t\tfor vertex_index in outer_node:\n\t\t\t\tnew_x += 1./16.*mesh.give_nodes().give_coor()[vertex_index][0]\n\t\t\t\tnew_y += 1./16.*mesh.give_nodes().give_coor()[vertex_index][1]\n\t\t\t\tnew_z += 1./16.*mesh.give_nodes().give_coor()[vertex_index][2]\n\t\t\t\n\t\t\tnew_coor.append((new_x, new_y, new_z))\n\n\t# update the links of edges and surfaces\n\tnew_edge_list = []\n\tnew_face_list = []\n\tfor face_index in range(mesh.give_model_inf()[2]):\n\t\told_node0 = mesh.give_faces().give_node_list(face_index)[0]\n\t\told_node1 = mesh.give_faces().give_node_list(face_index)[1]\n\t\told_node2 = mesh.give_faces().give_node_list(face_index)[2]\n\t\told_node3 = mesh.give_faces().give_node_list(face_index)[3]\n\t\t\n\t\told_edge0 = 
mesh.give_faces().give_edge_list(face_index)[0]\n\t\told_edge1 = mesh.give_faces().give_edge_list(face_index)[1]\n\t\told_edge2 = mesh.give_faces().give_edge_list(face_index)[2]\n\t\told_edge3 = mesh.give_faces().give_edge_list(face_index)[3]\n\t\t\n\t\tnew_node4 = old_edge0 + mesh.give_model_inf()[0] + mesh.give_model_inf()[2] \n\t\tnew_node5 = old_edge1 + mesh.give_model_inf()[0] + mesh.give_model_inf()[2]\n\t\tnew_node6 = old_edge2 + mesh.give_model_inf()[0] + mesh.give_model_inf()[2]\n\t\tnew_node7 = old_edge3 + mesh.give_model_inf()[0] + mesh.give_model_inf()[2]\t\n\t\tnew_node8 = mesh.give_model_inf()[0] + face_index\n\t\t\n\t\tif helper.in_list((old_node0, new_node4), new_edge_list) == False: \n\t\t\tnew_edge_list.append((old_node0, new_node4))\n\t\tif helper.in_list((new_node4, new_node8), new_edge_list) == False: \n\t\t\tnew_edge_list.append((new_node4, new_node8))\n\t\tif helper.in_list((new_node8, new_node7), new_edge_list) == False: \n\t\t\tnew_edge_list.append((new_node8, new_node7))\n\t\tif helper.in_list((new_node7, old_node0), new_edge_list) == False: \n\t\t\tnew_edge_list.append((new_node7, old_node0))\n\t\tif helper.in_list((new_node4, old_node1), new_edge_list) == False: \n\t\t\tnew_edge_list.append((new_node4, old_node1))\n\t\tif helper.in_list((old_node1, new_node5), new_edge_list) == False: \n\t\t\tnew_edge_list.append((old_node1, new_node5))\n\t\tif helper.in_list((new_node5, new_node8), new_edge_list) == False: \n\t\t\tnew_edge_list.append((new_node5, new_node8))\n\t\tif helper.in_list((new_node7, old_node3), new_edge_list) == False: \n\t\t\tnew_edge_list.append((new_node7, old_node3))\n\t\tif helper.in_list((old_node3, new_node6), new_edge_list) == False: \n\t\t\tnew_edge_list.append((old_node3, new_node6))\n\t\tif helper.in_list((new_node6, new_node8), new_edge_list) == False: \n\t\t\tnew_edge_list.append((new_node6, new_node8))\n\t\tif helper.in_list((new_node6, old_node2), new_edge_list) == False: \n\t\t\tnew_edge_list.append((new_node6, old_node2))\n\t\tif helper.in_list((old_node2, new_node5), new_edge_list) == False: \n\t\t\tnew_edge_list.append((old_node2, new_node5))\n\t\n\t\tnew_face_list.append((old_node0, new_node4, new_node8, new_node7))\n\t\tnew_face_list.append((new_node4, old_node1, new_node5, new_node8))\n\t\tnew_face_list.append((new_node7, new_node8, new_node6, old_node3))\n\t\tnew_face_list.append((new_node8, new_node5, old_node2, new_node6))\n\t\t\n\tnew_edges = geo.Edge(new_edge_list)\n\t\n\tnew_faces = geo.Face(new_face_list, new_edges)\n\t\t\n\t# update existing nodes\t\n\tfor node_index in range(mesh.give_model_inf()[0]):\n\t\t\n\t\tring1, ring2 = helper.find_neighbour_node(new_edges, new_faces, node_index)\n\t\tvalence = helper.find_valence(node_index, new_faces) \n\t\t#: valence: the number of faces sharing on specific edge\n\n\t# 4. 
update existing corner vertex\n\t# 2/4 @---* 1/4 *: newly-generated vertices\n\t# | | @: existing vertices to be updated\n\t# 1/4 *---* 0 The higher mask values on neighbouring vertices, \n\t# the more likely a square mesh will be refined into a sphere.\n\t \n\t\tif valence == 1:\n\n\t\t\tnew_x, new_y, new_z = (0, 0, 0)\n\t\t\tprint\n\t\t\tfor node_in_ring1 in ring1:\n\t\t\t\tnew_x += 1./4.*mesh.give_nodes().give_coor()[node_in_ring1][0]\n\t\t\t\tnew_y += 1./4.*mesh.give_nodes().give_coor()[node_in_ring1][1]\n\t\t\t\tnew_z += 1./4.*mesh.give_nodes().give_coor()[node_in_ring1][2]\n\n\t\t\tfor node_in_ring2 in ring2:\n\t\t\t\tnew_x += 0.*mesh.give_nodes().give_coor()[node_in_ring2][0]\n\t\t\t\tnew_y += 0.*mesh.give_nodes().give_coor()[node_in_ring2][1]\n\t\t\t\tnew_z += 0.*mesh.give_nodes().give_coor()[node_in_ring2][2]\n\t\t\t\t\n\t\t\tnew_x += 2./4.*mesh.give_nodes().give_coor()[node_index][0]\n\t\t\tnew_y += 2./4.*mesh.give_nodes().give_coor()[node_index][1]\n\t\t\tnew_z += 2./4.*mesh.give_nodes().give_coor()[node_index][2]\n\n\t# 5. update existing boundary joint vertex\n\t# 3/4\n\t# 1/8 *---*---* 1/8 *: newly-generated vertices\n\t# | | | @: existing vertices to be updated\n\t# 0 *---*---* 0\n\n\t\telif valence == 2:\n\t\t\t\n\t\t\tnew_x, new_y, new_z = (0, 0, 0)\n\t\t\tfor node_in_ring1 in ring1:\n\t\t\t\tif helper.find_valence(node_in_ring1, new_faces) <= 2: \n\t\t\t\t\tnew_x += 1./8.*mesh.give_nodes().give_coor()[node_in_ring1][0]\n\t\t\t\t\tnew_y += 1./8.*mesh.give_nodes().give_coor()[node_in_ring1][1]\n\t\t\t\t\tnew_z += 1./8.*mesh.give_nodes().give_coor()[node_in_ring1][2]\n\t\t\t\t\t\n\t\t\tnew_x += 3./4.*mesh.give_nodes().give_coor()[node_index][0]\n\t\t\tnew_y += 3./4.*mesh.give_nodes().give_coor()[node_index][1]\n\t\t\tnew_z += 3./4.*mesh.give_nodes().give_coor()[node_index][2]\n\t\n\t# 6. update new node on interior edge\n\t# * r/k\n\t# /\\ b/k*\n\t# *__/ \\___ r/k\n\t# \\ \\ /¬¬/ *: newly-generated vertices: \n\t# \\ \\/ / b = 3/2/valence, r = 1/4/valence\n\t# *--@--* b/k\t @: existing vertices to be updated: 1-b-r\t\t\n\t# / /\\ \\\n\t# /__/ \\__\\\n\t# * \\ / * r/k\n\t# \\/\n\t\t\n\t\telse:\n\t\t\tnew_x, new_y, new_z = (0, 0, 0)\n\t\t\tbeta = 3./2./valence\n\t\t\tgamma = 1./4./valence\n\t\t\tfor node_in_ring1 in ring1:\n\t\t\t\tnew_x += beta/valence*mesh.give_nodes().give_coor()[node_in_ring1][0]\n\t\t\t\tnew_y += beta/valence*mesh.give_nodes().give_coor()[node_in_ring1][1]\n\t\t\t\tnew_z += beta/valence*mesh.give_nodes().give_coor()[node_in_ring1][2]\n\t\t\t\n\t\t\tfor node_in_ring2 in ring2:\n\t\t\t\tnew_x += gamma/valence*mesh.give_nodes().give_coor()[node_in_ring2][0]\n\t\t\t\tnew_y += gamma/valence*mesh.give_nodes().give_coor()[node_in_ring2][1]\n\t\t\t\tnew_z += gamma/valence*mesh.give_nodes().give_coor()[node_in_ring2][2]\n\t\t\t\n\t\t\tnew_x += (1. - beta - gamma)*mesh.give_nodes().give_coor()[node_index][0]\n\t\t\tnew_y += (1. - beta - gamma)*mesh.give_nodes().give_coor()[node_index][1]\n\t\t\tnew_z += (1. - beta - gamma)*mesh.give_nodes().give_coor()[node_index][2]\n\t\t\n\t\tnew_coor[node_index] = (new_x, new_y, new_z)\n\t\n\tnew_nodes = geo.Node(new_coor)\n\t\n\tmesh.update(new_nodes, new_edges, new_faces)\n\t\n\t# return new_mesh\n\treturn mesh",
"def update_mesh(self, all_meshes):\n if isinstance(all_meshes, Mesh):\n mesh_tp = []\n mesh_tp.append(all_meshes)\n all_meshes = mesh_tp\n\n for i, mesh in enumerate(all_meshes):\n if mesh.time.size != 1:\n raise IndexError(\"Mesh should be from one frame only\")\n\n if len(self.all_meshes) <= i or mesh.channel.size != self.all_meshes[i].channel.size:\n self.new_mesh_set(all_meshes)\n return # Prevent calling update_markers recursively\n\n if not isinstance(all_meshes, list):\n raise TypeError(\"Please send a list of mesh to update_mesh\")\n\n self.all_meshes = all_meshes\n\n for i, mesh in enumerate(self.all_meshes):\n points = vtkPoints()\n n_vertex = mesh.channel.size\n mesh = np.array(mesh)\n for j in range(n_vertex):\n points.InsertNextPoint(mesh[0:3, j])\n\n poly_line = self.mesh_actors[i].GetMapper().GetInput()\n poly_line.SetPoints(points)\n self.mesh_actors[i].GetProperty().SetLineWidth(self.mesh_linewidth)",
"def val_mul(self, a):\n f = self.to_Poly()\n return f.val_mul(a).to_PolyMesh(self.params)",
"def moveMesh(self, name, **args):\n \n randName = 'moveMesh' + str(np.random.randint(10**5,10**8))\n\n args = dictToTuple(**args)\n\n self.lmp.command('fix {} all move/mesh mesh {} '.format(randName, name) + ('{} ' * len(args)).format(*args))\n\n return randName",
"def mesh_uniform(N_e, d, Omega):",
"def update_mesh(self, all_meshes):\n if isinstance(all_meshes, Mesh):\n mesh_tp = MeshCollection()\n mesh_tp.append(all_meshes)\n all_meshes = mesh_tp\n\n if all_meshes.get_num_frames() is not 1:\n raise IndexError(\"Mesh should be from one frame only\")\n\n for i in range(len(all_meshes)):\n if all_meshes.get_mesh(i).get_num_vertex() is not self.all_meshes.get_mesh(i).get_num_vertex():\n self.new_mesh_set(all_meshes)\n return # Prevent calling update_markers recursively\n\n if not isinstance(all_meshes, MeshCollection):\n raise TypeError(\"Please send a list of mesh to update_mesh\")\n\n self.all_meshes = all_meshes\n\n for (i, mesh) in enumerate(self.all_meshes):\n points = vtkPoints()\n for j in range(mesh.get_num_vertex()):\n points.InsertNextPoint(mesh[0:3, j])\n\n poly_line = self.mesh_actors[i].GetMapper().GetInput()\n poly_line.SetPoints(points)"
] | [
"0.5798947",
"0.5716331",
"0.55502725",
"0.55502725",
"0.55502725",
"0.5410889",
"0.53264076",
"0.52080476",
"0.5147973",
"0.51004106",
"0.5062747",
"0.50544333",
"0.49495655",
"0.49182805",
"0.49177897",
"0.49008822",
"0.4881483",
"0.4853714",
"0.48349267",
"0.4832497",
"0.48222816",
"0.47884676",
"0.47745243",
"0.47680742",
"0.47673613",
"0.4758129",
"0.47358727",
"0.47103202",
"0.4705399",
"0.4693797"
] | 0.5895573 | 0 |
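A sketch of how the permutations helper above could be exercised on a synthetic mesh; it assumes trimesh and numpy are installed and that permutations() from that entry is in scope. The spread check is illustrative only; it is an assumption of this sketch, not something asserted by the original code.

import numpy as np
import trimesh

# Assumes permutations() from the entry above is in scope.
mesh = trimesh.creation.icosphere(subdivisions=3)
identifiers = permutations(mesh, count=50, subdivisions=2, cutoff=60)

# The identifier vector should stay nearly constant across tessellation and
# rigid-transform permutations of the same geometry.
spread = np.ptp(identifiers, axis=0)
print("per-component identifier spread:", spread)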
Get a list of single-body meshes to test identifiers on. | def get_meshes(path='../../../models', cutoff=None):
bodies = collections.deque()
for file_name in os.listdir(path):
try:
mesh = trimesh.load(os.path.join(path, file_name))
split = mesh.split()
bodies.extend(split)
if len(split) > 1:
bodies.append(mesh)
except BaseException:
continue
if cutoff is not None and len(bodies) > cutoff:
return np.array(bodies)
for _i in range(100):
cylinder = trimesh.creation.cylinder(
radius=np.random.random() * 100,
height=np.random.random() * 1000,
sections=int(np.clip(np.random.random() * 720,
20,
720)))
capsule = trimesh.creation.capsule(
radius=np.random.random() * 100,
height=np.random.random() * 1000,
count=np.clip(np.random.random(2) * 720,
20,
720).astype(int))
bodies.append(cylinder)
bodies.append(capsule)
for _i in range(10):
bodies.append(trimesh.creation.random_soup(
int(np.clip(np.random.random() * 1000,
20,
1000))))
bodies.append(trimesh.creation.icosphere())
bodies.append(trimesh.creation.uv_sphere())
bodies.append(trimesh.creation.icosahedron())
return np.array(bodies) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_mesh_ids(self, body):\n with self.lock:\n return self.send_command('get_kinbody_link_mesh_ids ' + body.GetName())",
"def collectionMeshes(collection):\n return [o for o in collection.all_objects if o.type == 'MESH']",
"def filter_legislative_bodies(self):\n return self.filter_nodes('//LegislativeBody[@id]')",
"def get_body_infos():\n return [get_body_info(i) for i in get_body_ids()]",
"def identify_bodies(self, indexes: np.ndarray) -> np.ndarray:\n return np.append(indexes, [indexes]*(self.num_particles-1))",
"def get_body_names():\n return [bi.body_name for bi in get_body_infos()]",
"def get_all_lod(namestr):\n meshes = []\n for me in bpy.data.meshes:\n if \"|q\" in me.name and namestr in me.name:\n meshes.append(me)\n return meshes",
"def getMeshes():\n nodes = pipernode.get('piperSkinnedMesh')\n return {mesh.getParent() for skin in nodes for mesh in skin.getChildren(ad=True, type='mesh') if mesh.getParent()}",
"def get_meshes(scene):\r\n # all the instances we want to duplicate and change the source\r\n instances = []\r\n # the mesh we want to use as the new source\r\n replacement_mesh = None\r\n # the original mesh\r\n original_mesh = None\r\n\r\n for item in scene.selected:\r\n if item.isAnInstance:\r\n instances.append(item)\r\n original_mesh = item.itemGraph(\"meshInst\").connectedItems[\"Reverse\"][0]\r\n else:\r\n replacement_mesh = item\r\n return [instances, replacement_mesh, original_mesh]",
"def _get_child_meshes(obj):\n\tif obj.type == 'MESH':\n\t\treturn [obj], []\n\telse:\n\t\tmeshes, other = [], [obj]\n\t\tfor child in obj.children:\n\t\t\tchild_meshes, child_other = _get_child_meshes(child)\n\t\t\tmeshes += child_meshes\n\t\t\tother += child_other\n\n\t\treturn meshes, other",
"def get_bodyparts(project_dir):\n print(f\"\\n\\n\\nLoading data\")\n df_paths = sorted(glob.glob(os.path.join(project_dir, '*.h5')))\n points_2d_df = utils.create_dlc_points_2d_file(df_paths)\n arr = points_2d_df[points_2d_df[\"frame\"]==0][[\"marker\"]][points_2d_df[\"camera\"]==0].values\n final_arr = arr.flatten().tolist()\n return(final_arr)",
"def body_contacts(self, physics):\n return self.collect_contacts(physics, self._body_geom_ids)",
"def get_bodies(number=5, excluded_params=None):\n bodies = [positive_body]\n for _ in range(number):\n bodies.append(create_body(excluded_params))\n return bodies",
"def search_all_id():\n\n # Quering with MPRester to obtain corresponding material id, reduced formula and space group.\n with MPR(get_api_key()) as m:\n\n # Criteria is set as greater than -1 to search materials, which is unphysical in real world, therefore in effect\n # search all available materials.\n llist = m.query(criteria={'band_gap': {'$gt': -1}}, properties=['material_id', 'pretty_formula','spacegroup'],\n # Chunk size is set as 0 for no chunking.\n chunk_size=0)\n\n # Generate an empty dictionary for further sampling.\n space_group_dict = dict()\n\n # To loop all items in space_group_dict and to label by their space groups.\n for index in llist:\n\n # To find space group in Hall Symbol.\n space_group = index['spacegroup']['hall']\n\n # If the item space group is not in space_group_dict, then add it and create an empty value list for further\n # storing.\n if space_group not in space_group_dict:\n space_group_dict[space_group] = []\n\n # To append the material id (str) to the list(value) of corresponding space group (key).\n space_group_dict[space_group].append(index['material_id'])\n\n # To sample the space_group_dict by random_sample.\n final_list = random_sample(space_group_dict, 0.01)\n\n return final_list",
"def get_materials_from_blender_objects(blender_objects):\n materials = set()\n meshes = {ob.data for ob in blender_objects if ob.type == 'MESH'}\n for ob in meshes:\n if not ob.materials:\n continue\n materials.add(ob.materials[0])\n return sorted(materials, key=lambda m: m.name)",
"def getFromComponent(self, component):\n return ParaMeshBodies()",
"def get_selected_objects (context):\n return [obj for obj in context.selected_objects if obj.type == 'MESH']",
"def new_mesh_set(self, all_meshes):\n if isinstance(all_meshes, Mesh):\n mesh_tp = MeshCollection()\n mesh_tp.append(all_meshes)\n all_meshes = mesh_tp\n\n if all_meshes.get_num_frames() is not 1:\n raise IndexError(\"Mesh should be from one frame only\")\n\n if not isinstance(all_meshes, MeshCollection):\n raise TypeError(\"Please send a list of mesh to update_mesh\")\n self.all_meshes = all_meshes\n\n # Remove previous actors from the scene\n for actor in self.mesh_actors:\n self.parent_window.ren.RemoveActor(actor)\n self.mesh_actors = list()\n\n # Create the geometry of a point (the coordinate) points = vtkPoints()\n for (i, mesh) in enumerate(self.all_meshes):\n points = vtkPoints()\n for j in range(mesh.get_num_vertex()):\n points.InsertNextPoint([0, 0, 0])\n\n # Create an array for each triangle\n cell = vtkCellArray()\n for j in range(mesh.get_num_triangles()): # For each triangle\n line = vtkPolyLine()\n line.GetPointIds().SetNumberOfIds(4)\n for k in range(len(mesh.triangles[j])): # For each index\n line.GetPointIds().SetId(k, mesh.triangles[j, k])\n line.GetPointIds().SetId(3, mesh.triangles[j, 0]) # Close the triangle\n cell.InsertNextCell(line)\n poly_line = vtkPolyData()\n poly_line.SetPoints(points)\n poly_line.SetLines(cell)\n\n # Create a mapper\n mapper = vtkPolyDataMapper()\n mapper.SetInputData(poly_line)\n\n # Create an actor\n self.mesh_actors.append(vtkActor())\n self.mesh_actors[i].SetMapper(mapper)\n\n self.parent_window.ren.AddActor(self.mesh_actors[i])\n self.parent_window.ren.ResetCamera()\n\n # Update marker position\n self.update_mesh(self.all_meshes)",
"def load_meshes(self):\n for meta_mesh in self.gltf.meshes:\n # Returns a list of meshes\n meshes = meta_mesh.load(self.materials)\n self.meshes.append(meshes)\n\n for mesh in meshes:\n self.scene.meshes.append(mesh)",
"def create_meeples(self, playerid):\n\t\tmeeples = []\n\t\tfor meepleid in range(4):\n\t\t\tmeeples.append(S_Meeple(playerid,\n\t\t\t\t\t\t\t\t\tmeepleid,\n\t\t\t\t\t\t\t\t\tMEEPLESIZE))\n\t\tprint(\"meeples von spieler {} gebaut!\".format(playerid))\n\t\treturn meeples",
"def new_mesh_set(self, all_meshes):\n if isinstance(all_meshes, Mesh):\n mesh_tp = []\n mesh_tp.append(all_meshes)\n all_meshes = mesh_tp\n\n if not isinstance(all_meshes, list):\n raise TypeError(\"Please send a list of mesh to update_mesh\")\n self.all_meshes = all_meshes\n\n # Remove previous actors from the scene\n for actor in self.mesh_actors:\n self.parent_window.ren.RemoveActor(actor)\n self.mesh_actors = list()\n\n # Create the geometry of a point (the coordinate) points = vtkPoints()\n for i, mesh in enumerate(self.all_meshes):\n if mesh.time.size != 1:\n raise IndexError(\"Mesh should be from one frame only\")\n\n points = vtkPoints()\n for j in range(mesh.channel.size):\n # points.InsertNextPoint([0, 0, 0])\n points.InsertNextPoint(mesh.data[:3, j, 0].tolist())\n\n # Create an array for each triangle\n draw_patch = not mesh.automatic_triangles and not self.force_wireframe\n if draw_patch:\n poly_type = vtkPolygon\n n_ids = 3\n color = self.patch_color[i]\n else:\n poly_type = vtkPolyLine\n n_ids = 4\n color = self.mesh_color\n cells = vtkCellArray()\n\n # Create the polygons\n for j in range(mesh.triangles.shape[1]):\n poly = poly_type()\n poly.GetPointIds().SetNumberOfIds(n_ids) # make a tri\n for k in range(len(mesh.triangles[:, j])):\n poly.GetPointIds().SetId(k, mesh.triangles[k, j])\n if not draw_patch:\n poly.GetPointIds().SetId(3, mesh.triangles[0, j]) # Close the triangle\n cells.InsertNextCell(poly)\n\n poly_data = vtkPolyData()\n poly_data.SetPoints(points)\n if draw_patch:\n poly_data.SetPolys(cells)\n else:\n poly_data.SetLines(cells)\n\n mapper = vtkPolyDataMapper()\n mapper.SetInputData(poly_data)\n\n # Create an actor\n self.mesh_actors.append(vtkActor())\n self.mesh_actors[i].SetMapper(mapper)\n self.mesh_actors[i].GetProperty().SetColor(color)\n self.mesh_actors[i].GetProperty().SetOpacity(self.mesh_opacity)\n\n self.parent_window.ren.AddActor(self.mesh_actors[i])\n\n # Update marker position\n self.update_mesh(self.all_meshes)",
"def getIDs():",
"def item(self, index):\n return ParaMeshBody()",
"def item(self, index):\n return ParaMeshBody()",
"def info_materials_polymer_get():\n materials = _material_by_group(974) # 974 == intermediate group\n return materials, 200",
"def mesh_names(self):\n meshes = []\n for vname in self.nc.variables.keys():\n try:\n if self.nc.variables[vname].cf_role == 'mesh_topology':\n meshes.append(vname)\n except AttributeError:\n pass\n return meshes",
"def test_read_multiple(self):\n meshes = stlreader.get_data(self.stl_multi_file)\n for name, vertices, polygons in meshes:\n self.assertEqual(name, \"{}#{}\".format(os.path.basename(self.stl_multi_file), 0))\n self.assertTrue(len(vertices) > 0)\n self.assertTrue(len(polygons) > 0)\n polygon_ids = list()\n for a, b, c in polygons.itervalues():\n polygon_ids += [a, b, c]\n self.assertItemsEqual(set(vertices.keys()), set(polygon_ids))",
"def get_textures_from_blender_objects(blender_objects):\n textures = set()\n meshes = {ob.data for ob in blender_objects if ob.type == 'MESH'}\n for ob in meshes:\n if not ob.materials:\n continue\n for ts in ob.materials[0].texture_slots:\n if ts and ts.texture and ts.texture.image:\n textures.add(ts.texture)\n return sorted(textures, key=lambda t: t.name)",
"def detect_bodies(file, storage='photos/bodies', way=\"network\"):\n if not os.path.exists(storage):\n os.mkdir(storage)\n assert type(file) == str\n assert way in ['network', 'local']\n bodies = []\n cords = []\n if way == 'local':\n imgs, res = Detect_object.process(file)\n for k, v in res.items():\n if k == 1:\n for line in v:\n cords.append(line[:4].tolist())\n for img in imgs:\n name = str(time.time())+'.jpg'\n img.save(os.path.join(storage, name))\n bodies.append(name)\n elif way == 'network':\n res = get_body_attributes(file)\n num = res[\"person_num\"]\n info = res['person_info']\n img = Image.open(file).convert('RGB')\n for i in range(num):\n loc = info[i]['location']\n height = loc['height']\n width = loc['width']\n top = loc['top']\n left = loc['left']\n img_ = img.crop([left, top, left+width, top+height])\n name = str(time.time())+'.jpg'\n img_.save(os.path.join(storage, name))\n bodies.append(name)\n cords.append([left, top, left+width, top+height])\n return (bodies, cords)",
"def load_meshes_from(self, med_fname):\n from salome import lcc\n from SMESH import SMESH_Gen\n sstd = self.sstd\n ceng = lcc.FindOrLoadComponent(\"FactoryServer\", \"SMESH\")\n eng = ceng._narrow(SMESH_Gen)\n eng.SetCurrentStudy(sstd)\n cmeshes = eng.CreateMeshesFromMED(med_fname)[0]\n meshes = []\n for cmesh in cmeshes:\n meshes.append(self.attach_mesh_from(cmesh))\n return meshes"
] | [
"0.73483276",
"0.5918279",
"0.57549536",
"0.5689863",
"0.5679754",
"0.56532514",
"0.56160617",
"0.56097174",
"0.54366004",
"0.5387349",
"0.5338473",
"0.5329637",
"0.52505124",
"0.5202831",
"0.5134976",
"0.51091045",
"0.5063652",
"0.5014104",
"0.4995707",
"0.49625397",
"0.49101567",
"0.48983717",
"0.48943663",
"0.48943663",
"0.48751527",
"0.48645705",
"0.48564658",
"0.4844296",
"0.48318285",
"0.48149523"
] | 0.59325 | 1 |
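A minimal usage sketch for the get_meshes document above. Assumptions (not taken from the row itself): the helper is importable from a hypothetical module named meshes_fixture, trimesh and numpy are installed, and the '../../../models' directory exists but may be empty -- the procedurally generated cylinders, capsules and spheres are appended regardless.

from meshes_fixture import get_meshes  # hypothetical module holding the helper above

bodies = get_meshes(path='../../../models', cutoff=None)
assert len(bodies) > 0
for body in bodies[:5]:
    # every returned object is a trimesh.Trimesh exposing (n, 3) vertex and face arrays
    assert body.vertices.shape[1] == 3
    assert body.faces.shape[1] == 3
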
Encrypt text with the given key. key > any string with length smaller than or equal to 16 text > the data to encrypt, as a string Returns an array of bytes. | def encrypt(key, text):
key = _key_array(key)
text = _text_array(text)
aes = mxit.aes.AES()
parts = _split(text, 16)
encoded = []
for part in parts:
encoded += aes.encrypt(part, key, aes.keySize["SIZE_128"])
return encoded | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def encode(text, key):\n encrypted = []\n for i in text:\n encrypted.append(key[i])\n return encrypted",
"def encrypt_key(data, key):\n data = MegaCrypto.base64_decode(data)\n return sum((MegaCrypto.str_to_a32(MegaCrypto.cbc_encrypt(data[_i:_i + 16], key))\n for _i in range(0, len(data), 16)), ())",
"def encrypt(text, key):\r\n\trail = [['\\n' for i in range(len(text))] for j in range(key)] \r\n\t\r\n\tdir_down = False\r\n\trow, col = 0, 0\r\n\t\r\n\tfor i in range(len(text)): \r\n\t\tif (row == 0) or (row == key - 1): \r\n\t\t\tdir_down = not dir_down \r\n\t\t\r\n\t\trail[row][col] = text[i] \r\n\t\tcol += 1\r\n\t\t\r\n\t\tif dir_down: \r\n\t\t\trow += 1\r\n\t\telse: \r\n\t\t\trow -= 1\r\n\t \r\n\tresult = [] \r\n\tfor i in range(key): \r\n\t\tfor j in range(len(text)): \r\n\t\t\tif rail[i][j] != '\\n': \r\n\t\t\t\tresult.append(rail[i][j]) \r\n\treturn(\"\" . join(result))",
"def encrypt(self, key, plaintext):\n output = []\n padded_key = padd_key(key, plaintext)\n for i in range(len(plaintext)):\n enc_ascii = (ord(plaintext[i]) + ord(padded_key[i])) % 256\n output.append(chr(enc_ascii))\n return ''.join(output)",
"def encrypt(plaintext: str, key: str) -> str:\n return \"\".join(chr(ord(p) ^ ord(k)) for (p, k) in zip(plaintext, key))",
"def encrypt(data, key):\n data = six.ensure_binary(data)\n data = privy.hide(secret=data, password=key)\n data = six.ensure_text(data)\n return data",
"def encrypt(text,key):\r\n aes = pyaes.AESModeOfOperationCTR(key)\r\n ciphertext = aes.encrypt(text)\r\n return ciphertext",
"def encrypt(\r\n key: bytes,\r\n plain_text: str,\r\n) -> bytes:\r\n block_size = 16\r\n plain_text = _pad(plain_text, block_size)\r\n iv = os.urandom(block_size)\r\n cipher = AES.new(key, AES.MODE_CBC, iv)\r\n cipher_text = cipher.encrypt(plain_text.encode())\r\n return iv + cipher_text",
"def encrypt(self, message, key):\n return self.translateMessage(message, key, \"encrypt\")",
"def encrypt(content, key):\n\ttry:\n\t\tfrom Cryptodome.Cipher import AES\n\t\tfrom Cryptodome import Random\n\texcept ImportError:\n\t\tfrom Crypto.Cipher import AES\n\t\tfrom Crypto import Random\n\n\tif not isPython2():\n\t\tif isString(content):\n\t\t\tcontent = content.encode(\"latin-1\")\n\t\tif isString(key):\n\t\t\tkey = key.encode(\"latin-1\")\n\n\tcontent = pad(content)\n\tiv = Random.new().read(AES.block_size)\n\tcipher = AES.new(key, AES.MODE_CBC, iv)\n\tresult = iv + cipher.encrypt(content)\n\treturn result",
"def encrypt(message, key):\n\tnumericRepresentation = []\n\tfor c in message:\n\t\tnumericRepresentation.append(ord(c) - 65)\n\n\tcipher = \"\"\n\tfor x in numericRepresentation:\n\t\tcipher += chr((x + key) % 26 + 65)\n\n\treturn cipher",
"def aes_encrypt(data, key):\r\n cipher = aes_cipher_from_key(key)\r\n padded_data = pad(data)\r\n return cipher.encrypt(padded_data)",
"def caesar_encode(self, text, key):\n result_list = []\n for char in text:\n if char.isalpha():\n if char.islower():\n offset = ASCII_LOWER_OFFSET\n else:\n offset = ASCII_UPPER_OFFSET\n char = chr((ord(char) - offset + key) % ALPHABET_SIZE + offset)\n result_list.append(char)\n return ''.join(result_list)",
"def encrypt(self, message, key):\n message = self.pkcs7_pad(message)\n iv = ''.join(chr(random.randint(0, 0xFF)) for i in range(AES.block_size))\n cipher = AES.new(key, AES.MODE_CBC, iv, segment_size=64)\n return iv + cipher.encrypt(message)",
"def encrypt_message(message: str, key: int = 17):\n\n alphabet = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n shifted_alphabet = alphabet[key:] + alphabet[:key]\n encrypted_message = \"\"\n\n for i in message.upper():\n\n # Use encryption for letters only, keep the rest\n if i in alphabet:\n\n # Find index in alphabet\n for idx, l in enumerate(alphabet):\n if i == l:\n\n # Add letter in shifted alphabet\n # with this index to the message\n encrypted_message += shifted_alphabet[idx]\n else:\n encrypted_message += i\n\n return encrypted_message",
"def encode(key, plain):\n print(\"ciphertext: \", end=\"\")\n\n # used variables\n pos = 0\n key_len = len(key)\n\n # loop over every character in the text\n for char in plain:\n key_pos = pos % key_len\n # leave non-alphabetical characters alone\n if not char.isalpha():\n print(char, end=\"\")\n # cipher characters\n elif char.isupper():\n cipher = chr((char_to_number(char) + char_to_number(key[key_pos])) \\\n % 26 + ord(\"A\"))\n \n print(cipher, end=\"\")\n pos += 1\n else:\n cipher = chr((char_to_number(char) + char_to_number(key[key_pos])) \\\n % 26 + ord(\"a\"))\n \n print(cipher, end=\"\")\n pos += 1\n\n print()",
"def xorstr (key, msg):\n # join a list of chars into string where list is generated by \n # XORing each of msg bytes with each of the key bytes rotating.\n return ''.join([chr(ord(msg[i]) ^ ord(key[i % len(key)])) for i in range (0, len(msg))])",
"def xor_decrypt(ciphertext, key):\n\n\tdecrypted_char = ''\n\tdecrypted_str = ''\n\n\tfor char in ciphertext:\n\t\tdecrypted_char = chr(char ^ key)\n\t\tdecrypted_str += decrypted_char\n\n\treturn decrypted_str",
"def encrypt_and_encode(data, key):\r\n return base64.urlsafe_b64encode(aes_encrypt(data, key))",
"def encodeVigenere(self, key):\n\n key = key.upper().replace(\" \", \"\")\n encode = Vig(key)\n cipherText = encode.encode(self.planeText)\n \n if (self.verbose == 1):\n print(cipherText)\n \n return(cipherText)",
"def encode(key: str, clear: str) -> str:\n\n enc = []\n for i in range(len(clear)):\n key_c = key[i % len(key)]\n enc_c = chr((ord(clear[i]) + ord(key_c)) % 256)\n enc.append(enc_c)\n return base64.urlsafe_b64encode(\"\".join(enc).encode()).decode()",
"def xor_encode(data, key):\n if not data:\n return \"\"\n if not key:\n raise exceptions.EncryptError\n return binascii.hexlify(\n ''.join(chr(ord(a) ^ ord(b)) for a, b in zip(data, key)).encode(\"utf-8\")).decode(\"utf-8\")",
"def aes_cipher_from_key(key):\r\n return AES.new(key, AES.MODE_CBC, generate_aes_iv(key))",
"def xor(plaintext, key):\n # NOTE: this will return a string of length equal to the shorter of the two lengths\n \n # Iterate through the strings, creating a list of bytes\n arr = [chr(a ^ b) for (a,b) in zip(plaintext, key)]\n bstr = b\"\" # Initialize a byte string\n for byte in arr: # For each byte in the list,\n bstr += bytes([ord(byte)]) # Convert the byte in the list to a byte string\n return bstr",
"def encrypt( raw, key, iv ):\n result = ''\n tmp_iv = iv \n text = pad(raw)\n\n for i in xrange(0, len(text) / BS):\n lower_bound = i * 16\n upper_bound = (i+1) * 16\n \n tmp = AES.new(key, AES.MODE_OFB, tmp_iv).decrypt( text[lower_bound:upper_bound] )\n tmp_iv = tmp\n result += tmp\n\n return result",
"def decrypt(key: str, encrypted: str) -> str:\n\n key_len = len(key)\n decrypted = ''\n\n # Go through the encrypted string in chunks the length of the key\n for i in range(0, len(encrypted), key_len):\n chunk = encrypted[i:i + key_len] # Pull out a chunk the size of the key\n\n # Apply the key to the chunk\n for j, c in enumerate(chunk):\n decrypted += chr(ord(key[j]) ^ ord(c))\n\n return decrypted",
"def caesarShiftStringOps(message, key, encrypt=True):\n message = message.lower().replace(' ', '')\n alphabet = string.ascii_lowercase\n\n if not encrypt:\n key = -key\n\n shiftedAlphabet = alphabet[key:] + alphabet[:key]\n return message.translate(str.maketrans(alphabet, shiftedAlphabet))",
"def decrypt(text: str, key: str = None):\n if not text.isdecimal():\n raise ValueError(\"Encrypted text must contain only numbers.\")\n tmpres = []\n lkey = []\n if key is not None:\n lkey = list(key.encode(\"utf-8\"))\n i = 0\n counter = 0\n while i < len(text):\n l = int(text[i])\n tmp = text[i + 1:i + l + 1]\n i += l + 1\n if not tmp:\n break\n if lkey:\n c = int(tmp) - lkey[counter % len(lkey)]\n else:\n pm = 1 if tmp[0] == \"0\" else -1\n ri = int(tmp[1]) * pm\n c = int(tmp[2:]) - ri\n tmpres.append(c)\n counter += 1\n return bytes(tmpres).decode(\"utf8\")",
"def convert_key_to_string(key):\n\n return key.encode(encoder=nacl.encoding.Base64Encoder).decode('utf-8')",
"def decrypt(cipher, key):\n\tnumericRepresentation = []\n\tfor c in cipher:\n\t\tnumericRepresentation.append(ord(c) - 65)\n\n\tmessage = \"\"\n\tfor x in numericRepresentation:\n\t\tmessage += chr((x - key) % 26 + 65)\n\n\treturn message"
] | [
"0.70797527",
"0.6840706",
"0.6558892",
"0.64273185",
"0.64126307",
"0.6379325",
"0.6335545",
"0.6279772",
"0.62030673",
"0.6136716",
"0.61286926",
"0.61280227",
"0.6042837",
"0.5989007",
"0.59492034",
"0.5916038",
"0.5902616",
"0.5860912",
"0.5793804",
"0.57515836",
"0.5691581",
"0.5683948",
"0.5678851",
"0.56675637",
"0.56567276",
"0.5639985",
"0.5626516",
"0.5613643",
"0.5610526",
"0.56000394"
] | 0.72668356 | 0 |
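The encrypt document above splits the plaintext into 16-byte blocks and encrypts each block independently with AES-128, which amounts to AES in ECB mode. Below is a hedged equivalent sketch using the cryptography package -- an assumption, since the stored document relies on the bundled mxit.aes module, and its _key_array/_text_array padding helpers are not shown in the row.

from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes

def encrypt_ecb(key: bytes, data: bytes) -> bytes:
    # mirrors the 16-byte block splitting done by _split(text, 16) above;
    # key must be exactly 16 bytes and data a multiple of 16 bytes
    assert len(key) == 16 and len(data) % 16 == 0
    encryptor = Cipher(algorithms.AES(key), modes.ECB()).encryptor()
    return encryptor.update(data) + encryptor.finalize()

ciphertext = encrypt_ecb(b"0123456789abcdef", b"sixteen byte msg")

Note that ECB leaks repeated-block patterns, so this sketch illustrates the mechanics of the stored document rather than a recommended construction.
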
For a graph represented by its adjacency matrix A, computes the co-occurrence matrix by random surfing on the graph with returns. 1-alpha is the probability to make, at each step, a return to the original step. | def PCO(A, K, alpha):
A=np.array(A, dtype=float)
#The adjacency matrix A is first normalized
A=normalize(A)
n=A.shape[0]
I=np.eye(n)
P=I
M=np.zeros((n, n))
for i in range(K):
P = alpha*np.dot(P,A) + (1-alpha)*I
M = M+P
return(M) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def conductance_matrix(A):\n if issparse(A):\n G = nx.from_scipy_sparse_matrix(A)\n else:\n G = nx.from_numpy_matrix(A)\n subgraphC = []\n for subgraph in nx.connected_component_subgraphs(G):\n a_sub = nx.adjacency_matrix(subgraph)\n r_sub = resistance_matrix(a_sub)\n m = len(subgraph)\n # add one to diagonal, invert, remove one from diagonal:\n c_sub = 1 / (r_sub + np.eye(m)) - np.eye(m)\n subgraphC.append(c_sub)\n C = spla.block_diag(*subgraphC)\n # resort C so that it matches the original node list\n component_order = []\n for component in nx.connected_components(G):\n component_order += list(component)\n component_order = list(np.argsort(component_order))\n C = C[component_order, :]\n C = C[:, component_order]\n return C",
"def directed_cycle_score(A):\n\n # Implement your cycle score given Problem 4 Part 2\n temp_matrix = np.zeros(A.shape)\n alpha = 0.05\n k = 0\n summation_term = 999999\n num_terms = A.shape[0]\n # while change < 0.05:\n for i in range(num_terms):\n summation_term = (1 / np.math.factorial(k)) * expm(A)\n temp_matrix += summation_term\n\n cycle_score = np.trace(temp_matrix) - (A.shape[0] * num_terms)\n return cycle_score",
"def next_life_generation(A):\n newA = deepcopy(A)\n height = len(newA)\n width = len(newA[0])\n\n for row in range(1, height - 1):\n for col in range(1, width -1):\n x = countNeighbors(row, col, A)\n if x < 2 or x > 3:\n newA[row][col] = 0\n elif x == 3:\n newA[row][col] = 1\n else:\n newA[row][col] = A[row][col]\n return newA",
"def commute_matrix(A):\n R = resistance_matrix(A)\n E = A.sum() / 2 # number of edges in graph\n C = 2 * E * R\n return C",
"def compSBM_A(Nc, Nn, lbde):\n # each node has a uniform probability of belonging to a cluster\n # list indicating to which cluster each node belongs to\n Cn = np.array([np.random.randint(low=0, high=Nc) for nn in np.arange(Nn)])\n\n # matrix of inter- & intra- clusters proba\n Cp = np.eye(Nc) * 0.05 + np.random.uniform(low=0.0, high=0.04,\n size=(Nc, Nc))\n Cp[Cp < 0.025] = 0\n\n # build edges for each cluster\n A = np.zeros((Nn, Nn))\n for i in np.arange(Nc):\n Cn_idx_i = np.where(Cn == i)[0]\n Nc_i = len(Cn_idx_i)\n Ac_i = np.random.binomial(1, Cp[i, i], size=(Nc_i, Nc_i)) * 1.0\n Ac_i *= np.random.laplace(0, lbde, size=(Nc_i, Nc_i))\n for ii in np.arange(Nc_i):\n A[Cn_idx_i[ii], Cn_idx_i] = Ac_i[ii, :]\n\n # build edges between clusters\n # for edges going from cluster j to cluster i\n for j in np.arange(Nc):\n Cn_idx_j = np.where(Cn == j)[0]\n Nc_j = len(Cn_idx_j)\n Ac_ij = np.random.binomial(1, Cp[i, j], size=(Nc_i, Nc_j)) * 1.0\n Ac_ij *= np.random.laplace(0, lbde, size=(Nc_i, Nc_j))\n for ii in np.arange(Nc_i):\n A[Cn_idx_i[ii], Cn_idx_j] = Ac_ij[ii, :]\n\n # normalised the adjacency matrix by its largest eigenvalue\n Aw, Av = np.linalg.eig(A)\n A /= 1.1 * np.abs(Aw).max()\n\n return A",
"def coarsen(A, levels, self_connections=False):\n # Function written by M. Defferrard, taken (almost) verbatim, from \n # https://github.com/mdeff/cnn_graph/blob/master/lib/coarsening.py#L5\n graphs, parents = metis(A, levels)\n perms = compute_perm(parents)\n\n for i, A in enumerate(graphs):\n M, M = A.shape\n\n if not self_connections:\n A = A.tocoo()\n A.setdiag(0)\n\n if i < levels:\n A = perm_adjacency(A, perms[i])\n\n A = A.tocsr()\n A.eliminate_zeros()\n graphs[i] = A\n\n# Mnew, Mnew = A.shape\n# print('Layer {0}: M_{0} = |V| = {1} nodes ({2} added),'\n# '|E| = {3} edges'.format(i, Mnew, Mnew-M, A.nnz//2))\n\n\n return graphs, perms[0] if levels > 0 else None",
"def bernoulli_adjacency_matrix(E):\n A = np.random.binomial(1, E)\n A = np.tril(A) + np.tril(A, -1).T\n return A",
"def GenerateInitialSolution():\n c = random.random()*C\n count = 0\n while np.count_nonzero(alpha) < gamma:\n rand = random.randint(0, len(x_train)-1)\n if y_train[rand] == 1:\n alpha[rand] = c\n L[rand, 1] = c\n # L[count, 0] = rand\n # L[count, 1] = alpha[rand]\n SVs[count] = rand\n count += 1\n while np.count_nonzero(alpha) < 2*gamma:\n rand = random.randint(0, len(x_train)-1)\n if y_train[rand] == -1:\n alpha[rand] = c\n L[rand, 1] = c\n # L[count, 0] = rand\n # L[count, 1] = alpha[rand]\n SVs[count] = rand\n count += 1\n return alpha",
"def get_C100():\n m = 100\n random.seed(1111*m)\n A = random.randn(m, m) + 1j*random.randn(m, m)\n A = 0.5*(A + np.conj(A).T)\n return A",
"def ant_colony(map, alpha=3, beta=4, m=10, rho=0.2, q=1, its_max=20):\n n = len(map)\n tau = np.ones((n, n))\n eta = 1/map.D\n for i in range(n):\n eta[i, i] = 0\n paths_array = np.zeros((m, n), int)\n its = 0\n path_best = np.zeros((its_max, n), int)\n distance_best = np.zeros(its_max)\n\n while its < its_max:\n paths_length = np.zeros(m)\n for i in range(m):\n source = np.random.randint(n)\n visited = []\n unvisited = list(range(n))\n node_now = source\n node_next = -1\n paths_array[i, 0] = source\n\n for j in range(1, n):\n visited.append(node_now)\n unvisited.remove(node_now)\n prob_roulette = np.array([0]*n, dtype=float)\n for k in unvisited:\n prob_roulette[k] = (pow(tau[node_now, k], alpha)\n * pow(eta[node_now, k], beta))\n prob_roulette = prob_roulette/sum(prob_roulette)\n cum_roulette = prob_roulette.cumsum()\n cum_roulette -= np.random.uniform(0, 1)\n node_next = list(cum_roulette >= 0).index(True)\n paths_array[i, j] = node_next\n paths_length[i] += map.D[node_now, node_next]\n node_now = node_next\n paths_length[i] += map.D[node_now, source]\n\n if its == 0:\n distance_best[its] = paths_length.min()\n path_best[its] = paths_array[paths_length.argmin()].copy()\n else:\n if distance_best[its-1] < paths_length.min():\n distance_best[its] = distance_best[its-1]\n path_best[its] = path_best[its-1].copy()\n else:\n distance_best[its] = paths_length.min()\n path_best[its] = paths_array[paths_length.argmin()].copy()\n\n add_tau = np.zeros((n, n))\n\n for i in range(m):\n for j in range(n):\n row = paths_array[i, j]\n col = paths_array[i, (j+1) % n]\n add_tau[row][col] += q/paths_length[i]\n\n tau = (1 - rho)*tau + add_tau\n\n its += 1\n\n return Hamiltonian(path_best[-1], map)",
"def causal_discovery(data, num_steps=100, cycle_score_tolerance=1e-9):\n idx_to_var_map = {i: var_name for i, var_name in enumerate(data.columns)}\n num_vars = len(data.columns)\n\n # initialize an empty graph\n A_opt = np.zeros((num_vars, num_vars), dtype=int)\n # besides the adjacency matrix keep a set of edges present\n # in the graph making for easy delete/reverse moves. each entry in the\n # set is a tuple of integers (i, j) corresponding to indices\n # for the end points of a directed edge Vi-> Vj\n edges = set([])\n\n # get initial BIC score for empty graph and set it to the current optimal\n bic_opt = bic_score(A_opt, data, idx_to_var_map)\n\n for step in range(num_steps):\n\n # See details in Algorithm 1 of the hw3 handout\n # for what to do in this for loop\n pass\n\n return A_opt, edges, idx_to_var_map",
"def permutation_test_trial(adj_matrix, ass_matrix, size, graph, n_cell_types):\n\n if size == adj_matrix.shape[0]:\n shuffled_graph = shuffle_labels(ass_matrix, n_cell_types)\n H = calculate_neighborhood_distribution_sparse(adj_matrix, shuffled_graph)\n\n else:\n subgraph_nodes = create_subgraph(graph, size, 1)[0]\n sg_adj, sg_ass = parse_subgraph(subgraph_nodes, graph, ass_matrix)\n shuffled_graph = shuffle_labels(sg_ass, n_cell_types)\n H = calculate_neighborhood_distribution_sparse(sg_adj, shuffled_graph)\n\n return H",
"def _build_classifier(graph, cell_classifier, alpha=0.9):\n\n # build undirected graph weighted by node similarity\n G = graph.get_networkx()\n\n # evaluate posterior genotype distribution for each node\n posterior = cell_classifier.evaluate_posterior(graph.df.loc[list(G.nodes)])\n\n # compile normalized adjacency matrix\n adjacency = nx.to_numpy_array(G)\n adjacency /= adjacency.sum(axis=0)\n\n # evaluate centrality\n n = np.array(adjacency).shape[0]\n centrality = np.linalg.solve(np.eye(n, n)-(alpha*adjacency), (1-alpha)*posterior)\n\n # build classifier that maps model distributions to genotypes.\n #get_label = np.vectorize(cell_classifier.component_to_label.get)\n node_labels = centrality.argmax(axis=1)\n\n # return genotype mapping\n index_to_genotype = dict(zip(list(G.nodes), node_labels))\n\n return np.vectorize(index_to_genotype.get)",
"def feature_calculator(args, graph):\n index_1 = [edge[0] for edge in graph.edges()]\n index_2 = [edge[1] for edge in graph.edges()]\n values = [1 for edge in graph.edges()]\n node_count = max(max(index_1)+1,max(index_2)+1)\n adjacency_matrix = sparse.coo_matrix((values, (index_1,index_2)),shape=(node_count,node_count),dtype=np.float32)\n degrees = adjacency_matrix.sum(axis=0)[0].tolist()\n degs = sparse.diags(degrees, [0])\n normalized_adjacency_matrix = degs.dot(adjacency_matrix)\n target_matrices = [normalized_adjacency_matrix.todense()]\n powered_A = normalized_adjacency_matrix\n if args.window_size > 1:\n for power in tqdm(range(args.window_size-1), desc = \"Adjacency matrix powers\"):\n powered_A = powered_A.dot(normalized_adjacency_matrix)\n to_add = powered_A.todense()\n target_matrices.append(to_add)\n target_matrices = np.array(target_matrices)\n return target_matrices",
"def GGPgraphrnd(alpha, sigma, tau):\n # `epsilon` truncated sampling\n epsilon = 1e-6\n W = ggprnd(alpha, sigma, tau, trc=epsilon)\n\n W_star = sum(W)\n D_star = poisson(W_star**2).rvs()\n\n U = W_star * uniform().rvs((D_star, 2))\n \n W_interval = np.concatenate([np.array([0.]), W.cumsum()])\n\n interval_ranks = histc(U.flatten(), W_interval)\n selected_atom = np.array([False] * len(W))\n selected_atom[np.unique(interval_ranks)] = True\n w_rem = sum(W[~selected_atom])\n w = sum(W[selected_atom])\n\n # D: directed multi-graph\n hash_table = {key: value for key, value in zip(np.unique(interval_ranks), range(len(np.unique(interval_ranks))))}\n indexer = lambda x: hash_table[x]\n indexer = np.vectorize(indexer)\n D = interval_ranks.reshape(D_star, 2)\n D = coo_matrix((np.ones(D_star), (indexer(D[:, 0]), indexer(D[:, 1]))), \n shape=(sum(selected_atom), sum(selected_atom)))\n Z = (D + D.T).astype(bool)\n\n return coo_matrix(Z), w, w_rem",
"def adjacency_matrix(cluster_pred):\n #print('adjacency start')\n x = cluster_pred.copy()\n if(len(x.shape) == 1):\n x = x[:, np.newaxis]\n # Force the cluster indexing to be positive integers\n if(x.min() <= 0):\n x += -x.min() + 1\n\n A = np.dot(x**-1., x.T) == 1\n #print('adjacency end')\n return A",
"def compute_limit_matrix(gamma, adjacency, n_states):\n num_states = n_states\n identity = np.eye(num_states)\n return np.linalg.inv(identity - gamma * adjacency / 6)",
"def costSDT(graph, a):\n hit=0; miss=0; fa=0; cr=0\n check=(graph==a)\n for rnum, r in enumerate(a):\n for cnum, c in enumerate(r[:rnum]):\n if check[rnum,cnum]==True:\n if a[rnum,cnum]==1:\n hit += 1\n else:\n cr += 1\n else:\n if a[rnum,cnum]==1:\n miss += 1\n else:\n fa += 1\n return [hit, miss, fa, cr]",
"def bsc_output(a, x):\n y = []\n transitions = list(np.random.binomial(size=len(x), n=1, p= a))\n for i in range(len(x)):\n if(transitions[i]==1): y.append(1-x[i])\n else: y.append(x[i])\n return y",
"def generate_random_graph(variable_names, dist_func, edge_prob, connected=False, max_parents=-1, num_latents=0, **kwargs):\n shuffle(variable_names) # To have a random order\n num_vars = len(variable_names)\n\n # Generate random adjacency matrix with specified edge probability\n adj_matrix = np.random.binomial(n=1, p=edge_prob, size=(num_vars, num_vars))\n\n # Make sure that adjacency matrix is half diagonal\n for v_idx in range(num_vars):\n adj_matrix[v_idx, :v_idx+1] = 0\n\n # Nodes that do not have any parents or children are connected\n for v_idx in range(num_vars):\n has_connection = (adj_matrix[v_idx, :].any() or adj_matrix[:, v_idx].any())\n if not has_connection:\n con_idx = np.random.randint(num_vars-1)\n if con_idx >= v_idx:\n con_idx += 1\n adj_matrix[v_idx, con_idx] = True\n else:\n adj_matrix[con_idx, v_idx] = True\n\n # Ensure that a node has less than N parents\n if max_parents > 0:\n for v_idx in range(adj_matrix.shape[0]):\n num_parents = adj_matrix[:, v_idx].sum()\n if num_parents > max_parents:\n indices = np.where(adj_matrix[:, v_idx] == 1)[0]\n indices = indices[np.random.permutation(indices.shape[0])[:num_parents-max_parents]]\n adj_matrix[indices, v_idx] = 0\n\n # Connect nodes to one connected graph\n if connected:\n visited_nodes, connected_nodes = [], [0]\n while len(visited_nodes) < num_vars:\n while len(connected_nodes) > 0:\n v_idx = connected_nodes.pop(0)\n children = np.where(adj_matrix[v_idx, :])[0].tolist()\n parents = np.where(adj_matrix[:, v_idx])[0].tolist()\n neighbours = children + parents\n for n in neighbours:\n if (n not in visited_nodes) and (n not in connected_nodes):\n connected_nodes.append(n)\n if v_idx not in visited_nodes:\n visited_nodes.append(v_idx)\n if len(visited_nodes) < num_vars:\n node1 = np.random.choice(np.array(visited_nodes))\n node2 = np.random.choice(np.array([i for i in range(num_vars) if i not in visited_nodes]))\n adj_matrix[min(node1, node2), max(node1, node2)] = True\n connected_nodes.append(node1)\n\n # Add latent confounders \n if num_latents > 0:\n # Latent confounders are identified by their variable name \"X_{l,...}\"\n variable_names = [r\"$X_{l,%i}$\" % (i+1) for i in range(num_latents)] + variable_names\n # Latent confounders are added in the graph structure. When exporting the graph, \n # we remove those variables so that we can apply our structure learning algorithm\n # without any changes.\n node_idxs = [v_idx+num_latents for v_idx in range(num_vars)\n if (adj_matrix[:, v_idx].sum() < max_parents or max_parents <= 0)]\n adj_matrix = np.concatenate([np.zeros((num_latents, num_vars)), adj_matrix], axis=0)\n adj_matrix = np.concatenate([np.zeros((num_vars+num_latents, num_latents)), adj_matrix], axis=1)\n # Randomly select the node pairs on which we want to have a latent confounder\n latent_children = []\n for l in range(num_latents):\n node_pair = None\n # We sample unique node pairs where there exists no direct edge between both nodes\n while node_pair is None or node_pair in latent_children or adj_matrix[node_pair[0], node_pair[1]]:\n node_pair = random.sample(node_idxs, k=2)\n node_pair = sorted(node_pair)\n latent_children.append(node_pair)\n adj_matrix[l, node_pair[0]] = 1\n adj_matrix[l, node_pair[1]] = 1\n latents = np.array([[i]+lc for i, lc in enumerate(latent_children)])\n else:\n latents = None\n\n return graph_from_adjmatrix(variable_names, dist_func, adj_matrix, latents=latents)",
"def life_generation(A):\n return map2(next_alive, neighbor_count(A), A)",
"def contact_guidance(lgca):\n newnodes = lgca.nodes.copy()\n relevant = (lgca.cell_density[lgca.nonborder] > 0) & \\\n (lgca.cell_density[lgca.nonborder] < lgca.K)\n coords = [a[relevant] for a in lgca.nonborder]\n for coord in zip(*coords):\n n = lgca.cell_density[coord]\n sni = lgca.guiding_tensor[coord]\n permutations = lgca.permutations[n]\n si = lgca.si[n]\n weights = np.exp(lgca.beta * np.einsum('ijk,jk', si, sni)).cumsum()\n ind = bisect_left(weights, random() * weights[-1])\n newnodes[coord] = permutations[ind]\n\n lgca.nodes = newnodes",
"def anneal():\n best_sol = list(range(SIZE))\n best_sum = get_sum(best_sol)\n shuffle(best_sol)\n\n temp = 10000000\n cool_rate = 0.0003\n\n counter = 0\n while temp > 1:\n new_sol = best_sol.copy()\n i, j = randint(0, SIZE - 1), randint(0, SIZE - 1)\n new_sol[i], new_sol[j] = new_sol[j], new_sol[i]\n new_energy = get_sum(new_sol)\n cur_energy = best_sum\n if calculate_probability(cur_energy, new_energy, temp) > random():\n best_sol = new_sol.copy()\n best_sum = new_energy\n temp *= 1 - cool_rate\n counter += 1\n\n print(counter)\n\n print(best_sol)\n print(best_sum)\n return best_sol, best_sum",
"def compute_adjacency_matrix(G):\n\n iG = nx.convert_node_labels_to_integers(G)\n adj_list = iG.adjacency_list()\n n_nodes = len(iG.nodes())\n\n adj_mat = np.zeros((n_nodes, n_nodes))\n for x in xrange(n_nodes):\n adj_mat[x, adj_list[x]] = 1\n\n return adj_mat",
"def alg(c):\n return c[0]*G[0] + c[1]*G[1] + c[2]*G[2]",
"def _estimate_assignments(self, graph: GraphRepresentation) -> None:\n embed_graph = augment_diagonal(graph)\n latent = AdjacencySpectralEmbed(\n n_components=self.n_components, **self.embed_kws\n ).fit_transform(embed_graph)\n if isinstance(latent, tuple):\n latent = np.concatenate(latent, axis=1)\n gc = GaussianCluster(\n min_components=self.min_comm,\n max_components=self.max_comm,\n **self.cluster_kws\n )\n vertex_assignments = gc.fit_predict(latent) # type: ignore\n self.vertex_assignments_ = vertex_assignments",
"def main():\n n = 34\n # create the adjacency matrix\n stripped_lines = Util.get_stripped_lines(g_karate_data.splitlines())\n string_rows = [line.split() for line in stripped_lines if line]\n assert len(string_rows) == n\n for row in string_rows:\n assert len(row) == n\n data_rows = [[float(x) for x in string_row] for string_row in string_rows]\n A = np.array(data_rows)\n # create the ordered module indices\n first_cluster_one_based_indices = [1, 3, 4, 14, 2, 8, 20, 18, 22, 13, 12, 6, 7, 17, 5, 11]\n second_cluster_one_based_indices = [25, 32, 26, 29, 24, 28, 9, 34, 33, 19, 16, 31, 15, 10, 23, 30, 21, 27]\n assert len(first_cluster_one_based_indices + second_cluster_one_based_indices) == n\n assert list(sorted(first_cluster_one_based_indices + second_cluster_one_based_indices)) == range(1, n+1)\n ordered_module_indices = []\n for i in range(n):\n if i+1 in first_cluster_one_based_indices:\n ordered_module_indices.append(0)\n else:\n ordered_module_indices.append(1)\n # print the modularity\n Q = get_modularity_other_b(A, ordered_module_indices)\n print 'modularity calculated using my interpretation of the method of the paper', Q\n Q = get_modularity_other_b2(A, ordered_module_indices)\n print 'modularity calculated using a modification of my interpretation of the method of the paper', Q\n Q = get_modularity_other_c(A, ordered_module_indices)\n print 'modularity calculated using the method on wikipedia', Q\n Q = get_eric_modularity(A, ordered_module_indices)\n print 'modularity calculated using the method eric used:', Q\n print 'expected modularity: .375 +/- .025'",
"def cc_visited(ugraph):\r\n\tremaining_node = ugraph.keys()\t\t#The keys are accessible directly.\r\n\t\r\n\tcon_com = [] #connected component\r\n\twhile len(remaining_node) != 0 :\r\n\t\tnode = random.choice(remaining_node)\r\n\t\tvisited = bfs_visited(ugraph,node)\r\n\t\tcon_com.append(visited)\r\n\t\tfor item in visited:\r\n\t\t\tremaining_node.remove(item)\r\n\treturn con_com",
"def next_life_generation(a):\n w = len(a[0])\n h = len(a)\n new_a = create_board(w, h)\n\n for n in range(h):\n for m in range(w):\n if 0 < n < h - 1 and 0 < m < w - 1:\n count = count_neighbours(n, m, a)\n if count < 2 or count > 3:\n new_a [n][m] = 0\n elif count == 3:\n new_a[n][m] =1\n else:\n new_a[n][m] = a[n][m]\n else:\n new_a[n][m] = 0\n \n return new_a",
"def random_walk(lgca):\n # disarrange(lgca.nodes, axis=-1)\n relevant = (lgca.cell_density[lgca.nonborder] > 0) & \\\n (lgca.cell_density[lgca.nonborder] < lgca.K)\n coords = [a[relevant] for a in lgca.nonborder]\n for coord in zip(*coords):\n npr.shuffle(lgca.nodes[coord])"
] | [
"0.6241756",
"0.600505",
"0.59761393",
"0.59632105",
"0.58276546",
"0.58211476",
"0.57884556",
"0.5777621",
"0.5756752",
"0.56359476",
"0.55702484",
"0.5567716",
"0.55158526",
"0.54752773",
"0.54675233",
"0.5442799",
"0.54157084",
"0.54124737",
"0.53701353",
"0.53666514",
"0.5364982",
"0.5323595",
"0.531969",
"0.53093445",
"0.5308204",
"0.53015536",
"0.52959424",
"0.5291456",
"0.52826965",
"0.52548164"
] | 0.6560323 | 0 |
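A tiny worked example of the random-surfing accumulation performed by the PCO document above, with the row normalisation re-implemented inline (an assumption, since the normalize helper it calls is not shown in the row):

import numpy as np

def row_normalize(A):
    s = A.sum(axis=1, keepdims=True)
    s[s == 0] = 1.0
    return A / s

A = np.array([[0., 1., 1.],
              [1., 0., 0.],
              [1., 0., 0.]])
alpha, K = 0.98, 4
P = np.eye(3)
M = np.zeros((3, 3))
for _ in range(K):
    # with probability alpha take one step on the normalised graph,
    # with probability 1-alpha restart from the original node
    P = alpha * P.dot(row_normalize(A)) + (1 - alpha) * np.eye(3)
    M += P
print(M.round(3))  # accumulated co-occurrence weights after K steps
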
Reverse lines of old file into new file | def reverse(old, new):
infile = open(old, "r")
xs = infile.readlines() # Turn lines of old file into a list
infile.close()
xs.reverse() # Reverse this list
outfile = open(new, "w")
for i in xs:
outfile.write(i) # Write a line-at-a-time at reversed list
outfile.close() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def reverse_lines(file1, file2):\n with open(file1) as input, open(file2, 'w') as output:\n for word in input:\n word.rstrip() #to remove the newline character at the end\n word1 = word[::-1]\n output.write(f'{word1}\\n')\n print(word1)",
"def reverse_file(filename):\n S = ArrayStack()\n original = open(filename)\n for line in original:\n S.push(line.rstrip(\"\\n\")) # we will re-insert newlines when writing\n original.close()\n\n # now we overwrite with contents in LIFO order\n output = open(filename, \"w\") # reopening file overwrites original\n while not S.is_empty():\n output.write(S.pop() + \"\\n\") # re-insert newline characters\n output.close()",
"def reverse(file_name):\n try:\n with open(file_name, \"r\") as file:\n list_inverted = file.readlines()[::-1]\n list_inverted_per_lines = list([i.replace(\"\\n\", \"\")[::-1] + \"\\n\" for i in list_inverted])\n list_inverted_per_lines[-1] = list_inverted_per_lines[-1].replace(\"\\n\", \"\")\n new_file_name = \"disodered.txt\"\n with open(new_file_name, \"w\") as file_2:\n for text in list_inverted_per_lines:\n file_2.write(text)\n print(\"Operation performed successfully!, file saved as '{}' in the main dir\".format(new_file_name))\n except FileNotFoundError:\n print(\"the specified file does not exist!\")",
"def open_read_append_new_file(file1, file2):\n\n # open the first file in the read mode\n with open(file1) as fin:\n # read all lines into a list\n lst = fin.readlines()\n\n # reverse the list\n lst.reverse()\n\n # open second file for pending\n fout = open(file2, \"a\")\n\n # write reversed lines to a second file\n fout.writelines(lst)\n\n # close the second file\n fout.close()",
"def test_reverse_file (filename):\r\n\r\n\tS = Array_stack.ArrayStack()\r\n\toriginal = open(filename)\r\n\tfor line in original:\r\n\t\tS.push(line.rstrip('\\n'))\r\n\toriginal.close()\r\n\t# now we write with contents in LIFO order\r\n\r\n\toutput = open(filename, 'w')\r\n\twhile not S.is_empty():\r\n\t\toutput.write(S.pop() + '\\n')\r\n\toutput.close()",
"def reverseLine(inputFile, outputFile):\n with open(inputFile, 'r') as f: # opening the classes.txt file\n newlist = f.readlines() # creating a list containing each line from the classes file\n for i in range(len(newlist)): \n newlist[i] = newlist[i].strip() # getting rid of the newlines as to not interfere with the text reversal\n\n with open(outputFile, 'w') as f:\n reversedList = []\n\n for i in range(len(newlist)): \n x = newlist[i].split() # this will break up each elements string on their spaces to make it easier to reverse them\n x.reverse()\n original = ' '.join(x) # this will bring it all back together in input files original text format\n reversedList.append(original + '\\n')\n\n final = ''.join(reversedList)\n print(final)\n f.write(final)",
"def replace2(oldlst,newlst):\n with open(oldlst, 'r') as f4:\n with open(newlst,'w') as f5:\n for line in f4:\n f5.write(line)",
"def _change_file(file):\n\n with fileinput.FileInput(file, inplace=True, backup='.bak') as f:\n for index, line in enumerate(f):\n if index == 13:\n print(line.replace(line, line[15:]), end='')\n else:\n print(line.replace(line, line), end='')",
"def generate_reverse(path):\n \n with open(path, \"r\") as f:\n for line in f:\n line = line.strip()\n # print (line) \n if len(line) == 0:\n continue\n \n if line[0] == \">\":\n line = line + \"_R\"\n print(line)\n else:\n buf = \"\"\n for char in line:\n if char == \"A\":\n buf += \"T\"\n elif char == \"T\":\n buf += \"A\"\n elif char == \"G\":\n buf += \"C\"\n elif char == \"C\":\n buf += \"G\"\n\n print (buf[::-1])",
"def reconstruct(new, original):\n\n output = []\n\n # Find deltas between new and original and add newlines to the output wherever they're found in original\n seqm = difflib.SequenceMatcher(None, new, original, False)\n for opcode, a0, a1, b0, b1 in seqm.get_opcodes():\n delta_a = seqm.a[a0:a1] # Substring in new\n delta_b = seqm.b[b0:b1] # Substring in original\n newline = re.search(r'(\\r|\\n)+', delta_b)\n\n # Always take delta_a over delta_b unless there's a newline\n if opcode == 'equal':\n output.append(delta_a)\n elif opcode == 'insert' and newline: # Append any insertion containing a newline\n output.append(newline.group(0))\n elif opcode == 'delete':\n output.append(delta_a)\n elif opcode == 'replace':\n if newline:\n if re.match(r'\\r|\\n', delta_b[-1]): # If the newline is the last character, insert the newline after delta_a\n output.append(delta_a + newline.group(0))\n else: # Otherwise insert the newline before delta_a\n output.append(newline.group(0) + delta_a)\n else:\n output.append(delta_a)\n\n # Strip leading and trailing whitespace from each line\n new = ''.join(output)\n lines = new.split('\\n')\n for i in range(len(lines)):\n lines[i] = lines[i].strip()\n\n # Write file\n file = open('GenderSwap.txt', 'w')\n file.write('\\n'.join(lines))\n file.close()",
"def process(self):\n first_line = self.setup[\"first_line\"]\n last_line = self.setup[\"last_line\"]\n\n self.logger.info(\"Using lines %s - %s\", first_line, last_line)\n\n path_temp = \"{}_\".format(self.path)\n\n with open(self.path, \"r\") as src, open(path_temp, \"w\") as dest:\n lines = src.r..\n copy_lines = lines[first_line-1:last_line]\n dest.write(\"\".join(copy_lines))\n\n os.rename(path_temp, self.path)",
"def reduce_data(old_file, new_file):\n links_list = list()\n\n with open(old_file, \"r\") as file:\n for line in file:\n link = line.replace('\\n', '')\n links_list.append(link)\n\n result_list = list(set(links_list)) # eliminate duplicate links\n\n with open(new_file, \"w\") as file:\n for link in result_list:\n file.write(link + \"\\n\")",
"def pre_process_multispace(filepath, delimiter=\" \"):\n newpath = filepath+\".rev.csv\"\n with open(filepath, \"r\") as src_csv_file:\n with open(newpath, \"w\") as dst_csv_file:\n for src_line in src_csv_file:\n dst_csv_file.write(delimiter.join(src_line.split())+\"\\n\")",
"def read_file_and_maybe_fix_it(filename, fix):\n\twith open(filename, 'r') as istr:\n\t\tlines = istr.readlines()\n\tif fix:\n\t\tlines = list(map(fix_horizontal, fix_vertical(lines)))\n\t\tbackupname = make_backup_file_name(filename)\n\t\tshutil.copy(filename, backupname)\n\t\twith open(filename, 'w') as ostr:\n\t\t\tostr.writelines(lines)\n\treturn lines",
"def replace_lines(file_path, idx=[0], new_lines=['Hello!\\n'], dest=None):\n with open(file_path, 'r') as f:\n lines = f.readlines()\n\n if len(idx) == len(new_lines):\n for i, nl in zip(idx, new_lines):\n lines[i] = nl\n\n if dest is None:\n dest = os.path.split(file_path)[0]\n os.remove(file_path)\n\n new_file = os.path.join(dest, os.path.basename(file_path))\n with open(new_file ,'w') as nf:\n for line in lines:\n nf.write(line)\n else:\n print('Requested indices do not match given number of lines!!!')",
"def replace_in_file(path, old, new):\n with open(path) as fp:\n content = fp.read()\n\n lpf.ensure_removed(path)\n with open(path, 'w') as fp:\n fp.write(content.replace(old, new))",
"def overwrite(fstack: List[Tuple[str,int]]) -> ():\n filename, line_num = fstack.pop()\n tmp = str() # store our new file in memory\n with open(filename, 'r') as input:\n for i,line in enumerate(input):\n if i + 1 == line_num:\n line = line.replace(\"pub \",\"\",1)\n _, line_num = fstack.pop() if fstack else ('',0)\n tmp += line\n with open(filename, 'w') as newfile:\n newfile.write(tmp)",
"def debz(oldfn, newfn):\n if os.path.isfile(newfn):\n print(\"Error: refusing to overwrite existing file '%s'\" % (newfn, ))\n return\n output = open(newfn, 'wb')\n fobj = open(oldfn, 'rb')\n\n output.write(fobj.read(24))\n while True:\n sz = struct.unpack('>L', fobj.read(4))[0]\n chunk = fobj.read(sz)\n if not chunk:\n break\n output.write(bz2.decompress(chunk))\n # unsure of this\n if sz != len(chunk):\n break\n\n output.close()",
"def replaceInFile (remplacements, source, destination, replaceInReadOnly = 0, confirm = None):\n\tfrom shutil import copy2\n\tfrom os.path import isdir, exists, split as splitPath\n\tfrom os import access, W_OK, chmod\n\n\tsource = adaptPath(source)\n\tdestination = adaptPath(destination)\n\t\n\t# If it is not a file\n\tif isdir (source) or isdir (destination):\n\t\treturn\n\n\t# Clears the contents of the target file\n\tresult =[]\n\tmodified = 0\n\tlineNumber = 0\n\n\tif remplacements != []:\n\t\ttry:\n\t\t\t# Reading the contents of the file and creating a list of lines\n\t\t\tsourceLines = open(source,\"r\").readlines()\n\n\t\t\t# For each line in the original file\n\t\t\tfor line in sourceLines:\n\t\t\t\t# For each replacement to be made in the original file\n\t\t\t\tfor i in remplacements:\n\t\t\t\t\t# Replaces values in line\n\t\t\t\t\tres = line.replace(i[0], i[1])\n\n\t\t\t\t\t# Inhibits updating the line\n\t\t\t\t\tupdate = 0\n\n\t\t\t\t\t# If there has been a replacement\n\t\t\t\t\tif line.find(i[0]) != -1:\n\t\t\t\t\t\t# If confirmation is not required\n\t\t\t\t\t\tif confirm == None:\n\t\t\t\t\t\t\tupdate = 1\n\t\t\t\t\t\t# If the change is confirmed\n\t\t\t\t\t\telif confirm (source, lineNumber, sourceLines, i[0]) == 1:\n\t\t\t\t\t\t\tupdate = 1\n\n\t\t\t\t\t# If an update of the current line is requested\n\t\t\t\t\tif update:\n\t\t\t\t\t\t# The file must be updated\n\t\t\t\t\t\tmodified = 1\n\n\t\t\t\t\t\t# Updated line modified\n\t\t\t\t\t\tline = res\n\n\t\t\t\t# Adds the new line to the target file\n\t\t\t\tresult.append(line)\n\n\t\t\t\t# Increment the current line number\n\t\t\t\tlineNumber += 1\n\t\texcept UnicodeDecodeError:\n\t\t\t# Replacement in binary file disabled\n\t\t\tmodified = 0\n\n\t# If the file is to be saved\n\tif modified:\n\t\t# If replacement is force for protected files\n\t\tif replaceInReadOnly:\n\t\t\t# Deletes read-only\n\t\t\tchmod(destination, 0o777)\n\t\t# If the file is not write-protected\n\t\tif access(destination, W_OK) == 1 or not exists(destination):\n\t\t\t# Opens and saves the lines of the target file\n\t\t\topen(destination,\"w\").writelines(result)\n\t# If the destination file is different from the source file\n\telif source != destination:\n\t\t# Copies the destination file\n\t\tmakedir (splitPath(destination)[0])\n\t\ttry: copy2(source, destination)\n\t\texcept IOError:print (\"Cannot copy %s->%s\"%(source, destination))",
"def write_to_file(original_path, new_path):\n print(f\"[INFO]: Transform data from binary to text file {new_path}\")\n with open(new_path, mode='wt', encoding='utf-8') as new_file:\n with open(original_path, mode='rb') as original_file:\n for line in original_file:\n new_file.write(line.decode())",
"def reverse_file_reader(cls, file_object: object,\n\t\t\t\t\t\t\tlines_separator: str = '\\n',\n\t\t\t\t\t\t\tkeep_lines_separator: bool = True,\n\t\t\t\t\t\t\tsave_memory: bool = True):\n\t\tf = open(filename, 'w')\n\n\t\tdef ceil_division(left_number, right_number):\n\t\t\t\"\"\"\n\t\t\tDivides given numbers with ceiling.\n\t\t\t\"\"\"\n\t\t\treturn -(-left_number // right_number)\n\n\t\tdef split(string, separator, keep_separator):\n\t\t\t\"\"\"\n\t\t\tSplits given string by given separator.\n\t\t\t\"\"\"\n\t\t\tparts = string.split(separator)\n\t\t\tif keep_separator:\n\t\t\t\t*parts, last_part = parts\n\t\t\t\tparts = [part + separator for part in parts]\n\t\t\t\tif last_part:\n\t\t\t\t\treturn parts + [last_part]\n\t\t\treturn parts\n\n\t\tdef read_batch_from_end(byte_stream, size, end_position):\n\t\t\t\"\"\"\n\t\t\tReads batch from the end of given byte stream.\n\t\t\t\"\"\"\n\t\t\tif end_position > size:\n\t\t\t\toffset = end_position - size\n\t\t\telse:\n\t\t\t\toffset = 0\n\t\t\t\tsize = end_position\n\t\t\tbyte_stream.seek(offset)\n\t\t\treturn byte_stream.read(size)\n\n\t\tdef reverse_binary_stream(byte_stream, batch_size=None,\n\t\t\t\t\t\t\t\t lines_separator=None,\n\t\t\t\t\t\t\t\t keep_lines_separator=True):\n\t\t\tif lines_separator is None:\n\t\t\t\tlines_separator = (b'\\r', b'\\n', b'\\r\\n')\n\t\t\t\tlines_splitter = methodcaller(str.splitlines.__name__,\n\t\t\t\t\t\t\t\t\t\t\t keep_lines_separator)\n\t\t\telse:\n\t\t\t\tlines_splitter = functools.partial(split,\n\t\t\t\t\t\t\t\t\t\t\t\t separator=lines_separator,\n\t\t\t\t\t\t\t\t\t\t\t\t keep_separator=keep_lines_separator)\n\t\t\tstream_size = byte_stream.seek(0, os.SEEK_END)\n\t\t\tif batch_size is None:\n\t\t\t\tbatch_size = stream_size or 1\n\t\t\tbatches_count = ceil_division(stream_size, batch_size)\n\t\t\tremaining_bytes_indicator = itertools.islice(\n\t\t\t\titertools.accumulate(itertools.chain([stream_size],\n\t\t\t\t\t\t\t\t\t\t\t\t\t itertools.repeat(batch_size)),\n\t\t\t\t\t\t\t\t\t sub),\n\t\t\t\tbatches_count)\n\t\t\ttry:\n\t\t\t\tremaining_bytes_count = next(remaining_bytes_indicator)\n\t\t\texcept StopIteration:\n\t\t\t\treturn\n\n\t\t\tdef read_batch(position):\n\t\t\t\tresult = read_batch_from_end(byte_stream,\n\t\t\t\t\t\t\t\t\t\t\t size=batch_size,\n\t\t\t\t\t\t\t\t\t\t\t end_position=position)\n\t\t\t\twhile result.startswith(lines_separator):\n\t\t\t\t\ttry:\n\t\t\t\t\t\tposition = next(remaining_bytes_indicator)\n\t\t\t\t\texcept StopIteration:\n\t\t\t\t\t\tbreak\n\t\t\t\t\tresult = (read_batch_from_end(byte_stream,\n\t\t\t\t\t\t\t\t\t\t\t\t size=batch_size,\n\t\t\t\t\t\t\t\t\t\t\t\t end_position=position)\n\t\t\t\t\t\t\t + result)\n\t\t\t\treturn result\n\n\t\t\tbatch = read_batch(remaining_bytes_count)\n\t\t\tsegment, *lines = lines_splitter(batch)\n\t\t\tyield from lines[::-1]\n\t\t\tfor remaining_bytes_count in remaining_bytes_indicator:\n\t\t\t\tbatch = read_batch(remaining_bytes_count)\n\t\t\t\tlines = lines_splitter(batch)\n\t\t\t\tif batch.endswith(lines_separator):\n\t\t\t\t\tyield segment\n\t\t\t\telse:\n\t\t\t\t\tlines[-1] += segment\n\t\t\t\tsegment, *lines = lines\n\t\t\t\tyield from lines[::-1]\n\t\t\tyield segment\n\n\t\tdef reverse_file(file, batch_size=None,\n\t\t\t\t\t\t lines_separator=None,\n\t\t\t\t\t\t keep_lines_separator=True):\n\t\t\tencoding = file.encoding\n\t\t\tif lines_separator is not None:\n\t\t\t\tlines_separator = lines_separator.encode(encoding)\n\t\t\tyield from map(functools.partial(codecs.decode,\n\t\t\t\t\t\t\t\t\t\t\t encoding=encoding),\n\t\t\t\t\t\t 
reverse_binary_stream(\n\t\t\t\t\t\t\t file.buffer,\n\t\t\t\t\t\t\t batch_size=batch_size,\n\t\t\t\t\t\t\t lines_separator=lines_separator,\n\t\t\t\t\t\t\t keep_lines_separator=keep_lines_separator))\n\n\t\tif save_memory:\n\t\t\tr_file = reverse_file(file_object)\n\t\t\tfor line in r_file:\n\t\t\t\tif keep_lines_separator:\n\t\t\t\t\tyield f'{line.rstrip()}{lines_separator}'\n\t\t\t\telse:\n\t\t\t\t\tyield line.rstrip()\n\t\telse:\n\t\t\treversed_file = file_object.readlines()\n\t\t\treversed_file.reverse()\n\t\t\tfor line in reversed_file:\n\t\t\t\tif keep_lines_separator:\n\t\t\t\t\tyield f'{line.rstrip()}{lines_separator}'\n\t\t\t\telse:\n\t\t\t\t\tyield line.rstrip()",
"def convert(src, dst):\n with open(dst, 'w', encoding = 'utf-8') as myFile:\n records = read(src)\n for tag in sorted(records.keys()):\n myFile.write('%s %s\\n' %(tag, records[tag]))",
"def de_flip_file(n):\n start = '0'*(n//2) + '1' + '0'*(n//2) # 00100 if n == 5\n f_name = 'C:/Users/clean/Desktop/dede.txt'\n f_tmp = 'C:/Users/clean/Desktop/dede2.txt'\n f_result = 'C:/Users/clean/Desktop/de_flip.txt'\n\n #L = [[start]]\n with open(f_name,'w') as f:\n f.write(start + '\\n')\n\n #L_tmp = []\n with open(f_tmp,'w') as ftmp:\n pass\n \n #collect = []\n with open(f_result,'w') as f_end:\n pass\n \n \n length = 1\n \n while(True):\n count = 0\n check_end = True\n\n with open(f_name) as f:\n for branch in f:\n L_branch = branch.split()\n next_0 = L_branch[-1][1:] + '0'\n next_1 = L_branch[-1][1:] + '1'\n\n if next_0 == start:\n with open(f_result,'a') as f_end:\n f_end.write(branch)\n count += 1\n continue\n\n if next_0 not in L_branch and next_0[::-1] not in L_branch:\n check_end = False\n with open(f_tmp,'a') as ftmp:\n ftmp.write(branch[:-1] + ' ' + next_0 + '\\n')\n\n if next_1 not in L_branch and next_1[::-1] not in L_branch:\n check_end = False\n with open(f_tmp,'a') as ftmp:\n ftmp.write(branch[:-1] + ' ' + next_1 + '\\n')\n \n if check_end == True:\n break\n \n shutil.copyfile(f_tmp, f_name) # 오른쪽으로 복사\n\n with open(f_tmp,'w') as ftmp:\n pass\n\n print(\"length : {0}, count : {1}\".format(length,count))\n length += 1\n return None",
"def merge(fileHandle1, fileHandle2, outputFileHandle):\n line2 = fileHandle2.readline()\n for line1 in fileHandle1.readlines():\n while line2 != '' and line2 <= line1:\n outputFileHandle.write(line2)\n line2 = fileHandle2.readline()\n outputFileHandle.write(line1)\n while line2 != '':\n outputFileHandle.write(line2)\n line2 = fileHandle2.readline()",
"def FSLFlip(self, infile, prefix):\n cmd = '3dresample -orient LPI -prefix %s.nii -inset %s+orig' % \\\n (prefix, infile)\n self.CheckExec(cmd, ['%s.nii' % prefix])\n fname = '%s+orig.BRIK' % infile\n if os.path.exists(fname):\n os.remove(fname)\n fname = '%s+orig.HEAD' % infile\n if os.path.exists(fname):\n os.remove(fname)",
"def update_file(dst, src, language, mutator):\n\n # if the source and destination are the same, we're updating in place\n inplace = dst == src\n\n if isinstance(src, str):\n # if a filename was provided, open the file\n if inplace:\n mode = \"r+\"\n else:\n mode = \"r\"\n src = open(src, mode)\n\n orig_lines = []\n\n # grab all of the lines of the file and strip them of their line ending\n old_lines = list(line.rstrip(\"\\r\\n\") for line in src)\n new_lines = list(mutator(old_lines, src.name, language))\n\n for line in src:\n line = line\n\n if inplace:\n # if we're updating in place and the file hasn't changed, do nothing\n if old_lines == new_lines:\n return\n\n # otherwise, truncate the file and seek to the beginning.\n dst = src\n dst.truncate(0)\n dst.seek(0)\n elif isinstance(dst, str):\n # if we're not updating in place and a destination file name\n # was provided, create a file object\n dst = open(dst, \"w\")\n\n for line in new_lines:\n dst.write(line)\n dst.write(\"\\n\")",
"def RewriteFile(start, end, original_dir, original_filename, snippet,\n outdir=None):\n original_path = GetPath(os.path.join(original_dir, original_filename))\n original = file(original_path, 'r')\n original_content = original.read()\n original.close()\n if outdir:\n outpath = os.path.join(outdir, original_filename)\n else:\n outpath = original_path\n out = file(outpath, 'w')\n rx = re.compile(r'%s\\n.*?%s\\n' % (re.escape(start), re.escape(end)),\n re.DOTALL)\n new_content = re.sub(rx, '%s\\n%s%s\\n' % (start, snippet, end),\n original_content)\n out.write(new_content)\n out.close()\n print 'Output ' + os.path.normpath(outpath)",
"def normalize_file(in_file, out_file):\n with open(in_file, \"r\") as book, open(out_file, \"w\") as out:\n for line in book:\n if line.strip() == \"\":\n continue\n\n line = normalize_text(line)\n\n out.write(line.lstrip())",
"def _inverse_lines(self):\n pass",
"def write_lines(file_lines, new_file):\n with open(new_file, 'w') as f:\n for l in file_lines:\n f.write(l)"
] | [
"0.75387704",
"0.7520648",
"0.6905407",
"0.6788678",
"0.67051935",
"0.6465679",
"0.6307585",
"0.61510444",
"0.60499054",
"0.57435423",
"0.57397157",
"0.5700939",
"0.5695171",
"0.56869304",
"0.55833876",
"0.5526218",
"0.55017537",
"0.5463649",
"0.54575527",
"0.5442164",
"0.54275554",
"0.54185086",
"0.53972477",
"0.5387586",
"0.5359588",
"0.5326269",
"0.5324578",
"0.5317635",
"0.53176165",
"0.528732"
] | 0.8367213 | 0 |
Player's age in the given year (or in currentyear if no year is given) | def age(self, year=None):
y, m = self.master['birthYear'], self.master['birthMonth']
return (year if year else self.currentyear) - y - (m > 6) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_age(self):\n return CURRENT_YEAR - self.year",
"def get_age(self):\n return CURRENT_YEAR - self.year",
"def get_age(self):\n age = CURRENT_YEAR - self.year\n return age",
"def get_age(self):\n return int(CURRENT_YEAR[:4]) - self.year # String-slice only the year",
"def get_age(self):\n age = 2021 - self.year\n return age",
"def get_age(self):\n return Guitar.CURRENT_YEAR - self.year",
"def age(self):\n return datetime.now().year - self.birth_day.year",
"def age(self):\n today = datetime.date.today()\n\n return today.year - int(self.birthday[0:4])",
"def getagefromyear(year=None):\n if year is None:\n print(\"Please enter the year to assign class to them\")\n try:\n t = datetime.datetime.today()\n b = datetime.datetime.strptime(str(year), '%Y')\n a = (t - b).days / 365\n a = int(a)\n if (a < 10) or (a > 80):\n a = None\n except:\n a = None\n return a",
"def calculateAgeInYears(year_born):\r\n # ref https://stackoverflow.com/questions/4436957/pythonic-difference-between-two-dates-in-years\r\n current_year = int(d.datetime.now().year)\r\n difference_in_years = abs(current_year - year_born)\r\n return int(difference_in_years)",
"def age(self):\n today = datetime.date(2001, 5, 12)\n yyyy = self.birthday[0:4]\n mm = int(self.birthday[4:6])\n dd = int(self.birthday[6:8])\n dob = datetime.date(int(yyyy), mm, dd)\n age_in_days = (today - dob).days\n age_in_years = age_in_days / 365\n return int(age_in_years)",
"def get_age(self):\n today = datetime.now()\n return today.year \\\n - self.date_of_birth.year \\\n - ((today.month, self.date_of_birth.day) \\\n < (self.date_of_birth.month, self.date_of_birth.day))",
"def getage(birth_year):\n today = datetime.date.today()\n print(\"Today is: \", today)\n year = today.year\n\n age = year - int(birth_year)\n # print(type(age)) you return int value by taking str\n return age",
"def age_diff(self, other):\n diff = self.age - other.age\n print(abs(diff), \"year difference\")",
"def get_age(self):\n born = self.birth_date\n if not born:\n return 0\n today = fields.Date.today()\n return today.year - born.year - ((today.month, today.day) < (born.month, born.day))",
"def calculate_current_age(dob):\n today = datetime.date.today()\n years = today.year - dob.year\n if today.month < dob.month or (today.month == dob.month and today.day < dob.day):\n years -= 1\n return years",
"def get_age(self, name=None):\n now = datetime.now()\n delta = relativedelta(now, self.date_of_birth)\n years_months_days = str(delta.years) + 'y ' + str(delta.months) + \\\n 'm ' + str(delta.days) + 'd'\n return years_months_days",
"def get_age(YY_OF_BIRTH, MM_OF_BIRTH, DD_OF_BIRTH):\n\n date_ = date.today()\n year = date_.year\n month = date_.month\n day = date_.day\n\n age = year - YY_OF_BIRTH\n\n if (month < MM_OF_BIRTH):\n age -= 1\n elif (month == MM_OF_BIRTH):\n if (day < DD_OF_BIRTH):\n age -= 1\n\n return age",
"def get_age(self):\n if validate_date_format(self.birt):\n birth_year, birth_month, birth_day = change_date_format(self.birt).split('-')\n\n if self.alive:\n provided_date = datetime.today().date()\n age = (provided_date.year - int(birth_year) - ((datetime.today().month, datetime.today().day) < (int(birth_month), int(birth_day))))\n else:\n death_year, death_month, death_day = change_date_format(self.deat).split('-')\n age = (int(death_year) - int(birth_year) - ((int(death_month), int(death_day)) < (int(birth_month), int(birth_day))))\n\n return age",
"def age(self):\n\n years, months, days = calculate_age(self.birthdate)\n if years:\n return \"%d year%s old\" % (years, \"s\" if years > 1 else \"\")\n elif months:\n return \"%d month%s old\" % (months, \"s\" if months > 1 else \"\")\n else:\n return \"%d day%s old\" % (days, \"s\" if days > 1 else \"\")",
"def age(self, agent):\n return (self.time - agent.born)/52.0",
"def calculate_age(born):\n today = datetime.date.today()\n return today.year - born.year - ((today.month, today.day) < (born.month, born.day))",
"def year(self):\n return self._years",
"def get_age(date):\n today = datetime.date.today()\n return today.year - date.year - ((today.month, today.day) < (date.month, date.day))",
"def age(self):\r\n return self._age",
"def get_age(actor: Actor, movie: Movie) -> str:\r\n opening_date = dateutil.parser.parse(movie.release_date)\r\n birth_date = dateutil.parser.parse(actor.born)\r\n age = int((opening_date - birth_date).days / 365)\r\n return f'{actor.name} was {age} years old when {movie.title} came out.'",
"def age(self):\n return self.__age",
"def get_age(self):\n\t\treturn self.age",
"def get_age(self) -> int:\n return self.age",
"def compute_age(epoch):\n birth_date = datetime.datetime.fromtimestamp(epoch/1000)\n now = datetime.datetime.now()\n age = (now - birth_date).days / 365\n return age"
] | [
"0.79131144",
"0.79131144",
"0.78705555",
"0.7868375",
"0.78410995",
"0.7790479",
"0.776395",
"0.7680984",
"0.7429875",
"0.7400296",
"0.73505896",
"0.72571224",
"0.72172093",
"0.71441853",
"0.7121638",
"0.70939815",
"0.6984901",
"0.6898371",
"0.68906313",
"0.68311346",
"0.6787316",
"0.67726976",
"0.6707802",
"0.666308",
"0.66185844",
"0.6608541",
"0.659799",
"0.6566339",
"0.6548147",
"0.65196955"
] | 0.83400005 | 0 |
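For reference, the mid-year cutoff used by the age() document above can be exercised with a small standalone sketch; the birth_year/birth_month arguments are hypothetical stand-ins for the master record and are not part of the dataset row.

def age_in_year(birth_year, birth_month, year):
    # (birth_month > 6) evaluates to True/False, which Python counts as 1/0,
    # so a player born after June is counted one year younger for that season.
    return year - birth_year - (birth_month > 6)

print(age_in_year(1990, 3, 2020))   # 30 -- born in March
print(age_in_year(1990, 10, 2020))  # 29 -- born in October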
Block/unblock users from following/commenting/liking another user's posts | def block_user(user_id):
if not g.user:
flash("Access unauthorized.", "danger")
return redirect("/")
user = User.query.get_or_404(user_id)
users_blocking = [block.user_blocking_id for block in Blocks.query.all() if block.user_being_blocked_id == g.user.id]
likes = [message for message in user.likes if message.user_id not in users_blocking]
return render_template('users/blocked-users.html', user=user, likes=likes) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def block_uid(self, uid):",
"def unlike(self, request, pk=None):\n\n user_wall_post = self.get_object()\n user_wall_post.likes.remove(self.request.user)\n return Response(status=201)",
"def unlike(self, data_base, user):\n cursor = data_base.cursor()\n cursor.execute(f\"UPDATE post SET likes = likes - 1 WHERE id = '{self.id}'\") # Decrements the likes\n cursor.execute(f\"DELETE FROM user_like WHERE user_id = {user.id} AND post_id = {self.id}\")\n if self.commit_to_db:\n data_base.commit()\n cursor.close()",
"def unblock(self):\n data = {'container': self._reddit.user.me().fullname,\n 'name': str(self), 'type': 'enemy'}\n url = API_PATH['unfriend'].format(subreddit='all')\n # PRAW5 REMOVE (return statement)\n return self._reddit.post(url, data=data)",
"def post(self):\n liked = self.request.get('like')\n unliked = self.request.get('unlike')\n post_id = self.request.get('post_id')\n post = Posts.get_by_id(int(post_id))\n user = self.get_active_user()\n user_id = int(user.key().id())\n\n if liked:\n if user_id in post.liked_by:\n self.render_improper_endpoint_access(\"like\")\n else:\n if post.submitter_id != user_id:\n post.liked_by.append(user.key().id())\n post.put()\n self.redirect('/%s' % str(post.key().id()))\n else:\n self.error(403)\n elif unliked:\n if user_id in post.liked_by:\n index = post.liked_by.index(user_id)\n del post.liked_by[index]\n post.put()\n self.redirect('/%s' % str(post.key().id()))\n else:\n self.error(500)",
"def test_unauthenticated_user_liking(self):\n self.like_dislike(self.dislike_url(5))",
"def users_likes(user_id):\n\n if not g.user:\n flash(\"Access unauthorized.\", \"danger\")\n return redirect(\"/\")\n\n users_blocking = [block.user_blocking_id for block in Blocks.query.all() if block.user_being_blocked_id == g.user.id]\n # likes = Message.query.filter(Message.user_id.notin_(users_blocking)).all()\n user = User.query.get_or_404(user_id)\n likes = [message for message in user.likes if message.user_id not in users_blocking]\n return render_template('users/likes.html', user=user, likes=likes)",
"def test_unauthenticated_user_disliking(self):\n self.like_dislike(self.like_url(6))",
"def test_listing_from_wall_when_blocked_some_users(self):",
"def block_user():\n if request.method == 'POST':\n username = get_username()\n user_id = get_id_from_username(username)\n to_block = get_id_from_username(request.form['block_user'])\n if not to_block or to_block==user_id:\n #TODO: some sort of error if blockee doesn't exist\n return redirect(url_for('users.account_page', username=username))\n block_user_db(user_id, to_block)\n return redirect(url_for('users.account_page', username=username))",
"def post(self, request, *args, **kwargs):\n\n user_wall_post_comment = self.get_object()\n user_wall_post_comment.likes.remove(self.request.user)\n return Response(status=201)",
"def filter_blocked_posts(self, query_posts):\n blocks = self.blocks()\n filtered_query_posts = query_posts\n\n for block_user in blocks:\n filtered_query_posts = filtered_query_posts.exclude(author=block_user)\n return filtered_query_posts",
"def show_following(user_id):\n\n if not g.user:\n flash(\"Access unauthorized.\", \"danger\")\n return redirect(\"/\")\n\n user = User.query.get_or_404(user_id)\n users_blocking = [block.user_blocking_id for block in Blocks.query.all() if block.user_being_blocked_id == g.user.id]\n likes = [message for message in user.likes if message.user_id not in users_blocking]\n return render_template('users/following.html', user=user, likes=likes)",
"def block_user(request):\n account = models.Account.get_account_for_user(request.user_to_show)\n if request.method == 'POST':\n form = BlockForm(request.POST)\n if form.is_valid():\n account.blocked = form.cleaned_data['blocked']\n logging.debug(\n 'Updating block bit to %s for user %s',\n account.blocked,\n account.email)\n account.put()\n if account.blocked:\n # Remove user from existing issues so that he doesn't participate in\n # email communication anymore.\n tbd = {}\n email = account.user.email()\n query = models.Issue.query(models.Issue.reviewers == email)\n for issue in query:\n issue.reviewers.remove(email)\n issue.calculate_updates_for()\n tbd[issue.key] = issue\n # look for issues where blocked user is in cc only\n query = models.Issue.query(models.Issue.cc == email)\n for issue in query:\n if issue.key in tbd:\n # Update already changed instance instead. This happens when the\n # blocked user is in both reviewers and ccs.\n issue = tbd[issue.key]\n issue.cc.remove(account.user.email())\n tbd[issue.key] = issue\n ndb.put_multi(tbd.values())\n else:\n form = BlockForm()\n form.initial['blocked'] = account.blocked\n templates = {\n 'viewed_account': account,\n 'form': form,\n }\n return respond(request, 'block_user.html', templates)",
"async def block(self, ctx, *, url):\n blocked = await self.db.get('blocked', [])\n if url in blocked:\n return await ctx.send('😾 That image is already blocked.')\n blocked.append(url)\n await self.db.set('blocked', blocked)\n await ctx.send('😾 That image will not be posted again.')",
"def block_user(request):\n account = models.Account.get_account_for_user(request.user_to_show)\n if request.method == 'POST':\n form = BlockForm(request.POST)\n if form.is_valid():\n account.blocked = form.cleaned_data['blocked']\n logging.debug(\n 'Updating block bit to %s for user %s',\n account.blocked,\n account.email)\n account.put()\n if account.blocked:\n # Remove user from existing issues so that he doesn't participate in\n # email communication anymore.\n # These use eventual consistency and cannot be made strongly consistent.\n tbd = {}\n email = account.user.email()\n query = models.Issue.query(models.Issue.reviewers == email)\n for issue in query:\n issue.reviewers.remove(email)\n issue.calculate_updates_for()\n tbd[issue.key] = issue\n # look for issues where blocked user is in cc only\n query = models.Issue.query(models.Issue.cc == email)\n for issue in query:\n if issue.key in tbd:\n # Update already changed instance instead. This happens when the\n # blocked user is in both reviewers and ccs.\n issue = tbd[issue.key]\n issue.cc.remove(account.user.email())\n tbd[issue.key] = issue\n ndb.put_multi(tbd.values())\n else:\n form = BlockForm()\n form.initial['blocked'] = account.blocked\n templates = {\n 'viewed_account': account,\n 'form': form,\n }\n return respond(request, 'block_user.html', templates)",
"def user_response_to_post(self, request, pk):\n post_objects_count = Post.objects.filter(id=pk, liked_users__id=request.user.id).count()\n post_objects = Post.objects.get(id=pk)\n if post_objects_count !=0:\n post_objects.liked_users.remove(request.user)\n response_msg = \"You disliked the post\"\n else:\n post_objects.liked_users.add(request.user)\n response_msg = \"You have liked the post\"\n return Response({'data': response_msg}, status=status.HTTP_200_OK)",
"def auto_mute_following():\n following = set(t.friends.ids(screen_name=TWITTER_HANDLE)[\"ids\"])\n muted = set(t.mutes.users.ids(screen_name=TWITTER_HANDLE)[\"ids\"])\n\n not_muted = following - muted\n\n # put user IDs of people you do not want to mute here\n users_keep_unmuted = set([])\n \n # mute all \n for user_id in not_muted:\n if user_id not in users_keep_unmuted:\n t.mutes.users.create(user_id=user_id)\n print(\"muted %d\" % (user_id))",
"def notify_all_posters_of_new_post(self, post):\n users_in_thread = set(post.posted_by for post in self.posts.all())\n\n for user in users_in_thread:\n if user != post.posted_by:\n self.notify_user(user, post=post)",
"def grant_deny_access(self, bot, update):\n text = update.callback_query.data.split(\" \")\n command = text[0]\n user_lst = text[1:]\n # costruisce il dizionario dal messaggio\n user = {\"id\": user_lst[0], \"username\": \" \".join(user_lst[1:])}\n if (command.strip(\n \"/\") == \"consentiAccessoSi\"): # se viene garantito l'accesso salva l'user nel db e notifa user e developer\n\n if DB.execute(TABELLE[\"id_users\"][\"select\"][\"from_id\"], (user['id'],)):\n for msg in developer_message:\n bot.edit_message_text(\n chat_id=msg.chat_id,\n text=\"Lo user : \" + str(user[\"username\"]) + \", è gia presente nel db\",\n message_id=msg.message_id,\n parse_mode=\"HTML\"\n )\n return\n\n # print(\"Accesso garantito\")\n self.add_new_user(user)\n bot.send_message(user[\"id\"], \"Ti è stato garantito l'accesso al bot!\")\n\n for msg in developer_message:\n bot.edit_message_text(\n chat_id=msg.chat_id,\n text=\"L'accesso a user : \" + str(user[\"username\"]) + \", è stato garantito\",\n message_id=msg.message_id,\n parse_mode=\"HTML\"\n )\n\n else: # altrimenti aggiungi l'user alla lista bannati e notifica i developers\n # print(\"Accesso negato\")\n bot.send_message(user[\"id\"], \"Non ti è stato garantito l'accesso al bot :(\")\n self.ban_user(user)\n for msg in developer_message:\n bot.edit_message_text(\n chat_id=msg.chat_id,\n text=\"L'accesso a user : \" + str(user[\"username\"]) + \", è stato negato\",\n message_id=msg.message_id,\n parse_mode=\"HTML\"\n )\n\n developer_message.clear()",
"def unsafe_block_by_username(self, username: str) -> None:\n uid = InstagramAPI.username_to_id(username)\n self.api.block(uid)",
"async def user_banned_button(self, payload: discord.RawReactionActionEvent) -> None:\n\n self.bits = flip_action_bits(LoggingActions.USER_BANNED, self.bits)\n await self.update_embed()",
"def following_changed(sender, action, instance, *args, **kwargs):\n\n # m2mchanged.connect specified in apps.py\n\n following = instance.following.all()\n creator = instance.user\n\n if creator in following:\n raise ValidationError (\"can't like own post\")",
"def disallow_handler(update, _):\n global TEMPORARILY_ALLOWED\n user_id = update.message.chat.id\n if user_id == ADMIN_ID:\n TEMPORARILY_ALLOWED = False\n update.message.reply_text(\"Temprarily allowed disabled!\")",
"def is_disliked_by(self, user):\n return user.disliked_articles.filter(pk=self.pk).exists()",
"def user_disappears(self, user):\n pass",
"def temporarily_allow_handler(update, _):\n global TEMPORARILY_ALLOWED\n user_id = update.message.chat.id\n if user_id == ADMIN_ID:\n TEMPORARILY_ALLOWED = True\n update.message.reply_text(\"Temprarily allowed!\")",
"def stop_blocking(block_id):\n\n if not g.user:\n flash(\"Access unauthorized.\", \"danger\")\n return redirect(\"/\")\n\n blocked_user = User.query.get_or_404(block_id)\n g.user.blocked_users.remove(blocked_user)\n db.session.commit()\n\n return redirect(f\"/users/{g.user.id}/blocked-users\")",
"def accept(self):\n receiver_friend_list = FriendList.objects.get(user=self.receiver)\n if receiver_friend_list:\n receiver_friend_list.add_friend(self.sender)\n sender_friend_list = FriendList.objects.get(user=self.sender)\n if sender_friend_list:\n sender_friend_list.add_friend(self.receiver)\n self.is_active = False\n self.save()",
"def block_passes(request):\n if request.method == 'POST':\n if 'block' in request.POST:\n pnum = request.POST['passnumber']\n num = PersonPass.objects.all()\n reasons = request.POST['reason']\n flag=0\n for n in num:\n if n.pass_number == pnum:\n flag=1\n if flag == 0 or len(reasons) == 0:\n if flag == 0:\n messages.error(request, \"You have entered an invalid pass number\")\n if len(reasons) == 0:\n messages.error(request, 'Reason is required.')\n else:\n person = PersonPass.objects.get(pass_number= pnum)\n\n #if person is not None:\n #if pnum == passnum.pass_number:\n \n if person.is_blocked == False:\n #return HttpResponse('Your have already blocked this Pass!!')\n person.reason= reasons\n person.is_blocked = True\n person.save()\n # return HttpResponse('Your have successfully blocked!!')\n messages.success(request, 'Your have successfully blocked pass for '+ person.name) \n else:\n messages.error(request, 'Your have already blocked the pass for '+ person.name) \n elif 'unblock' in request.POST:\n pnum = request.POST['passnumber']\n num = PersonPass.objects.all()\n reasons = request.POST['reason']\n flag=0\n for n in num:\n if n.pass_number == pnum:\n flag=1\n if flag == 0 or len(reasons) == 0:\n if flag == 0:\n messages.error(request, \"You have entered an invalid pass number\")\n if len(reasons) == 0:\n messages.error(request, 'Reason is required.')\n else:\n person = PersonPass.objects.get(pass_number= pnum)\n #reasons = request.POST['reason']\n #if person is not None:\n #if pnum == passnum.pass_number:\n \n if person.is_blocked == True:\n #return HttpResponse('Your have already blocked this Pass!!')\n person.reason= reasons\n person.is_blocked = False\n person.save()\n # return HttpResponse('Your have successfully blocked!!')\n messages.success(request, 'Your have successfully unblocked the pass for '+person.name) \n else:\n messages.error(request, 'Your have already unblocked the Pass for '+person.name) \n \n #return HttpResponseRedirect(\"admin/block.pass.html\")\n # else: \n # return render_to_response('admin/block.pass.html' ,{'error' : \"You have entered an invalid pass number\"}, context_instance=RequestContext(request))\n return render_to_response('admin/block_pass.html' , context_instance=RequestContext(request))"
] | [
"0.64359105",
"0.63649625",
"0.6310728",
"0.62724555",
"0.6253589",
"0.61984795",
"0.6175689",
"0.61384624",
"0.6075013",
"0.6074085",
"0.6063862",
"0.6048989",
"0.59944564",
"0.5955434",
"0.5939078",
"0.59326166",
"0.58704615",
"0.5840826",
"0.5819313",
"0.58187544",
"0.58173",
"0.58155036",
"0.5813",
"0.5809694",
"0.57606643",
"0.57329535",
"0.57272935",
"0.5722817",
"0.5714734",
"0.5712618"
] | 0.6597587 | 0 |
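A plain-Python sketch of the filtering done in the block_user document above: keep only the likes whose authors are not currently blocking the logged-in user. The Block and Like tuples below are hypothetical stand-ins for the SQLAlchemy models, not the project's actual API.

current_user_id = 7
blocks = [(3, 7), (5, 2)]             # (user_blocking_id, user_being_blocked_id)
likes = [(3, "msg-a"), (4, "msg-b")]  # (author_id, message)

users_blocking = [blocker for blocker, blocked in blocks if blocked == current_user_id]
visible_likes = [msg for author, msg in likes if author not in users_blocking]
print(visible_likes)  # ['msg-b'] -- the like authored by blocking user 3 is hidden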
Have the currently-logged-in user stop blocking this user. | def stop_blocking(block_id):
if not g.user:
flash("Access unauthorized.", "danger")
return redirect("/")
blocked_user = User.query.get_or_404(block_id)
g.user.blocked_users.remove(blocked_user)
db.session.commit()
return redirect(f"/users/{g.user.id}/blocked-users") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def remove_user(self):\n self.currentuser = None\n self.carlocked = False",
"def unblock(self):\n self.failed_logins = 0\n self.blocked = False",
"def deactivate_user(self, user):\n if user.active:\n user.active = False\n return True\n return False",
"def user_disappears(self, user):\n pass",
"async def remove_blacklist(self, ctx, user: discord.Member):\r\n if user.id not in self.settings['blacklist']:\r\n await ctx.send(\"User is not blacklisted.\")\r\n else:\r\n self.settings['blacklist'].remove(user.id)\r\n await ctx.send(\"User removed from blacklist.\")",
"def unblock(user_id):\n user = User.query.filter_by(id=user_id).first()\n if not user:\n raise ObjectNotFound(\"This user does not exist\")\n\n user.unblock()\n return jsonify(message='User account succesfully unblocked'), 201",
"def deactivate_user(self, user):\n if user.active:\n user.active = False\n # noinspection PyUnresolvedReferences\n self.save(user)\n return True\n return False",
"def log_out(self):\n self.__is_logged_in = False",
"def block_user(user_id):\n\n if not g.user:\n flash(\"Access unauthorized.\", \"danger\")\n return redirect(\"/\")\n\n user = User.query.get_or_404(user_id)\n users_blocking = [block.user_blocking_id for block in Blocks.query.all() if block.user_being_blocked_id == g.user.id]\n likes = [message for message in user.likes if message.user_id not in users_blocking]\n return render_template('users/blocked-users.html', user=user, likes=likes)",
"def cmd_disable_private(self, argument):\n if self.is_admin:\n self.bot.admins.remove(self.nick)\n self.send(self.nick, _(\"User %s removed from admins\"), self.nick)\n self.logger.info(\"User %s removed from admins\" % self.nick)",
"async def blacklist_global(self, ctx, user: discord.User, *, reason):\n await self.bot.db.execute(\n \"INSERT IGNORE blacklisted_user VALUES (%s, %s)\", user.id, reason\n )\n self.bot.cache.blacklist[\"global\"][\"user\"].add(user.id)\n await util.send_success(ctx, f\"**{user}** can no longer use Miso Bot!\")",
"def unban_user(self, session, chat_id: int) -> None:\n\n user = session.query(User).get(chat_id)\n if user.is_banned is True:\n user.is_banned = False\n session.commit()",
"def disconnect_user(self, user):\n\t\tis_user_removed = False\n\t\tif user in self.users.all():\n\t\t\tself.users.remove(user)\n\t\t\tself.save()\n\t\t\tis_user_removed = True\n\t\treturn is_user_removed",
"def unblock(self):\n data = {'container': self._reddit.user.me().fullname,\n 'name': str(self), 'type': 'enemy'}\n url = API_PATH['unfriend'].format(subreddit='all')\n # PRAW5 REMOVE (return statement)\n return self._reddit.post(url, data=data)",
"def block_uid(self, uid):",
"def deactivate_user(self, user_name):\n if not self._simultanious_log_ins:\n self._active_users_names.remove(user_name)\n self._users.commit()",
"def confirm_login_allowed(self, user):\r\n if not user.is_active:\r\n raise forms.ValidationError(\r\n self.error_messages['inactive'],\r\n code='inactive',\r\n )",
"async def add_blacklist(self, ctx, user: discord.Member):\r\n if user.id not in self.settings['blacklist']:\r\n try:\r\n self.settings['blacklist'].append(user.id)\r\n await ctx.send(\"User blacklisted.\")\r\n except:\r\n await ctx.send(\"An error occured.\")\r\n else:\r\n await ctx.send(\"User already blacklisted.\")",
"def confirm_login_allowed(self, user):\n if not user.is_active:\n raise forms.ValidationError(\n self.error_messages['inactive'],\n code='inactive',\n )",
"def confirm_login_allowed(self, user):\n if not user.is_active:\n raise forms.ValidationError(\n self.error_messages['inactive'],\n code='inactive',\n )",
"def confirm_login_allowed(self, user):\n if not user.is_active:\n raise forms.ValidationError(\n self.error_messages['inactive'],\n code='inactive',\n )",
"def confirm_login_allowed(self, user):\n if not user.is_active:\n raise forms.ValidationError(\n self.error_messages['inactive'],\n code='inactive',\n )",
"def confirm_login_allowed(self, user):\n if not user.is_active:\n raise forms.ValidationError(\n self.error_messages['inactive'],\n code='inactive',\n )",
"def test_not_logged_user_cannot_leave(self):\n\n utils.test_not_logged_cannot_access(self, self.url)",
"def confirm_login_allowed(self, user):\n if not user.is_active:\n raise ValidationError(\n self.error_messages['inactive'],\n code='inactive',\n )",
"def stop(self):\n self.logger.debug(\"Plugin '{}': stop method called\".format(self.get_fullname()))\n self.scheduler_remove('check_login')\n self.alive = False",
"def killUser(self,user_id,ras_ip,unique_id_val,kill,admin_name):\n ras_id=ras_main.getLoader().getRasByIP(ras_ip).getRasID()\n if kill:\n user_main.getOnline().killUser(user_id,ras_id,unique_id_val,errorText(\"USER_LOGIN\",\"KILLED_BY_ADMIN\",False)%admin_name)\n else:\n user_main.getOnline().clearUser(user_id,ras_id,unique_id_val,errorText(\"USER_LOGIN\",\"CLEARED_BY_ADMIN\",False)%admin_name)",
"def unblock(self):\n prefix = \"28\" if isinstance(self, SkypeBotUser) else \"8\"\n self.skype.conn(\"DELETE\", \"{0}/users/{1}/contacts/blocklist/{2}:{3}\"\n .format(SkypeConnection.API_CONTACTS, self.skype.userId, prefix, self.id),\n auth=SkypeConnection.Auth.SkypeToken)\n self.blocked = False",
"def blocked(self) -> bool:\n return pulumi.get(self, \"blocked\")",
"def display_off(cls, user_id):\r\n user = User.objects.get(id=user_id)\r\n cls.objects.filter(user=user, status=\"denied\").exclude(window=None).update(display=False)"
] | [
"0.66507185",
"0.6560793",
"0.6545891",
"0.64561146",
"0.63917685",
"0.631987",
"0.61726075",
"0.6152056",
"0.6083656",
"0.6043075",
"0.6001118",
"0.5989291",
"0.5986588",
"0.5967347",
"0.59479684",
"0.5893107",
"0.5890102",
"0.58710134",
"0.586824",
"0.586824",
"0.586824",
"0.586824",
"0.586824",
"0.585275",
"0.5833376",
"0.58187336",
"0.5779051",
"0.5771458",
"0.57643205",
"0.5736979"
] | 0.692223 | 0 |
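A minimal sketch of the unblock step in the stop_blocking document above, treating the blocked_users relationship as an ordinary Python list (an assumption, not the actual relationship object). Note that remove() raises ValueError when the element is absent, which the view above does not appear to guard against.

blocked_users = ["alice", "bob"]
blocked_users.remove("bob")        # analogous to g.user.blocked_users.remove(blocked_user)
print(blocked_users)               # ['alice']
try:
    blocked_users.remove("carol")  # unblocking a user who was never blocked
except ValueError as err:
    print("not blocked:", err)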
Include only representative combos from the cross product of the two lists, making sure that each element of both lists is present in at least one combo. | def representative_combos(list_1: list[str], list_2: list[str]) -> list[tuple[str, str]]:
all_selected_combinations: list[tuple[str, str]] = []
for i in range(max(len(list_1), len(list_2))):
all_selected_combinations.append((list_1[i % len(list_1)], list_2[i % len(list_2)]))
return all_selected_combinations | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def excluded_combos(list_1: list[str], list_2: list[str]) -> list[tuple[str, str]]:\n all_combos: list[tuple[str, str]] = []\n for item_1 in list_1:\n for item_2 in list_2:\n all_combos.append((item_1, item_2))\n return [item for item in all_combos if item not in set(representative_combos(list_1, list_2))]",
"def union(set1, set2):",
"def merge(lists):\n newsets, sets = [set(lst) for lst in lists if lst], []\n while len(sets) != len(newsets):\n sets, newsets = newsets, []\n for aset in sets:\n for eachset in newsets:\n if not aset.isdisjoint(eachset):\n eachset.update(aset)\n break\n else:\n newsets.append(aset)\n return newsets",
"def insercionListas(L1,L2):\n return set(L1) & set(L2)",
"def common_elements(s1, s2):\n\n return set(s1 & s2)",
"def union(a, b):\r\n return list(set(a) | set(b))",
"def intersect(list1, list2):\n result_list = []\n #list3 = remove_duplicates(list1)\n for dummy_element in list1:\n if list2.count(dummy_element) > 0 and result_list.count(dummy_element) == 0:\n result_list.append(dummy_element)\n return result_list",
"def union(A,B):\n set_A = A\n set_B = B\n sorted_union = []\n for elements in set_A:\n if elements not in sorted_union:\n sorted_union.append(elements)\n for elements in set_B:\n if elements not in sorted_union:\n sorted_union.append(elements)\n return sorted_union",
"def fullCmpSets(s1, s2):\n if len(s1) != len(s2):\n return 1\n for s1i, s2i in map(None, s1, s2):\n f = s1i.fullCmp(s2i)\n if f:\n return f",
"def commutes_with(self, other):\n a = self.array_form\n b = other.array_form\n if len(a) != len(b):\n raise ValueError(\"The number of elements in the permutations \\\ndon\\'t match.\")\n for i in range(len(a)-1):\n if a[b[i]] != b[a[i]]:\n return False\n return True",
"def intersect(self, other_list):\n assert type(other_list) == type(self)\n \n# if len(self.vals) >= len(other_list.vals):\n# big = self.vals\n# small = other_list.vals\n# else:\n# small = self.vals\n# big = other_list.vals\n# \n# common_list = intSet()\n# for e in big:\n# if e in small:\n# common_list.insert(e)\n# return common_list\n\n common_list = intSet() \n for e in self.vals:\n if other_list.member(e): #if the current e is a member of other_list\n common_list.insert(e)\n return common_list",
"def compare_elements(a, b):\n return set(a or []) == set(b or [])",
"def fullIn(C, g):\n for set in C:\n if not fullCmpSets(set, g):\n return 1",
"def union(list1, list2):\n new_list = list1\n for literal in list2:\n negate_literal = copy.deepcopy(literal)\n negate_literal.negate = not negate_literal.negate\n if negate_literal in list1:\n new_list.remove(negate_literal)\n continue\n if literal not in list1:\n new_list.append(literal)\n return new_list",
"def union(set_1, set_2):\n union_list = []\n\n for number in set_2: # Adds numbers to set_1, since this is the last step\n set_1.append(number)\n \n set_1.sort()\n\n for number in set_1:\n if number not in union_list:\n union_list.append(number)\n \n print(\"Union:\", union_list)\n return set_1, set_2",
"def __listunion(self, c1, c2):\n s1 = {}\n for delta in c1:\n s1[delta] = 1\n\n\tc = c1[:]\n\tfor delta in c2:\n if not s1.has_key(delta):\n\t\tc.append(delta)\n\n\treturn c",
"def union(a, b):\n return list(set(a) | set(b))",
"def union(a, b):\n return list(set(a) | set(b))",
"def comaIsSymmetric(self):\n\t\tfor i in range(2*self.totalBins):\n\t\t\tfor j in range(2*self.totalBins):\n\t\t\t\tif not self.coma[i,j] == self.coma[j,i]:\n\t\t\t\t\tprint i,j,self.coma[i,j],self.coma[j,i]\n\t\t\t\t\treturn False\n\t\treturn True",
"def intersectie_multimi(lista_1, lista_2):\n lista_intersectie = []\n for elemente_lista_1 in lista_1:\n for elemente_lista_2 in lista_2:\n if elemente_lista_1 == elemente_lista_2:\n lista_intersectie.append(elemente_lista_1)\n return lista_intersectie",
"def get_jurisdiction_common_members(a: List[int], b: List[int]) -> Set[int]:\n a_set = set(a)\n b_set = set(b)\n\n if a_set & b_set:\n return a_set & b_set\n else:\n return set()",
"def only_diff_elements(set_1, set_2):\n return (set_1 ^ set_2)",
"def unique(combo, out):\n # This lets us find only minimally covering payments (you should never add cards to a payment that already\n # satisfies the charge)\n for el in out:\n if set(el).issubset(combo):\n return False\n return True",
"def _condense(a,b=None):\r\n\t\t\r\n\t\t# second is by default empty\r\n\t\tif b is None:\r\n\t\t\tb = []\r\n\t\t\r\n\t\t# add first into second\r\n\t\tfor i in a:\r\n\t\t\t\r\n\t\t\t# try to add onto all terms\r\n\t\t\tt = [i.add(j) for j in b]\r\n\t\t\t\r\n\t\t\t# check for match\r\n\t\t\tm = False\r\n\t\t\tfor n,j in enumerate(t):\r\n\t\t\t\tif j is not None:\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t# replace with combination\r\n\t\t\t\t\tb[n] = j\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t# stop searching\r\n\t\t\t\t\tm = True\r\n\t\t\t\t\tbreak\r\n\t\t\t\t\r\n\t\t\t# otherwise append\r\n\t\t\tif not m:\r\n\t\t\t\tb.append(i)\r\n\t\t\t\r\n\t\t\t# remove zeroes\r\n\t\t\tzo = lambda x: 0 in x\r\n\t\t\tb = [i for i in b if not zo(i)]\r\n\t\t\t\r\n\t\treturn b",
"def apply(self):\n next_one = super().apply()\n next_both = set()\n\n for tup in next_one:\n if (tup[1], tup[0]) in next_one:\n next_both.add(tup)\n\n return list(next_both)",
"def intersection(arrays):\n # Create hash table (dict) to store numbers in for faster O(1) lookup (for \n # any individual lookup):\n # numbers = {}\n\n # Create list for intersection of the sets:\n # intersection = []\n\n # Populate hash table with numbers from the first list (keys), because any numbers \n # not in the first list will not be in the intersection of the lists, by definition.\n numbers = {item:False for item in arrays[0]}\n # Now check the other input lists in order, removing any number/item that is not in both:\n for list in arrays[1:]:\n for item in list: # NOT actually O(n**2); just O(n) for the whole input matrix.\n # Mark as True to flag any items that are in the intersection of the two lists:\n if item in numbers:\n numbers[item] = True\n # Keep only the numbers that are in the intersection of the two lists:\n numbers = {key:value for key, value in numbers.items() if value == True}\n # Mark all as False again to start a fresh comparison with the next list:\n for item in numbers:\n numbers[item] = False\n\n return [*numbers.keys()]",
"def lists_combinations(list_1, list_2):\n return [x[0] + ' ' + x[1] for x in itertools.product(list_1, list_2)]",
"def encompasses_broadcastable(b1, b2):\r\n if len(b1) < len(b2):\r\n return False\r\n b1 = b1[-len(b2):]\r\n return not any(v1 and not v2 for v1, v2 in zip(b1, b2))",
"def GetMatchedSubContourListsCollapsing(scListA, scListB):\n\n if (\n scListA == scListB\n ): # if we got the same object for some reason, just return 2 shallow clones\n return scListA, scListB\n\n scsMatchedInB = [] # This keeps us from looping over both lists\n\n scsMatchedInB, removeFromA, removeFromB = FindMatchesAndRemovals(scListA, scListB)\n unMatchedInB = [\n i for i in range(len(scListB)) if i not in scsMatchedInB\n ] # This lets us skip the indexes that already matched\n\n _, removeFromB_2, removeFromA_2 = FindMatchesAndRemovals(\n scListB, scListA, searchInds=unMatchedInB\n ) # FLIP\n\n removeFromA = sorted(list(set(removeFromA + removeFromA_2)))\n removeFromB = sorted(list(set(removeFromB + removeFromB_2)))\n\n scListANew = deepcopy(scListA)\n scListBNew = deepcopy(scListB)\n\n for i in removeFromA[::-1]:\n RemoveSubContour(scListA, i)\n for i in removeFromB[::-1]:\n RemoveSubContour(scListB, i)\n\n return scListANew, scListBNew",
"def intersect(a,b):\n\treturn list(set(a) & set(b))"
] | [
"0.6463747",
"0.6072654",
"0.6063343",
"0.5942452",
"0.5933756",
"0.59013903",
"0.58814776",
"0.58771676",
"0.5853014",
"0.5846436",
"0.58372587",
"0.5833355",
"0.5817443",
"0.5816979",
"0.5809175",
"0.57999456",
"0.5792333",
"0.5792333",
"0.57877153",
"0.5774353",
"0.5765378",
"0.5763758",
"0.57619625",
"0.5729672",
"0.57278913",
"0.57019377",
"0.5700082",
"0.5680151",
"0.56658703",
"0.5601527"
] | 0.6188967 | 1 |
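A quick standalone run of the representative_combos logic from the row above, showing how the modulo indexing guarantees every element of both lists appears at least once; the function is restated here only so the snippet runs on its own.

def representative_combos(list_1, list_2):
    combos = []
    for i in range(max(len(list_1), len(list_2))):
        # Cycle through the shorter list so none of its elements is left out.
        combos.append((list_1[i % len(list_1)], list_2[i % len(list_2)]))
    return combos

print(representative_combos(["a", "b", "c"], ["x", "y"]))
# [('a', 'x'), ('b', 'y'), ('c', 'x')]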
Return the list of combos that should be excluded from the cross product of the two lists of items so that what is left is the representative list of combos (i.e. each item from both lists is still present at least once in the combos). | def excluded_combos(list_1: list[str], list_2: list[str]) -> list[tuple[str, str]]:
all_combos: list[tuple[str, str]] = []
for item_1 in list_1:
for item_2 in list_2:
all_combos.append((item_1, item_2))
return [item for item in all_combos if item not in set(representative_combos(list_1, list_2))] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __sub__(self, vs):\n return [v for v in self.__elements if tuple(v) not in map(tuple, vs)]",
"def set_difference(lst1, lst2):\n elements = []\n indicies = []\n for indx, item in enumerate(lst1):\n if item not in lst2:\n elements.append(item)\n indicies.append(indx)\n return elements, indicies",
"def get_excluded_pairs(self, max_exclusion = 3):\n\n excluded_pairs = []\n\n # construct a matrix of size n by n where n is the number of atoms in this fragment\n # a value of 1 in row a and column b means that atom a and b are bonded\n connectivity_matrix = [[0 for k in range(self.get_num_atoms())] for i in range(self.get_num_atoms())]\n\n # loop over each pair of atoms\n for index1, atom1 in enumerate(self.get_atoms()):\n for index2, atom2 in enumerate(self.get_atoms()[index1 + 1:]):\n index2 += index1 + 1\n\n # if these atoms are bonded, set their values in the connectivity matrix to 1.\n if atom1.is_bonded(atom2):\n connectivity_matrix[index1][index2] = 1\n connectivity_matrix[index2][index1] = 1\n\n # current matrix represents connectivity_matrix^x where x is the same as as in the excluded_1x pairs we are currently generating\n current_matrix = connectivity_matrix\n\n excluded_pairs_12 = set()\n\n # loop over each pair of atoms\n for index1, atom1 in enumerate(self.get_atoms()):\n for index2, atom2 in enumerate(self.get_atoms()[index1 + 1:]):\n index2 += index1 + 1\n\n # if the value in the current matrix is at least 1, then these atoms are 1 bond apart, and are added to the excluded_pairs_12 list.\n if current_matrix[index1][index2] > 0:\n excluded_pairs_12.add((index1, index2))\n\n # add the excluded_pairs_12 to the list of all excluded pairs\n excluded_pairs.append(excluded_pairs_12)\n\n for i in range(max_exclusion - 1):\n\n # current matrix is multiplied by connectivity_matrix so that each iteration of the loop current_matrix = connectivity_matrix^(i + 1)\n current_matrix = numpy.matmul(current_matrix, connectivity_matrix)\n\n excluded_pairs_1x = set()\n\n # loop over each pair of atoms\n for index1, atom1 in enumerate(self.get_atoms()):\n for index2, atom2 in enumerate(self.get_atoms()[index1 + 1:]):\n index2 += index1 + 1\n\n # if the value in the connectivity matrix is at least 1, then these atoms are x bonds apart, and are added to the excluded_pairs_1x list.\n if current_matrix[index1][index2] > 0:\n excluded_pairs_1x.add((index1, index2))\n\n # filter out all terms inside other excluded lists from the new excluded list\n for excluded_pairs_1y in excluded_pairs:\n excluded_pairs_1x -= excluded_pairs_1y\n\n # add the excluded_pairs_1x to the list of all excluded pairs\n excluded_pairs.append(excluded_pairs_1x)\n\n return [[list(pair) for pair in excluded_pairs_1x] for excluded_pairs_1x in excluded_pairs]",
"def listops_difference(list_a,list_b):\r\n\r\n retlist = []\r\n for item in list_a:\r\n if item not in list_b:\r\n retlist.append(item)\r\n\r\n # ensure that a duplicated item in list_a is only listed once\r\n return listops_uniq(retlist)",
"def only_diff_elements(set_1, set_2):\n return (set_1 ^ set_2)",
"def list_difference(l1: List[Any], l2: List[Any]) -> List[Any]:\n return [item for item in l1 if item not in l2]",
"def list_subtract(a, b):\n a_only = list(a)\n for x in b:\n if x in a_only:\n a_only.remove(x)\n return a_only",
"def drop_groups(input_first, input_second):\n\tret_first, ret_second = [], []\n\tfor idx in xrange(len(input_first)):\n\t\tif len(input_first[idx]) == len(input_second[idx]):\n\t\t\tret_first.append(input_first[idx])\n\t\t\tret_second.append(input_second[idx])\n\treturn ret_first, ret_second",
"def get_disjoint_genes(self, other):\n innovs = {g.innov_num for g in other.link_genes}\n if not innovs:\n return []\n max_innov = max(innovs)\n return [g for g in other.link_genes\n if g.innov_num < max_innov and g.innov_num not in innovs]",
"def compare_lists(self, list1, list2):\n matching_items = []\n\n list1 = list1.copy()\n list2 = list2.copy()\n\n for item in list1:\n if item in list2:\n matching_items.append(item)\n\n for m in matching_items:\n for c in range(list1.count(m)):\n list1.remove(m)\n for c in range(list2.count(m)):\n list2.remove(m)\n if list1 or list2:\n tmp_match = False\n else:\n tmp_match = True\n return tmp_match, list1, list2",
"def common_cities_excluding(members, member_to_exclude, city_sets):\n\n cities = common_cities(members, city_sets)\n cities = [x for x in cities\n if x not in city_sets[member_to_exclude]]\n\n return cities",
"def exclude_inputs(inputs, exclusion_criteria):\n selected_inputs = []\n\n for i in inputs:\n criteria_met = [exclusion_criterion(i) for _, exclusion_criterion in exclusion_criteria.items()]\n if any(criteria_met):\n pass\n else:\n selected_inputs.append(i)\n\n return selected_inputs",
"def get_list_difference(self, set_one, set_two):\n s1 = set(set_one)\n s2 = set(set_two)\n return list(s1.difference(s2))",
"def get_list_diff(list1, list2):\n\n list3 = list(np.setdiff1d(list1,list2))\n return(list3)",
"def subtraction_list(a , b):\n\n c = [i for i in list_a if i not in list_b]\n\n return(c)",
"def entries_not_in(self, other):\n other_keys = set(other._entries.keys())\n filtered_order = [k for k in self._order if k not in other_keys]\n return [self._entries[k] for k in filtered_order]",
"def list_difference(list1, list2):\n diff_list = []\n for item in list1:\n if not item in list2:\n diff_list.append(item)\n return diff_list",
"def unorderable_list_difference(expected, actual, ignore_duplicate=False):\r\n missing = []\r\n unexpected = []\r\n while expected:\r\n item = expected.pop()\r\n try:\r\n actual.remove(item)\r\n except ValueError:\r\n missing.append(item)\r\n if ignore_duplicate:\r\n for lst in expected, actual:\r\n try:\r\n while True:\r\n lst.remove(item)\r\n except ValueError:\r\n pass\r\n if ignore_duplicate:\r\n while actual:\r\n item = actual.pop()\r\n unexpected.append(item)\r\n try:\r\n while True:\r\n actual.remove(item)\r\n except ValueError:\r\n pass\r\n return missing, unexpected\r\n\r\n # anything left in actual is unexpected\r\n return missing, actual",
"def listSubtract(alist,blist):\n result = []\n for item in alist:\n if item not in blist:\n result.append(item)\n return result",
"def diff(xs, ys):\n return [x for x in xs if x not in ys]",
"def filterout(L1, L2):\n for i in L1:\n if i in L2:\n L2.remove(i)",
"def getExcludedAtoms(self):\n try:\n return self._excludedAtoms\n except AttributeError:\n pass\n self._excludedAtoms=[]\n numExcludedAtomsList=self._raw_data[\"NUMBER_EXCLUDED_ATOMS\"]\n excludedAtomsList=self._raw_data[\"EXCLUDED_ATOMS_LIST\"]\n total=0\n for iAtom in range(self.getNumAtoms()):\n index0=total\n n=int(numExcludedAtomsList[iAtom])\n total+=n\n index1=total\n atomList=[]\n for jAtom in excludedAtomsList[index0:index1]:\n j=int(jAtom)\n if j>0:\n atomList.append(j-1)\n self._excludedAtoms.append(atomList)\n return self._excludedAtoms",
"def getExcludedAtoms(self):\n excludedAtomsIdList = self.getFlagData('EXCLUDED_ATOMS_LIST')\n numberExcludedAtoms = self.getFlagData('NUMBER_EXCLUDED_ATOMS')\n atoms = self.atoms\n interval = 0\n excludedAtomsList = []\n for number in numberExcludedAtoms:\n temp = excludedAtomsIdList[interval:interval + number]\n if temp == [0]:\n excludedAtomsList.append([])\n else:\n excludedAtomsList.append([atoms[a-1] for a in temp])\n interval += number\n self.excludedAtoms = excludedAtomsList\n self.printDebug(\"getExcludedAtoms\")",
"def remove_common(first: list, second: list):\n return list(set(first)-set(second))",
"def check_and_invert(columns, excluded):\n if isinstance(excluded, str):\n excluded = [excluded]\n\n included = columns.tolist()\n for exclude in excluded:\n if exclude in included:\n included.remove(exclude)\n return included",
"def avoids(w, forbidden):\n\treturn set(w).isdisjoint(set(forbidden))",
"def difference(a, b):\r\n return list(set(b).difference(set(a)))",
"def _subtract_access_lists(self, list_a, list_b):\n sub_tuples_list = [{\"to\": s.get('access_to'),\n \"type\": s.get('access_type'),\n \"level\": s.get('access_level')}\n for s in list_b]\n return [r for r in list_a if (\n {\"to\": r.get(\"access_to\"),\n \"type\": r.get(\"access_type\"),\n \"level\": r.get(\"access_level\")} not in sub_tuples_list)]",
"def eliminate_rows(sudoku):\n solution = []\n for row in sudoku:\n certain = set([item[0] for item in row if len(item)==1])\n new_row = []\n for item in row:\n if len(item) ==1:\n new_row.append(item)\n else:\n possible = list(set(item)- certain)\n new_row.append(possible)\n solution.append(new_row)\n return solution",
"def list_difference(list1, list2):\r\n diff_list = []\r\n for item in list1:\r\n if not item in list2:\r\n diff_list.append(item)\r\n else:\r\n if list2.count(item) != list1.count(item) and not item in diff_list:\r\n diff_list.append(item) \r\n return diff_list"
] | [
"0.62029153",
"0.60917294",
"0.6032032",
"0.5976475",
"0.5959076",
"0.59407663",
"0.59237903",
"0.59000987",
"0.5852017",
"0.5805686",
"0.57930046",
"0.57798666",
"0.57722604",
"0.57628405",
"0.57605445",
"0.57564837",
"0.5749208",
"0.5745465",
"0.5721658",
"0.5686724",
"0.56635207",
"0.56544524",
"0.565349",
"0.5642853",
"0.56403005",
"0.5627186",
"0.56270844",
"0.5624281",
"0.561027",
"0.55954695"
] | 0.76312757 | 0 |
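To make the relationship between the two rows above concrete: excluded_combos is the complement of representative_combos inside the full cross product. A short sketch, restating both helpers so it runs stand-alone (itertools.product is used here for brevity in place of the nested loops in the row above).

from itertools import product

def representative_combos(list_1, list_2):
    return [(list_1[i % len(list_1)], list_2[i % len(list_2)])
            for i in range(max(len(list_1), len(list_2)))]

def excluded_combos(list_1, list_2):
    keep = set(representative_combos(list_1, list_2))
    return [combo for combo in product(list_1, list_2) if combo not in keep]

print(excluded_combos(["a", "b"], ["x", "y"]))
# [('a', 'y'), ('b', 'x')]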
Load the event property picks file for a given experiment number and return that data as an array | def load_event_properties(experiment):
return np.loadtxt('../Slip_Property_Data/%s_event_properties.txt'%experiment,delimiter=',',skiprows=1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_events(experiment):\n event_properties = load_event_properties(experiment)\n blacklist = load_blacklist(experiment)\n return np.delete(event_properties,blacklist,axis=0)",
"def load_events(experiment):\n event_properties = load_event_properties(experiment)\n blacklist = load_blacklist(experiment)\n return np.delete(event_properties,blacklist,axis=0)",
"def fill_event(event):\n is_external = _inspect_descriptor(event.descriptor)\n for data_key, (value, timestamp) in event.data.items():\n if is_external[data_key]:\n # Retrieve a numpy array from filestore\n event.data[data_key][0] = retrieve(value)",
"def load_evs(subject, name, condition):\n evs = []\n for id in get_image_ids(name):\n task_key = BOLD_NAMES[id - 1]\n ev_file = f\"{HCP_DIR}/subjects/{subject}/EVs/{task_key}/{condition}.txt\"\n ev = dict(zip([\"onset\", \"duration\", \"amplitude\"], np.genfromtxt(ev_file).T))\n evs.append(ev)\n return evs",
"def read_ext_prop(self, fname, prop_title, mult=1):\r\n print('Reading ' + prop_title + ' input')\r\n data = []\r\n count = 0\r\n modify = False\r\n with open(fname, \"r\") as fp:\r\n for line in fp:\r\n if not line[:1].isdigit():\r\n if line.startswith('*MOD'):\r\n modify = True\r\n continue # it's a keyword\r\n item = line.split()\r\n if modify:\r\n i = int(item[0])-1\r\n j = int(item[1])-1\r\n K = [int(x)for x in item[2].split(':')]\r\n value = float(item[-1])\r\n for k in range(K[0]-1,K[1]):\r\n data[k,j,i] = value\r\n break\r\n for attr in item:\r\n if \"*\" in attr:\r\n item = attr.split(\"*\")\r\n for i in range(0, int(item[0])):\r\n data.append(float(item[1]) * mult)\r\n count += 1\r\n else:\r\n data.append(float(attr) * mult)\r\n count += 1\r\n # If true, all values have been read\r\n if count == self.size[0] * self.size[1] * self.size[2]:\r\n data = np.array(data)\r\n data = np.reshape(data, (self.size[2], self.size[1], self.size[0]), order=\"C\")\r\n continue\r\n self.add_data(data, prop_title)\r\n self.out_props[prop_title] = data",
"def load_experiments(filename):\n fp = open(filename)\n experiment_names = None\n try:\n experiment_names = simplejson.load(fp)\n except Exception, e:\n l.error(\"Unable to parse experiment file %s: %s\" % (filename, e))\n raise e\n finally:\n fp.close()\n \n for entry in experiment_names:\n for key in entry.keys():\n if key not in ExperimentLoader.ALLOWED_ATTRIBUTES:\n l.warning(\"Ignoring unrecognized key %s on experiment \"\n \"definition %s in filename %s\" %\n (key, entry, filename))\n if ExperimentLoader.NAME_ATTRIBUTE in entry:\n Experiment.objects.get_or_create(\n name=entry.get(ExperimentLoader.NAME_ATTRIBUTE))\n else:\n l.warning(\"Invalid entry in experiment file %s : %s\" %\n (filename, entry))",
"def import_agilent_gc_residual_solvents(file_name):\n df = pd.read_excel(file_name, sheet_name = None)\n samples = get_sample_name(df)\n df_compound = get_compound_df(df)\n samples['metrics'] = df_compound[['analyte', 'measurement']].to_dict('records')\n return samples",
"def load_seed(self) -> np.ndarray:\n return np.loadtxt(CONFIG_DIR / self.name_seed).view(complex).reshape(-1, 1)",
"def test_load_events(self):\n command = '{0}'.format(\n os.path.join(self.datadir, 'monol_testA.evt'))\n hen.read_events.main(command.split())\n new_filename = self.first_event_file\n ev = hen.io.load_events(new_filename)\n assert hasattr(ev, 'header')\n assert hasattr(ev, 'gti')",
"def energy_cal():\n energy_cal = np.load(energy_file)\n return energy_cal",
"def load_eeg(filename):\r\n data = np.load(filename)\r\n return data['eeg'], int(data['srate'])",
"def load_eeg(filename):\r\n data = np.load(filename)\r\n return data['eeg'], int(data['srate'])",
"def load( self, file ):\n if not os.path.isfile( file ):\n sys.exit( \"Error: File \"+file+\" does not exist.\" )\n s = open( file, 'U' ).read().replace(',','.') # convert CR and commas\n s = StringIO.StringIO( s ) # a new, in-memory file\n for i in s.readline().split(\"\\t\"): # loop over tab-separated header items\n key, val = i.split(\"=\")\n try:\n self.prop[key] = float(val) # convert to float and store...\n except ValueError:\n self.prop[key] = val # ...but not possible for text time stamp\n self.prop[\"SPEC\"] = np.loadtxt(s) # store spectra as numpy matrix\n return self",
"def retrieve_fixture():\n j = json.load(open(\"./tests/fixtures/crond_event.json\"))\n return j",
"def read(cls, event_file, regex=regex):\n with open(event_file, 'r') as f:\n filedata = f.read()\n event_matches = re.finditer(regex, filedata, re.VERBOSE + re.MULTILINE)\n list_ = [i.groupdict() for i in event_matches]\n #util.ipshell()\n for event in list_: # convert numbers to float and int types\n for key, item in event.iteritems():\n if util.isint(item):\n event[key] = int(item)\n elif util.isfloat(item):\n event[key] = float(item)\n else:\n event[key] = item.strip()\n #if event[key] == '':\n # event[key] = None\n #if key == 'depth' and regex == cls.regex:\n # event[key] *= 1\n #util.ipshell()\n log.info('Read event information of %d events from events event_file %s' % (len(list_), event_file))\n return cls(list_)",
"def load_experiment(file_name: str):\n exp = Experiment2P()\n # initialize the lazy-load objects with empty lists\n exp.tail_data = []\n exp.replaced_tail_frames = []\n exp.laser_data = []\n exp.all_c = []\n exp.all_dff = []\n exp.func_stacks = []\n with h5py.File(file_name, 'r') as dfile:\n exp.version = dfile[\"version\"][()] # in future allows for version specific loading\n try:\n if exp.version == b\"unstable\" or exp.version == \"unstable\":\n warnings.warn(\"Experiment file was created with development version of analysis code. Trying to \"\n \"load as version 1\")\n elif int(exp.version) > 2:\n raise IOError(f\"File version {exp.version} is larger than highest recognized version '2'\")\n except ValueError:\n raise IOError(f\"File version {exp.version} not recognized\")\n # load general experiment data\n n_planes = dfile[\"n_planes\"][()] # inferrred property of class but used here for loading plane data\n exp.experiment_name = dfile[\"experiment_name\"][()]\n exp.original_path = dfile[\"original_path\"][()]\n exp.scope_name = dfile[\"scope_name\"][()]\n exp.comment = dfile[\"comment\"][()]\n exp.tail_frame_rate = dfile[\"tail_frame_rate\"][()]\n # load singular parameter dictionary\n exp.info_data = exp._load_dictionary(\"info_data\", dfile)\n # load tail-data modification flag if this is version 2\n if int(exp.version) > 1:\n exp.tail_data_augmented = dfile[\"tail_data_augmented\"][()]\n # load per-plane data\n for i in range(n_planes):\n plane_group = dfile[str(i)]\n exp.scanner_data.append(exp._load_dictionary(\"scanner_data\", plane_group))\n exp.tail_data.append(plane_group[\"tail_data\"][()])\n exp.projections.append(plane_group[\"projection\"][()])\n if \"func_stack\" in plane_group:\n exp.func_stacks.append(plane_group[\"func_stack\"][()])\n if \"anat_projection\" in plane_group: # test if this experiment was dual-channel\n exp.anat_projections.append(plane_group[\"anat_projection\"][()])\n if \"tail_data\" in plane_group: # test if this experiment had tail data (for all planes)\n exp.tail_data.append(plane_group[\"tail_data\"][()])\n exp.bout_data.append(plane_group[\"bout_data\"][()])\n exp.tail_frame_times.append(plane_group[\"tail_frame_time\"][()])\n if int(exp.version) > 1 and \"replaced_tail_frames\" in plane_group:\n exp.replaced_tail_frames.append(plane_group[\"replaced_tail_frames\"][()])\n if \"laser_data\" in plane_group: # test if this experiment had laser data\n exp.laser_data.append(plane_group[\"laser_data\"][()])\n exp.all_c.append(plane_group[\"C\"][()])\n exp.all_dff.append(plane_group[\"dff\"][()])\n exp.all_centroids.append(plane_group[\"centroids\"][()])\n exp.all_sizes.append(plane_group[\"sizes\"][()])\n exp.all_spatial.append(plane_group[\"spatial\"][()])\n ps = plane_group[\"mcorr_dict\"][()]\n exp.mcorr_dicts.append(json.loads(ps))\n ps = plane_group[\"cnmf_extract_dict\"][()]\n exp.cnmf_extract_dicts.append(json.loads(ps))\n ps = plane_group[\"cnmf_val_dict\"][()]\n exp.cnmf_val_dicts.append(json.loads(ps))\n exp.populated = True\n return exp",
"def _experimentlist_from_file(filename, directory=None):\n filename = resolve_path(filename, directory=directory)\n try:\n with open(filename, \"r\") as infile:\n return json.load(infile, object_hook=_decode_dict)\n except IOError:\n raise IOError(\"unable to read file, %s\" % filename)",
"def read_data(self):\n self.days = [0, 2, 3, 5, 6, 8, 9, 11, 13, 14]\n path = '../data/'\n data = []\n for day in self.days:\n filename = path + 'spectrum_day{}.txt'.format(day)\n data.append(read_file(filename))\n return data",
"def load(cls, path: str):\n with open(path, \"r\") as f:\n run_data = json.load(f)\n return Experiment.load_from_dict(run_data)",
"def Data_init(**kwargs):\n if 'file' in kwargs:\n print \"Reading the file\"\n else:\n print \"Randomizing the initial data\"\n XV = np.random.rand(kwargs['particles'],kwargs['dimensions']*2) * 2 - 1\n M = np.random.rand(kwargs['particles'])\n\n t_f,num = kwargs['time']\n t = np.linspace(0,t_f,num)\n\n return XV,M,t",
"def getevent(self, filename):\n return self.events[filename.lower()]",
"def get_epix_data_object(evt, src):\n o = evt.get(_psana.Epix.ElementV3, src)\n if o is not None: return o\n\n o = evt.get(_psana.Epix.ElementV2, src)\n if o is not None: return o\n\n o = evt.get(_psana.Epix.ElementV1, src)\n if o is not None: return o\n\n o = evt.get(_psana.Epix.ArrayV1, src)\n if o is not None: return o\n\n return None",
"def rdspecdat(self):\n # TODO : ugh. this is crude. Should have some checks for file format\n # and probably better to use the astropy.io functions now.\n try:\n w, f, e = np.loadtxt(self.filename, unpack=True)\n except:\n w, f = np.loadtxt(self.filename, unpack=True)\n e = []",
"def _load(self, filename):\n with open(filename) as fp:\n reader = csv.DictReader(fp)\n self.events = list(reader)",
"def readExperimentParameters(filename): \n \n csvFile = csv.reader(open(filename))\n pythonSucks = next(csvFile)\n \n data = []\n for row in csvFile:\n list = []\n for i in range(len(row)):\n if(row[i+1] == \"\"):\n list.append(int(row[i]))\n break\n elif(row[i+1] == \"h\"):\n list.append(int(row[i]) * 60)\n break\n else:\n list.append(int(row[i]))\n \n data.append(tuple(list))\n return data",
"def eeg_readavr(file):\t\n\tf=open(file,'r')\t\n\tfirstline = f.readline() # ntpts TSB info etc\n\tstr = string.split(firstline)\n\tntpts = int(str[1])\n\tnchan = int(str[11])\n\ttsb = float(str[3])\n\tdi = float(str[5])\t\n\ttim = np.arange(tsb,ntpts*di+tsb,di)\n\tsecondline = f.readline()\n\tchnam = string.split(secondline)\n\teeg = np.zeros([nchan,ntpts])\t\t\n\tfor i in range(0,nchan):\n\t\ttestline = f.readline()\n\t\ttestline = testline.strip().split()\t\t\n\t\teeg[i,:]=np.array(map(float,testline))\n\t\t\n\tf.close()\n\treturn eeg,tim,nchan,ntpts",
"def read_ephem_file(infile):\n target_id, epoch, period, tdur = [], [], [], []\n with open(infile) as ff:\n data = ff.readlines()\n for row in data:\n s = row.split()\n target_id.append(s[0])\n epoch.append(float(s[1]))\n period.append(float(s[2]))\n tdur.append(float(s[3]))\n return target_id, epoch, period, tdur",
"def _read_events(events_data, event_id, raw, ext, verbose=None):\n if isinstance(events_data, str):\n events = read_events(events_data, verbose=verbose).astype(int)\n elif isinstance(events_data, np.ndarray):\n if events_data.ndim != 2:\n raise ValueError('Events must have two dimensions, '\n 'found %s' % events_data.ndim)\n if events_data.shape[1] != 3:\n raise ValueError('Events must have second dimension of length 3, '\n 'found %s' % events_data.shape[1])\n events = events_data\n elif 'stim' in raw:\n events = find_events(raw, min_duration=0.001, initial_event=True,\n verbose=verbose)\n elif ext in ['.vhdr', '.set'] and check_version('mne', '0.18'):\n events, event_id = events_from_annotations(raw, event_id,\n verbose=verbose)\n else:\n warn('No events found or provided. Please make sure to'\n ' set channel type using raw.set_channel_types'\n ' or provide events_data.')\n events = None\n return events, event_id",
"def test_load_events_xmm(self):\n command = '{0}'.format(\n os.path.join(self.datadir, 'monol_test_fake_lc_xmm.evt'))\n hen.read_events.main(command.split())",
"def readEEGepoch(eegfilename, mainDir):\n # subject = 'ES9007' \n datapath = os.path.join(mainDir)\n os.chdir(datapath)\n \n folders = os.listdir(datapath)\n \n for dir in folders:\n \n os.chdir(os.path.join(datapath, dir))\n file = glob.glob(eegfilename)\n \n if file:\n print('>>>>>>>>>>>>> file loaded from >>>>>>>>>>>>>>>>>:', os.getcwd())\n filepath = os.path.join(os.getcwd(), eegfilename) \n dat = mne.read_epochs(filepath, preload=True) \n break \n return dat"
] | [
"0.63500786",
"0.63500786",
"0.54971594",
"0.5481163",
"0.5479993",
"0.54658467",
"0.54637396",
"0.5439347",
"0.5426198",
"0.53566355",
"0.53481495",
"0.53481495",
"0.5347761",
"0.5314206",
"0.5293709",
"0.5277644",
"0.5244304",
"0.5231523",
"0.52066195",
"0.51659846",
"0.5142331",
"0.5128348",
"0.51273996",
"0.512533",
"0.51158637",
"0.51125556",
"0.508946",
"0.507534",
"0.5071117",
"0.50583297"
] | 0.8261329 | 0 |
Load event numbers from the blacklist file for each experiment and return them as an array | def load_blacklist(experiment):
blacklist = np.loadtxt('../Slip_Property_Data/%s_blacklist.txt'%experiment)
return blacklist | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_events(experiment):\n event_properties = load_event_properties(experiment)\n blacklist = load_blacklist(experiment)\n return np.delete(event_properties,blacklist,axis=0)",
"def load_events(experiment):\n event_properties = load_event_properties(experiment)\n blacklist = load_blacklist(experiment)\n return np.delete(event_properties,blacklist,axis=0)",
"def getBlackList(filename):\n #filename = \"filelist/blacklist_%s.txt\"%dataset.lstrip('/').replace('/','__')\n blacklist = [ ]\n if os.path.exists(filename):\n with open(filename,'r') as file:\n for line in file:\n line = line.rstrip('\\n')\n if line and '#' not in line:\n blacklist.append(line)\n return blacklist",
"async def parse_flash_histogram(path: Path) -> List[List[int]]:\n histogram = list()\n\n async with aiofiles.open(path, \"r\") as f:\n async for line in f:\n histogram.append([int(i) for i in line.rstrip().split()])\n\n return histogram",
"def ReadBlackListFile(BlackListFile):\n blacklist = []\n if os.path.isfile(BlackListFile):\n with open(BlackListFile, 'r') as filecontent:\n for line in filecontent:\n #(chrom1, start1, chrom2, start2) = line.rstrip().split(\"\\t\")\n blacklist.append(line)\n return(blacklist)",
"def load(self):\n self.black = []\n\n for e in self.camera.exposure_settings:\n p = os.path.join(self.config.ffc_dir, 'BLK%5.5d.npy' % e)\n if os.path.isfile(p):\n print('Loading cal: ', p)\n self.black.append(np.load(p).astype(np.int16)) # older versions might have saved uint\n else:\n self.black.append(np.zeros(FRAME_SHAPE, np.int16))",
"def open_blacklist(filepath):\n with open(filepath, 'r') as f:\n blacklist = [tuple(line.strip().split('\\t')) for line in f.readlines()]\n return blacklist",
"def validate_file_blacklist(blacklist):\n valid_values = [\n # 'checkpoint',\n \"description\",\n \"heartbeat\",\n \"predictions_holdout\",\n \"predictions_in_fold\",\n \"predictions_oof\",\n \"predictions_test\",\n \"script_backup\",\n \"tested_keys\",\n \"current_heartbeat\",\n ]\n if blacklist == \"ALL\":\n G.warn('WARNING: Received `blacklist`=\"ALL\". Nothing will be saved')\n return blacklist\n\n if not blacklist:\n return []\n elif not isinstance(blacklist, list):\n raise TypeError(\"Expected blacklist to be a list, not: {}\".format(blacklist))\n elif not all([isinstance(_, str) for _ in blacklist]):\n invalid_files = [(type(_).__name__, _) for _ in blacklist if not isinstance(_, str)]\n raise TypeError(\"Expected blacklist contents to be strings, not: {}\".format(invalid_files))\n\n for a_file in blacklist:\n if a_file not in valid_values:\n raise ValueError(f\"Invalid blacklist value: {a_file}.\\nExpected one of: {valid_values}\")\n if a_file in [\"description\", \"tested_keys\"]:\n G.warn(f\"Including {a_file!r} in blacklist will severely impede library functionality\")\n\n # Blacklist experiment-specific heartbeat if general (current) heartbeat is blacklisted\n if (\"current_heartbeat\" in blacklist) and (\"heartbeat\" not in blacklist):\n blacklist.append(\"heartbeat\")\n\n return blacklist",
"def load_blocked_groups(self):\n print(\" ->[*] Loading group blacklist...\")\n blacklist = set()\n if os.access(\"blocked_groups\", os.F_OK):\n with codecs.open(\"blocked_groups\", \"r\", encoding=\"utf-8\") as groups:\n blocked_groups = groups.readlines()\n for group in blocked_groups:\n blacklist.add(group)\n return blacklist",
"def read_modified_alert_ids():\n # Return an empty list if the file doesn't exist.\n if not os.path.exists(MODIFIED_ALERTS_FILE):\n return []\n # Get a lock on the file\n lock = filelock.FileLock(MODIFIED_ALERTS_FILE, 5)\n lock.acquire()\n # Open the file and read in the data.\n fp = open(MODIFIED_ALERTS_FILE, \"r+\")\n ids = fp.read().split(\"\\n\")\n # remove zero length strings\n ids = filter(len, ids)\n # convert IDs to int\n ids = list(map(int, ids))\n # remove duplicates\n ids = list(set(ids))\n # close and remove the file\n fp.close()\n #TODO: uncomment when live\n #os.unlink(MODIFIED_ALERTS_FILE)\n # Release the lock.\n lock.release()\n return ids",
"def load_traces(dir, limit=None):\n games = []\n for i, fn in enumerate(os.listdir(dir)):\n if limit is not None and i == limit: break\n\n f = open(os.path.join(dir, fn), 'rb')\n seed, trace = pickle.load(f)\n f.close()\n games.append(trace)\n\n return games",
"def tags_load(folder):\n tags=[]\n fname=os.path.join(folder,\"experiment.txt\")\n if not os.path.exists(fname):\n print(\"creating empty\",os.path.basename(fname))\n with open(fname,'w') as f:\n f.write('')\n with open(fname) as f:\n raw=f.read().split(\"\\n\")\n for line in raw:\n line=line.strip().split(\"#\")[0]\n if not \"=\" in line:\n continue\n tag,vals=[x.strip() for x in line.split('=')]\n vals=[clock_to_float(x) for x in vals.split('-')]\n tags.append([tag]+vals)\n tags.sort(key=lambda x: x[1])\n if len(tags) and not 'baseline' in [x[0] for x in tags]:\n print(\" Tags found but no baseline found! Inventing one.\")\n BL1=1\n BL2=tags[0][1]-1\n if BL2<BL1:\n BL2=BL1+1\n tags.insert(0,['baseline',BL1,BL2])\n if len(tags):\n print(\"Tags found in %s:\"%os.path.basename(fname))\n for tag in tags:\n print(\" %s = %s\"%(tag[0],str(tag[1:])))\n return tags",
"def _parse_blacklist(path):\n if path is None:\n return []\n with open(path, 'rt') as f:\n return [line.strip() for line in f]",
"def load_probes(probe_file):\n probes = common.read_file(probe_file)\n probe_list = list(filter(None, probes))\n return probe_list",
"def loadFromFile(self, filename):\n\t\treturn []",
"def read_activity_data(dir, file_extension, mask_file):\n time1 = time.time()\n mask_img = nib.load(mask_file)\n mask = mask_img.get_data()\n count = 0\n for index in np.ndindex(mask.shape):\n if mask[index] != 0:\n count += 1\n files = [f for f in sorted(os.listdir(dir))\n if os.path.isfile(os.path.join(dir, f))\n and f.endswith(file_extension)]\n activity_data = []\n for f in files:\n img = nib.load(os.path.join(dir, f))\n data = img.get_data()\n (d1, d2, d3, d4) = data.shape\n masked_data = np.zeros([d4, count], np.float32, order='C')\n count1 = 0\n for index in np.ndindex(mask.shape):\n if mask[index] != 0:\n masked_data[:, count1] = np.copy(data[index])\n count1 += 1\n activity_data.append(masked_data)\n logger.info(\n 'file %s is loaded and masked, with data shape %s' %\n (f, masked_data.shape)\n )\n time2 = time.time()\n logger.info(\n 'data reading done, takes %.2f s' %\n (time2 - time1)\n )\n return activity_data",
"def load_list(self):\n with open('/home/roman/Skola/ProjektyMimo/ApkaNaSkolu/output.dat', 'rb') as f:\n self.events = pickle.load(f)",
"def read_blacklist(self, _blacklist_fname):\n try:\n blacklist_f = codecs.open(_blacklist_fname, mode='r', encoding='utf-8')\n line_idx = 0\n for fline in blacklist_f:\n line_idx = line_idx + 1\n line = fline.strip()\n if ((len(line) > 0) and (line[0] != '#')):\n # non null and not started # line ... add to the set\n if (line in self.__black_list_set):\n print u'duplication found [' + line + u'] at ' + str(line_idx) + \\\n u' ignored'\n else:\n self.__black_list_set.add(line)\n\n print u'read blacklist_file [' + _blacklist_fname + \\\n u'], number of entries: ' + str(len(self.__black_list_set))\n except IOError as e:\n print \"I/O error({0}): {1}\".format(e.errno, e.strerror)\n print \"Can not open a blacklist file {0}\".format(_blacklist_fname)\n print \"Please create blacklist file (an empty file is also fine.)\"\n sys.exit(1)\n except:\n print \"Unexpected error:\", sys.exc_info()[0]\n raise",
"def LoadBadChannelUIDs(self, fname):\n badlUIDs = []\n\n with open(fname, \"r\") as f:\n for l in f:\n bad_bank, bad_channel = l.split()\n badlUIDs.append(self._getUID(bank=int(bad_bank),\n channel=int(bad_channel)))\n\n return badlUIDs",
"def __load_bugs(self):\n bugs = []\n with open(self.reffile(), 'rb') as reffile:\n reader = csv.reader(reffile, delimiter=';', quotechar='\\n')\n for line in reader:\n bugs.append(tuple(map(int, line)))\n return bugs",
"def getRpmBlacklist():\n global index\n enabledRpms = set()\n for stream in enabledStreams.values():\n enabledRpms = enabledRpms.union(stream.get_rpm_artifacts())\n\n allRpms = set()\n for name in index.get_module_names():\n module = index.get_module(name)\n for stream in module.get_all_streams():\n allRpms = allRpms.union(stream.get_rpm_artifacts())\n\n return list(allRpms.difference(enabledRpms))",
"def load_data_for_cross_validation(file_path, N, sentiment):\n tweets = load_data(file_path)\n return tweets[:-N], tweets[-N:], [sentiment for x in range(N)]",
"def filter_events(self):\n events_by_b = []\n events_by_npart = []\n\n bfiles = [f for f in glob.glob(self._path+\"/*.b\") if os.path.isfile(f)]\n npfiles = [f for f in glob.glob(self._path+\"/*.npart\") if os.path.isfile(f)]\n\n if bfiles:\n print \"Found a .b file, doing impact parameter filtering.\"\n self.sort_by_bfile(bfiles, events_by_b)\n if npfiles:\n print \"Found a .npart file, doing participant number filtering.\"\n self.sort_by_npartfile(npfiles, events_by_npart)\n\n if not bfiles and not npfiles:\n self.sort_by_logfolder(events_by_b, events_by_npart)\n\n # Return the appropriate list of events\n if events_by_b:\n print len(events_by_b), \"data files remain after filtering.\"\n return events_by_b\n elif events_by_npart:\n print len(events_by_npart), \"data files remain after filtering.\"\n return events_by_npart\n else:\n print \"filter_events: None of the events fulfill the required criteria:\"\n print \"b range:\", self._bmin, self._bmax, \"Npart range:\", self._npmin, self._npmax",
"def read_betti_dir( fdir ):\n dlist = os.listdir( fdir )\n betti_list = [ f for f in dlist if f.endswith( '.betti' ) ]\n\n # keep the frame numbers organized in a dict ?\n #betti = {}\n # nah, just list them\n betti_arr = []\n for b in betti_list:\n bnums = numpy.loadtxt( fdir+b, dtype=numpy.uint8 )\n betti_arr.append( bnums )\n betti_arr = numpy.asarray( betti_arr )\n return betti_arr.T",
"def read_banfile():\n # matches stuff like\n # \"/GLOW/*\"\n # and extracts the stuff between the quotes\n regex = re.compile(r'^\\s*[\"](/[^\"]+)[\"]\\s*(?:$|[#])')\n bans = []\n\n try:\n with open(BAN_MAPFILE, \"r\", encoding=\"latin-1\") as filehandle:\n for line in filehandle:\n match = regex.match(line)\n if not match:\n continue\n else:\n bans.append(match.group(1))\n except EnvironmentError as err:\n if err.errno == errno.ENOENT:\n logging.getLogger(__name__).warning(\"%s not found - all mappings might fail!\", BAN_MAPFILE)\n else:\n raise\n\n return bans",
"def _load_trial_data_voxceleb(filelist, feature_dir):\n\n trial_list = []\n\n for target_or_not, utterance1_file, utterance2_file in tqdm(filelist, desc=\"load\"):\n is_target = target_or_not == \"1\"\n\n utterance1_path = os.path.join(feature_dir, utterance1_file).replace(\".wav\", \".npy\")\n data1 = np.load(utterance1_path).astype(np.float32)\n\n utterance2_path = os.path.join(feature_dir, utterance2_file).replace(\".wav\", \".npy\")\n data2 = np.load(utterance2_path).astype(np.float32)\n\n trial = Trial(\n trial_features=data1,\n claimed_identity=None,\n test_features=data2,\n is_target=is_target,\n origin=None\n )\n\n trial_list.append(trial)\n\n return trial_list",
"def list_saved_epochs(experiment_name, architecture):\n\n extract_epoch = lambda f: int(f.split('.')[-3])\n filename_list = params_to_filename(experiment_name, architecture)\n return [extract_epoch(f) for f in filename_list]",
"def get_urls(url_list='urls.blur'):\n with open(os.path.join('..', 'data', url_list), 'r') as f:\n urls = [tuple(line.split('\\t')) for line in f.read().split('\\n') \n if line and line[0] != '#']\n return urls",
"def read_flows_from_file(filename: str):\n with open(os.path.join(DATA_DIR, filename)) as f:\n return [int(x.strip()) for x in f.readlines() if x.strip()]",
"def get_exlusions(self):\n files = os.listdir(self.exclusions_path)\n for filename in files:\n image = cv2.imread(self.exclusions_path + filename)\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n gray = cv2.GaussianBlur(gray, (21,21), 0)\n\n if filename.startswith('day'):\n self.gray_refs['day'].append(gray)\n elif filename.startswith('night'):\n self.gray_refs['night'].append(gray)"
] | [
"0.660259",
"0.660259",
"0.6048789",
"0.57697254",
"0.5728082",
"0.56975484",
"0.5672603",
"0.5634948",
"0.56291896",
"0.5584334",
"0.557808",
"0.5545715",
"0.5494566",
"0.54839736",
"0.5439858",
"0.5413065",
"0.5389082",
"0.5352533",
"0.5331738",
"0.52484757",
"0.5247818",
"0.5232517",
"0.5226733",
"0.5220501",
"0.5211987",
"0.51923865",
"0.5187872",
"0.5184594",
"0.5176463",
"0.51399654"
] | 0.71201795 | 0 |
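A minimal sketch of the blacklist-loading step used in the record above. The file name and contents are invented for the example; the only real assumption is that the file holds one event number per line.

import numpy as np

def load_blacklist_sketch(path):
    # One blacklisted event number per line; ndmin=1 keeps the result an
    # array even when the file contains a single value.
    return np.loadtxt(path, ndmin=1).astype(int)

# A file with the lines "3", "17", "42" yields array([ 3, 17, 42]).

The astype(int) call matters because np.loadtxt returns floats by default, while the numbers are later used as row indices when deleting events.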
Loads all events from a given experiment that are not on the blacklist file for that experiment. Returns array of event properties. | def load_events(experiment):
event_properties = load_event_properties(experiment)
blacklist = load_blacklist(experiment)
return np.delete(event_properties,blacklist,axis=0) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_blacklist(experiment):\n blacklist = np.loadtxt('../Slip_Property_Data/%s_blacklist.txt'%experiment)\n return blacklist",
"def load_blacklist(experiment):\n blacklist = np.loadtxt('../Slip_Property_Data/%s_blacklist.txt'%experiment)\n return blacklist",
"def load_event_properties(experiment):\n return np.loadtxt('../Slip_Property_Data/%s_event_properties.txt'%experiment,delimiter=',',skiprows=1)",
"def load_event_properties(experiment):\n return np.loadtxt('../Slip_Property_Data/%s_event_properties.txt'%experiment,delimiter=',',skiprows=1)",
"def get_events(self):\n disallowed = [ident(self.add_event.__func__), ident(ident)]\n self.frames = None\n\n return [item for item in self.events if item[2] not in disallowed]",
"def get_sample_events(self): \n return self.sample_events[:]",
"def filter_events(self):\n events_by_b = []\n events_by_npart = []\n\n bfiles = [f for f in glob.glob(self._path+\"/*.b\") if os.path.isfile(f)]\n npfiles = [f for f in glob.glob(self._path+\"/*.npart\") if os.path.isfile(f)]\n\n if bfiles:\n print \"Found a .b file, doing impact parameter filtering.\"\n self.sort_by_bfile(bfiles, events_by_b)\n if npfiles:\n print \"Found a .npart file, doing participant number filtering.\"\n self.sort_by_npartfile(npfiles, events_by_npart)\n\n if not bfiles and not npfiles:\n self.sort_by_logfolder(events_by_b, events_by_npart)\n\n # Return the appropriate list of events\n if events_by_b:\n print len(events_by_b), \"data files remain after filtering.\"\n return events_by_b\n elif events_by_npart:\n print len(events_by_npart), \"data files remain after filtering.\"\n return events_by_npart\n else:\n print \"filter_events: None of the events fulfill the required criteria:\"\n print \"b range:\", self._bmin, self._bmax, \"Npart range:\", self._npmin, self._npmax",
"def get_game_events(self):\n\t\tcontents = self.archive.read_file('replay.game.events')\n\t\treturn self.protocol.decode_replay_game_events(contents)",
"async def load_events(\n self,\n event_config: dict,\n guild: Guild\n ) -> Dict[int, BaseEvent]:\n events = {}\n for message_id_str, event_dict in event_config.items():\n events[int(message_id_str)] = await self.load_event(\n event_dict,\n guild\n )\n\n return events",
"def getAll(self, event_name):\n raw_events = self._callEventGetAll(self._id, event_name)\n return [snippet_event.from_dict(msg) for msg in raw_events]",
"def load_evs(subject, name, condition):\n evs = []\n for id in get_image_ids(name):\n task_key = BOLD_NAMES[id - 1]\n ev_file = f\"{HCP_DIR}/subjects/{subject}/EVs/{task_key}/{condition}.txt\"\n ev = dict(zip([\"onset\", \"duration\", \"amplitude\"], np.genfromtxt(ev_file).T))\n evs.append(ev)\n return evs",
"def _load_smartertime_events(since: datetime, filepath) -> list[Event]:\n\n print(f\"Loading smartertime data from {filepath}\")\n with open(filepath) as f:\n data = json.load(f)\n events = [Event(**e) for e in data[\"events\"]]\n\n # Filter out events before `since`\n events = [e for e in events if since.astimezone(timezone.utc) < e.timestamp]\n\n # Filter out no-events and non-phone events\n events = [\n e for e in events if any(s in e.data[\"activity\"] for s in [\"phone:\", \"call:\"])\n ]\n\n # Normalize to window-bucket data schema\n for e in events:\n e.data[\"app\"] = e.data[\"activity\"]\n e.data[\"title\"] = e.data[\"app\"]\n\n return events",
"def load_events_in_crontab(self):\n # clean the current crontab from all Kalliope event\n self._remove_all_job()\n # load the brain file\n for synapse in self.brain.synapses:\n for signal in synapse.signals:\n # print signal\n # if the signal is an event we add it to the crontab\n if type(signal) == Event:\n # for all synapse with an event, we add the task id to the crontab\n self._add_event(period_string=signal.period, event_id=synapse.name)",
"def available_events(self):\n return self.target.read_value(self.available_events_file).splitlines()",
"def get_event_list(self):\n pass",
"def events_reset(self):\n self._expected_events = []\n jsonfiles = [p for p in [os.path.join(self._event_dir, f)\n for f in os.listdir(self._event_dir)]\n if os.path.isfile(p)]\n for jsonfile in jsonfiles:\n os.unlink(jsonfile)",
"def load_experiments(filename):\n fp = open(filename)\n experiment_names = None\n try:\n experiment_names = simplejson.load(fp)\n except Exception, e:\n l.error(\"Unable to parse experiment file %s: %s\" % (filename, e))\n raise e\n finally:\n fp.close()\n \n for entry in experiment_names:\n for key in entry.keys():\n if key not in ExperimentLoader.ALLOWED_ATTRIBUTES:\n l.warning(\"Ignoring unrecognized key %s on experiment \"\n \"definition %s in filename %s\" %\n (key, entry, filename))\n if ExperimentLoader.NAME_ATTRIBUTE in entry:\n Experiment.objects.get_or_create(\n name=entry.get(ExperimentLoader.NAME_ATTRIBUTE))\n else:\n l.warning(\"Invalid entry in experiment file %s : %s\" %\n (filename, entry))",
"def load_events():\n\n print('load_events')\n\n Event.query.delete()\n\n for row in open(\"seed_data/events.csv\"):\n row = row.rstrip()\n private, \\\n host_id, \\\n venue, \\\n title, \\\n time_begin, \\\n time_end, \\\n max_cap, \\\n url = row.split(',')\n\n private = int(private)\n host_id = int(host_id)\n\n ven = Venue.query.filter_by(name=venue).first()\n\n begin_at = datetime.strptime(time_begin, \"%y-%m-%d %H:%M:%S\")\n\n end_at = datetime.strptime(time_end, \"%y-%m-%d %H:%M:%S\")\n\n evt = Event(private=private,\n host_id=host_id,\n venue_id=ven.id,\n title=title,\n begin_at=begin_at,\n end_at=end_at,\n max_cap=max_cap,\n url=url)\n\n db.session.add(evt)\n\n db.session.commit()",
"def test_load_events(self):\n command = '{0}'.format(\n os.path.join(self.datadir, 'monol_testA.evt'))\n hen.read_events.main(command.split())\n new_filename = self.first_event_file\n ev = hen.io.load_events(new_filename)\n assert hasattr(ev, 'header')\n assert hasattr(ev, 'gti')",
"async def get_events(self) -> list[Event]:\n log.debug(\"Discovering events in branding repository.\")\n\n try:\n event_directories = await self.fetch_directory(\"events\", types=(\"dir\",)) # Skip files.\n except Exception:\n log.exception(\"Failed to fetch 'events' directory.\")\n return []\n\n instances: list[Event] = []\n\n for event_directory in event_directories.values():\n log.trace(f\"Attempting to construct event from directory: '{event_directory.path}'.\")\n try:\n instance = await self.construct_event(event_directory)\n except Exception as exc:\n log.warning(f\"Could not construct event '{event_directory.path}'.\", exc_info=exc)\n else:\n instances.append(instance)\n\n return instances",
"def _filter_capabilities(self, events): \n events_out = [x for x in events if Capability.has(x)]\n return events_out",
"def get_events(self):\n\n events = []\n\n for watched_file in self._watched_files:\n for line in watched_file:\n self._do_rule_processing(line, events)\n\n return events",
"async def load_ongoing_events(\n event_config: dict\n ) -> Dict[int, OngoingEvent]:\n events = {}\n for message_id_str, event_dict in event_config.items():\n event_dict[\"message_embed\"] = Embed.from_dict(\n event_dict[\"message_embed\"]\n )\n events[int(message_id_str)] = OngoingEvent(**event_dict)\n\n return events",
"def load_json(path):\n events = []\n try:\n with open(path, 'r') as fd:\n data = fd.read()\n except IOError as e:\n print \"I/O error({0}): {1}\".format(e.errno, e.strerror)\n return []\n except:\n raise\n\n jsondata = json.loads(data)\n if 'events' not in jsondata:\n return []\n for e in jsondata['events']:\n event = Event(occasion=e['occasion'],\n invited_count=e['invited_count'],\n year=e['year'],\n month=e['month'],\n day=e['day'],\n cancelled=e['cancelled'] if 'cancelled' in e else False)\n events.append(event)\n \n return events",
"def _extract_complement_events(self):\n\t\ttry:\n\t\t\ttable = self.hdf5file[fastq_paths[self.version]['complement'] % self.group]\n\t\t\tself.complement_events = [Event(x) for x in table['Events'][()]]\n\t\texcept Exception, e:\n\t\t\tself.complement_events = []",
"def load_list(self):\n with open('/home/roman/Skola/ProjektyMimo/ApkaNaSkolu/output.dat', 'rb') as f:\n self.events = pickle.load(f)",
"def loadData(self):\n\n for info in os.walk(settings.BEHAVIOR_PATH):\n path = info[0]\n\n # Get the files, if there are any\n for element in info[2]:\n split = element.split(\".\")\n\n # If there's only one '.' in the filename, then we know it's not a .old.h5 file, or a file without an extension.\n if(len(split) == 2):\n name, extension = element.split(\".\")\n\n if(self.log):\n logging.debug(\"Name: \" + name + \" Extension: \" + extension)\n\n for animal in self.subjects:\n\n # Get the date from the name and format it in ISO format to compare to the current date.\n experimentDate = name.split(\"_\")[-1]\n isoDate = experimentDate[:4] + \"-\" + experimentDate[4:6] + \"-\" + experimentDate[6:8]\n\n if(self.log):\n logging.debug(\"Comparing date: \" + str(isoDate) + \" to \" + str(self.date) + \" (today)\")\n\n # We only want data from today from an animal that we care about\n if(self.date == extrafuncs.parse_isodate(isoDate) and extension == \"h5\" and animal in name):\n try:\n full_path = os.path.join(path, element)\n self.behavData.append((full_path, loadbehavior.BehaviorData(full_path, readmode='full')))\n if(self.log):\n logging.info(\"Successfully loaded data from: \" + full_path)\n except:\n self.sendToAllSubscribers(\"Error when attempting to load \" + full_path + \".\", \"Alert: Alarm error\")\n if(self.log):\n logging.error(\"Could not load \" + full_path + \".\")",
"def load_events(protests_data):\n\n print \"Events\"\n # Delete all rows in table, so if we need to run this a second time,\n # we won't be trying to add duplicate events \n Event.query.delete()\n\n # Read through each protest event all need info\n for protest in protests_data:\n event_id = protest[0]\n full_date = protest[1]\n year = protest[3]\n event_code = protest[27]\n full_location = protest[36]\n latitude = protest[39]\n longitude = protest[40]\n url = protest[57]\n\n if latitude == \"\" or longitude == \"\":\n continue\n\n event = Event(event_id=event_id,\n full_date=full_date,\n year=year, \n event_code=event_code, \n full_location=full_location,\n latitude=latitude,\n longitude=longitude,\n url=url)\n\n # Add event to session\n db.session.add(event)\n\n # Commit to database\n db.session.commit()",
"def load_bad_times(nwb):\n times = nwb.invalid_times\n bad_times = None\n if times is not None:\n start = nwb.invalid_times['start_time'].data[:]\n stop = nwb.invalid_times['stop_time'].data[:]\n bad_times = np.stack([start, stop], axis=1)\n return bad_times",
"def _filter_capabilities(self, events):\n return [x for x in events if Capability.has(x)]"
] | [
"0.6039847",
"0.6039847",
"0.59158844",
"0.59158844",
"0.5699845",
"0.52346635",
"0.51764226",
"0.51480323",
"0.5146169",
"0.5146137",
"0.51326334",
"0.5104683",
"0.50849926",
"0.50769466",
"0.5074125",
"0.50419056",
"0.5038742",
"0.5038058",
"0.5028962",
"0.5018109",
"0.49805495",
"0.49588764",
"0.4958491",
"0.4936884",
"0.49311435",
"0.49102607",
"0.485762",
"0.48429695",
"0.4840137",
"0.4835552"
] | 0.8164338 | 0 |
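An illustrative run of the row-removal idiom from the record above: np.delete with axis=0 drops the blacklisted row indices from a 2-D property array. The numbers are made up.

import numpy as np

event_properties = np.arange(15.0).reshape(5, 3)   # five fake events, three columns
blacklist = np.array([1, 3])                       # row indices to drop

kept = np.delete(event_properties, blacklist, axis=0)
print(kept.shape)                                  # (3, 3): rows 0, 2 and 4 remain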
Take array, filter out rows in which the element in the given column is not in the range low-high (inclusive) | def filter(data,col,low,high):
inds = np.where(data[:,col]>=low)
data_trim = data[inds]
inds = np.where(data_trim[:,col]<=high)
data_trim = data_trim[inds]
return data_trim | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def remove_out_of_bounds(self, data, low_bound, high_bound):\n data = data.dropna()\n data = data[(data > low_bound).all(axis=1) & (data < high_bound).all(axis=1)] \n return data",
"def isolate_burned_pixels(array, upper, lower):\n not_burned = numpy.logical_or(array <= lower,\n array >= upper)\n array[not_burned] = 0\n return array",
"def remove_distance_extremes(scan, low, high):\n scan.samples[:] = [sample for sample in scan.samples if (\n sample.distance >= low and sample.distance <= high)]",
"def drop_outliers(data, cols, t=1.5):\n iqr_d = iqr(data, cols, t)\n for col in cols:\n return data[~((data[col]< iqr_d[\"low_b\"][col]) | (data[col]> iqr_d[\"upp_b\"][col]))]",
"def cut_transformed_array_borders(array): \n for col in range(array.shape[1]): \n col_=array[:, col]\n \n where=np.where(col_>0)\n \n if len(where[0])>0:\n \n col_[[np.min(where[0]),np.min(where[0])+1, np.max(where[0]), np.max(where[0])-1 ]]=0\n \n array[:,col]=col_\n \n for row in range(array.shape[0]): \n row_=array[row,:]\n \n where=np.where(row_>0)\n if len(where[0])>0:\n\n row_[[np.min(where[0]),np.min(where[0])+1, np.max(where[0]), np.max(where[0])-1 ]]=0\n \n array[row,:]=row_\n \n return array",
"def _filter_column(array, col, criteria):\n # Raise an error if the column does not exist. This is the only way to\n # test it across all possible types (pandas, recarray...)\n try:\n array[col]\n except:\n raise KeyError('Filtering criterion %s does not exist' % col)\n\n if (not isinstance(criteria, string_types) and\n not isinstance(criteria, bytes) and\n not isinstance(criteria, tuple) and\n isinstance(criteria, collections.Iterable)):\n\n filter = np.zeros(array.shape[0], dtype=np.bool)\n for criterion in criteria:\n filter = np.logical_or(filter,\n _filter_column(array, col, criterion))\n return filter\n\n if isinstance(criteria, tuple):\n if len(criteria) != 2:\n raise ValueError(\"An interval must have 2 values\")\n if criteria[0] is None:\n return array[col] <= criteria[1]\n if criteria[1] is None:\n return array[col] >= criteria[0]\n filter = array[col] <= criteria[1]\n return np.logical_and(filter, array[col] >= criteria[0])\n\n return array[col] == criteria",
"def filterRows(function, rows):\n return [y for y in rows if function(y)]",
"def apply_row_cut(self, array_chunk):\n mask = np.ones(len(array_chunk), dtype=bool)\n\n for colname, lower_bound in self.row_cut_min_dict.items():\n mask *= array_chunk[colname] > lower_bound\n\n for colname, upper_bound in self.row_cut_max_dict.items():\n mask *= array_chunk[colname] < upper_bound\n\n for colname, equality_condition in self.row_cut_eq_dict.items():\n mask *= array_chunk[colname] == equality_condition\n\n for colname, inequality_condition in self.row_cut_neq_dict.items():\n mask *= array_chunk[colname] != inequality_condition\n\n return array_chunk[mask]",
"def clip_extrema(self, nlow=0, nhigh=0):\n\n if nlow is None:\n nlow = 0\n if nhigh is None:\n nhigh = 0\n\n argsorted = np.argsort(self.data_arr.data, axis=0)\n mg = np.mgrid[[slice(ndim)\n for i, ndim in enumerate(self.data_arr.shape) if i > 0]]\n for i in range(-1*nhigh, nlow):\n # create a tuple with the indices\n where = tuple([argsorted[i, :, :].ravel()] +\n [i.ravel() for i in mg])\n self.data_arr.mask[where] = True",
"def remove_outliers(self, matrix):\n input = matrix[:, :-1]\n row_incides_to_delete = []\n for j, column in enumerate(input.transpose()):\n self.feature_means.append(np.mean(column))\n self.feature_stds.append(np.std(column))\n\n for i, row in enumerate(input):\n cell = input[i, j]\n if cell > self.feature_means[j] + 3 * self.feature_stds[j] or cell < self.feature_means[j] - 3 * \\\n self.feature_stds[j]:\n row_incides_to_delete.append(i)\n matrix = np.delete(matrix, row_incides_to_delete, 0)\n return matrix, len(list(set(row_incides_to_delete)))",
"def remove_outliers(X, lo, hi):\n\t\n\tx1 = np.array(X)\n\ty1 = x1[np.where(x1 > lo)]\n\ty2 = y1[np.where(y1 <= hi)]\n\n\treturn y2",
"def delete_outliers_of_data_before(data: np.ndarray, qi_inspect: int, threshold: int):\n idx_to_del = []\n done = False\n for j in range(data.shape[0]):\n if data[j, qi_inspect] < threshold:\n if not done:\n idx_to_del = j\n done = True\n else:\n idx_to_del = np.append(idx_to_del, j)\n return np.delete(data, idx_to_del, axis=0)",
"def cloud_filter(array, bqa):\n array_dest = array.copy()\n array_dest[np.where((bqa != 2720) & (bqa != 2724) & (bqa != 2728) & (bqa != 2732)) ] = 'nan'\n return array_dest",
"def not_between(self, column: str, low: [str, int], high: [str, int]):\n self._wheres += (BetweenExpression(column, low, high, equality=\"NOT BETWEEN\"),)\n return self",
"def remove_outliers(a, constant=1.5):\n if not isinstance(a, np.ndarray):\n a = np.array(list(a))\n\n upper_quartile = np.percentile(a, 75)\n lower_quartile = np.percentile(a, 25)\n IQR = (upper_quartile - lower_quartile) * constant\n quartile_set = (lower_quartile - IQR, upper_quartile + IQR)\n return [y for y in a.tolist() if y >= quartile_set[0] and y <= quartile_set[1]]",
"def filterMissings(self, threshold, data):\n\n #replace NAs by 0 for counting\n data.fillna(0).astype(bool).sum(axis=1)\n\n filtered_columns = data.columns\n\n\n #find out threshold, i.e. minimum number of non-zero in real numbers\n rowNumber = data.shape[0]\n min_nonZeros = int(rowNumber - ((rowNumber * int(threshold))/100))\n\n zero_counts = data.astype(bool).sum(axis=0)\n\n for columnID, nonZeros in zero_counts.items():\n if nonZeros <= min_nonZeros:\n filtered_columns = filtered_columns.drop(columnID)\n\n\n return data[filtered_columns]",
"def threshold(array, value):\n width = len(array[0])\n height = len(array)\n new_array = np.array(np.zeros((height, width)))\n for row in range(height):\n for col in range(width):\n new_array[row,col] = (array[row,col] if (array[row,col] > value) else 0)\n return new_array",
"def FilterByRange(X, rangeCut=0.4):\n Rg = X.iloc[:, X.columns.get_level_values(1) == \"ptp\"]\n Xidx = np.all(Rg.values <= rangeCut, axis=1)\n return X.iloc[Xidx, :]",
"def pixel2mask(image: np.ndarray, low: float, high: float) -> np.ndarray:\n mask = image > low\n labels = smeasure.label(mask, background=0)\n for region in smeasure.regionprops(label_image=labels, intensity_image=image):\n if region.max_intensity < high:\n mask[region.coords[:, 0], region.coords[:, 1]] = 0\n\n return mask",
"def extract_from_Array(array, limits=()):\n if limits==():\n return array\n low = limits[0]\n high = limits[1]\n idxlower = set(np.where(array<=high)[0])\n idxhigher = set(np.where(array>=low)[0])\n idx = idxhigher.intersection(idxlower)\n return array[list(idx)]",
"def drop_rows_with_outliers(df, columns, sigma=3):\n selection = np.full(len(df.index), True, dtype=np.dtype('bool'))\n if not isinstance(columns, list):\n columns = [columns]\n for var in columns:\n std_var = np.std(df[var])\n mean_var = np.mean(df[var])\n in_range = np.logical_and(df[var] > mean_var - sigma*std_var,\n df[var] < mean_var + sigma*std_var)\n selection = np.logical_and(selection, in_range)\n return df[selection]",
"def trans(array,dim):\n return array[filter(lambda x: x != dim,range(len(array)) ) ]",
"def remove_angular_window(scan, low, high):\n # angle of samples is encoded in millidegrees\n scan.samples[:] = [sample for sample in scan.samples if (\n 0.001 * sample.angle < low or 0.001 * sample.angle > high)]",
"def array_range(a, low, high, ref=None):\n if ref is None:\n ref = a\n return a[np.logical_and(ref >= low, ref < high)]",
"def detect_outlier(column, max_dev=2):\n column_mean = np.mean(column)\n column_std = np.std(column)\n dist_from_mean = abs(column - column_mean)\n outlier_filter = dist_from_mean > max_dev * column_std\n ids = np.arange(len(column))\n return ids[outlier_filter]",
"def filter_column(col, row):\n return col == column",
"def find_holes(db, table_name, column_name, _range, filter=None):\n if not filter:\n filter = {\"match_all\": {}}\n\n _range = wrap(_range)\n params = {\n \"min\": _range.min,\n \"max\": _range.max - 1,\n \"column_name\": db.quote_column(column_name),\n \"table_name\": db.quote_column(table_name),\n \"filter\": esfilter2sqlwhere(db, filter)\n }\n\n min_max = db.query(\"\"\"\n SELECT\n min({{column_name}}) `min`,\n max({{column_name}})+1 `max`\n FROM\n {{table_name}} a\n WHERE\n a.{{column_name}} BETWEEN {{min}} AND {{max}} AND\n {{filter}}\n \"\"\", params)[0]\n\n db.execute(\"SET @last={{min}}-1\", {\"min\": _range.min})\n ranges = db.query(\"\"\"\n SELECT\n prev_rev+1 `min`,\n curr_rev `max`\n FROM (\n SELECT\n a.{{column_name}}-@last diff,\n @last prev_rev,\n @last:=a.{{column_name}} curr_rev\n FROM\n {{table_name}} a\n WHERE\n a.{{column_name}} BETWEEN {{min}} AND {{max}} AND\n {{filter}}\n ORDER BY\n a.{{column_name}}\n ) a\n WHERE\n diff>1\n \"\"\", params)\n\n if ranges:\n ranges.append({\"min\": min_max.max, \"max\": _range.max})\n else:\n if min_max.min:\n ranges.append({\"min\": _range.min, \"max\": min_max.min})\n ranges.append({\"min\": min_max.max, \"max\": _range.max})\n else:\n ranges.append(_range)\n\n return ranges",
"def filter_percentile(df, col, up=95, down=5):\n pup = np.percentile(df[col].values, up)\n pdw = np.percentile(df[col].values, down)\n\n s = (df[col]<pup) & (df[col]>pdw)\n df2 = df[s]\n\n return df2",
"def process_dataframe(self, dataframe):\n return dataframe[dataframe.ix[:,0] < dataframe.ix[:,1]]",
"def process_dataframe(self, dataframe):\n return dataframe[dataframe.ix[:,0] < dataframe.ix[:,1]]"
] | [
"0.6622279",
"0.62409604",
"0.62121946",
"0.60105443",
"0.5949661",
"0.5949454",
"0.58782387",
"0.5841053",
"0.57375765",
"0.5733186",
"0.5720308",
"0.5711006",
"0.5705007",
"0.56055427",
"0.5593223",
"0.5579824",
"0.5573033",
"0.556831",
"0.55604434",
"0.5549569",
"0.5492338",
"0.5490609",
"0.54313934",
"0.54249436",
"0.5408461",
"0.54036254",
"0.53609073",
"0.53420377",
"0.5341951",
"0.5341951"
] | 0.74447274 | 0 |
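The record above filters in two passes of np.where; the same inclusive low/high cut can be expressed as a single boolean mask, which avoids the intermediate copy. A short sketch with invented data:

import numpy as np

def filter_range(data, col, low, high):
    # Keep rows whose value in column `col` lies in [low, high].
    mask = (data[:, col] >= low) & (data[:, col] <= high)
    return data[mask]

data = np.array([[1.0, 10.0],
                 [2.0, 25.0],
                 [3.0, 40.0]])
print(filter_range(data, 1, 10, 30))   # keeps the first two rows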
Create a storage group template. The new storage group will be associated with the CPC identified by the `cpc-uri` input property. | def create(self, properties):
if properties is None:
properties = {}
result = self.session.post(self._base_uri, body=properties)
# There should not be overlaps, but just in case there are, the
# returned props should overwrite the input props:
props = copy.deepcopy(properties)
props.update(result)
name = props.get(self._name_prop, None)
uri = props[self._uri_prop]
storage_group_template = StorageGroupTemplate(self, uri, name, props)
self._name_uri_cache.update(name, uri)
return storage_group_template | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def capacitygroup_create(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(\n lambda: cmd_capacitygroup_create(cmd_ctx, cpc, options))",
"def add_storage_group1(self):\n\n faked_storage_group = self.faked_console.storage_groups.add({\n 'object-id': SG1_OID,\n # object-uri will be automatically set\n # parent will be automatically set\n # class will be automatically set\n 'cpc-uri': CPC_URI,\n 'name': SG1_NAME,\n 'description': 'Storage Group #1',\n 'type': 'fcp',\n 'shared': False,\n 'fulfillment-state': 'complete',\n 'connectivity': 4,\n })\n return faked_storage_group",
"def security_group_create(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(keep_name=True, **kwargs)\n return cloud.create_security_group(**kwargs)",
"def create_cluster_template(\n self, name, image_id=None, keypair_id=None, coe=None, **kwargs\n ):\n cluster_template = (\n self.container_infrastructure_management.create_cluster_template(\n name=name,\n image_id=image_id,\n keypair_id=keypair_id,\n coe=coe,\n **kwargs,\n )\n )\n\n return cluster_template",
"def placement_group(template, name):\n p = PlacementGroup(name, template=template)\n p.Strategy = 'cluster'\n return p",
"def create(self, body: CloudSecurityGroup) -> Dict:\n\t\treturn self._post(route=AWSSecurityGroupConsts.CLOUD_SECURITY_GROUP.value, body=body)",
"def create_resource(self, namespace: \"str\" = None):\n names = [\"create_namespaced_csistorage_capacity\", \"create_csistorage_capacity\"]\n\n _kube_api.execute(\n action=\"create\",\n resource=self,\n names=names,\n namespace=namespace,\n api_client=None,\n api_args={\"body\": self.to_dict()},\n )",
"def fusion_api_create_storage_volume_template(self, body, api=None, headers=None):\n return self.template.create(body=body, api=api, headers=headers)",
"def create_group(self, path):\n if self.options['storage_method'] == 'hdf5':\n # execute h5py command\n self.file_pointer.create_group(path)\n elif self.options['storage_method'] == 'none':\n # save command for later processing\n self.h5commands.append((\"create_group\", path,))\n else:\n raise Exception('Invalid option value for storage_method (%s)' % storage_method)",
"def create(self, context=None):\n values = self.obj_get_changes()\n db_nodegroup = self.dbapi.create_nodegroup(values)\n self._from_db_object(self, db_nodegroup)",
"def __create_group(self):\n\n group = time.strftime(_GROUP_NAME_FORMAT, time.localtime())\n LOG.info(\"Creating backup group '%s'.\", group)\n\n group_path = self.group_path(group)\n\n try:\n os.mkdir(group_path)\n except EnvironmentError as e:\n if e.errno != errno.EEXIST:\n raise Error(\"Unable to create a new backup group '{}': {}.\",\n group_path, psys.e(e))\n\n self.__on_group_created(group)\n\n return group",
"def create_pvc(\n self,\n storageclass,\n accessmode=\"ReadWriteOnce\",\n pvc_name_prefix=\"autotests-pvc\",\n pvc_size=3\n ):\n if self.name:\n pvc_name = self.name\n else:\n pvc_name = f\"{pvc_name_prefix}-{get_random_str()}\"\n pvc_size = f\"{pvc_size}Gi\"\n accessmode = accessmode.split()\n\n pvc_data = {}\n pvc_data['pvc_name'] = pvc_name\n pvc_data['cluster_namespace'] = self.namespace\n pvc_data['storageclass_namespace'] = storageclass\n pvc_data['storage'] = pvc_size\n pvc_data['access_mode'] = accessmode\n\n data = generate_yaml_from_jinja2_template_with_data(\n self.template_path,\n **pvc_data\n )\n self.service_pvc.create(body=data, namespace=self.namespace)\n\n return pvc_name",
"def storage_pool_create(context, values):\n if not values.get('id'):\n values['id'] = uuidutils.generate_uuid()\n\n storage_pool_ref = models.StoragePool()\n storage_pool_ref.update(values)\n\n session = get_session()\n with session.begin():\n session.add(storage_pool_ref)\n\n return _storage_pool_get(context,\n storage_pool_ref['id'],\n session=session)",
"def create(self, volume_types, name=None,\n description=None, user_id=None,\n project_id=None, availability_zone=None):\n\n body = {'consistencygroup': {'name': name,\n 'description': description,\n 'volume_types': volume_types,\n 'user_id': user_id,\n 'project_id': project_id,\n 'availability_zone': availability_zone,\n 'status': \"creating\",\n }}\n\n return self._create('/consistencygroups', body, 'consistencygroup')",
"def createVolumeGroup(self, pvs, name):\n vg = {}\n vg['command'] = 'create:volgroup'\n vg['extentSize'] = EXTENT_SIZE\n vg['pvs'] = pvs\n vg['name'] = name\n\n return vg",
"def capacitygroup_add_partition(cmd_ctx, cpc, capacitygroup, **options):\n cmd_ctx.execute_cmd(\n lambda: cmd_capacitygroup_add_partition(\n cmd_ctx, cpc, capacitygroup, options))",
"def create_storageclass(\n self,\n blockPool,\n sc_name_prefix=\"autotests-sc\",\n allow_volume_expansion=True,\n reclaim_policy=\"Delete\",\n fstype=\"xfs\",\n clusterNamespace=framework.config.ENV_DATA['cluster_namespace'],\n ):\n if self.name:\n sc_name = self.name\n else:\n sc_name = f\"{sc_name_prefix}-{get_random_str()}\"\n\n sc_data = {}\n sc_data['k8s_api_version'] = defaults.STORAGE_API_VERSION\n sc_data['storageclass_name'] = sc_name\n sc_data['volume_expansion'] = allow_volume_expansion\n sc_data['reclaimPolicy'] = reclaim_policy\n sc_data['blockPool'] = blockPool\n sc_data['clusterNamespace'] = clusterNamespace\n sc_data['fstype'] = fstype\n\n data = generate_yaml_from_jinja2_template_with_data(\n self.template_path,\n **sc_data\n )\n self.service_sc.create(body=data)\n\n return sc_name",
"def request_group_create():\n return Response(render_template('admin/group/create-update.html',\n csrf_token=(\n get_raw_jwt() or {}).get(\"csrf\"),\n target=\"/admin/group/create\"),\n mimetype='text/html')",
"def create_template(self, **kwargs):\n _template = self.get_template(name=kwargs[\"name\"])\n if _template:\n raise ValueError(f\"Template already used: {kwargs['name']}\")\n\n if \"compute_id\" not in kwargs:\n kwargs[\"compute_id\"] = \"local\"\n\n response = self.http_call(\n \"post\", url=f\"{self.base_url}/templates\", json_data=kwargs\n )\n\n return response.json()",
"def createGroup(self, *group):\n if not self.rank:\n logging.info('Creating atom group {}'.format(group))\n\n if not len(group):\n for idSS in self.pargs['idSS']:\n self.lmp.command('group group{} type {}'.format(idSS, idSS))\n else:\n self.lmp.command('group ' + ('{} ' * len(group)).format(*group))",
"def product_group_create(obj, name, department):\n client = get_client(obj)\n\n with Action('Creating product_group: {}'.format(name), nl=True):\n pg = client.product_group_create(name, department)\n\n print(json.dumps(pg, indent=4))",
"def test_create_resource_group(self):\n pass",
"def create_new_volume(self, volumeInfo, change_name=True):\n size = volumeInfo.get(SVC_KEY_VDISK_CAPACITY)\n if (change_name):\n new_volume_name = self._get_new_volume_name(\n volumeInfo.get(SVC_KEY_VDISK_NAME))\n else:\n new_volume_name = volumeInfo.get(SVC_KEY_VDISK_NAME)\n if SVC_KEY_VOLUME_GROUP in volumeInfo:\n volumeGroup = volumeInfo.get(SVC_KEY_VOLUME_GROUP)\n elif self.dft_stg_pool:\n volumeGroup = self.dft_stg_pool\n else:\n volumeGroup = self.get_mdisk_grp_by_size(size)\n\n if volumeGroup is None:\n raise SVCNoSANStoragePoolException\n\n # iogrp parameter should not use name since it could be\n # customized. It is always safe to use iogrp 0.\n cmd = \"svctask mkvdisk -name %s -iogrp 0 -mdiskgrp %s \" \\\n \"-size %s -unit b\" % (new_volume_name, volumeGroup, size)\n\n output, err_output = self._svc_command(cmd)\n\n volume_uid = self.get_uid(new_volume_name)\n\n # Check if it got created\n if not volume_uid:\n # The SVC message of out of space is not really user friendly.\n # So, we will manully check whether the pool ran out of space\n free_capacity = self.get_mdisk_grp_size(volumeGroup)\n\n if float(size) > float(free_capacity):\n ex_args = {'pool_name': volumeGroup,\n 'size': size,\n 'free_capacity': free_capacity}\n raise SVCVolumeGroupOutOfSpace(**ex_args)\n if err_output:\n ex_args = {'new_volume_name': new_volume_name,\n 'err_output': err_output}\n raise SVCVolumeCreationFailed(**ex_args)\n else:\n # failed to create volume but with no error msg\n # really shouldn't hit this condition\n ex_args = {'cmd': cmd,\n 'e': _(\"No error available\")}\n raise SVCCommandException(**ex_args)\n\n return new_volume_name, volume_uid",
"def add_storage_group2(self):\n\n faked_storage_group = self.faked_console.storage_groups.add({\n 'object-id': SG2_OID,\n # object-uri will be automatically set\n # parent will be automatically set\n # class will be automatically set\n 'cpc-uri': CPC_URI,\n 'name': SG2_NAME,\n 'description': 'Storage Group #2',\n 'type': 'fc',\n 'shared': False,\n 'fulfillment-state': 'complete',\n 'connectivity': 4,\n })\n return faked_storage_group",
"def create_groups(**kwargs):\n for gname in SEC_GROUP_NAMES.itervalues():\n Group.objects.get_or_create(name=gname)",
"def create_secgroup(self, args):\n message = MessageClass()\n region = args[\"Region\"]\n sgid = args[\"Group-Name\"]\n desc = args[\"Description\"]\n\n # Boto3 client creation by providing the access_id and access_secret\n ec2 = boto3.client(service_name='ec2', region_name=region, api_version=None, use_ssl=True,\n verify=None, endpoint_url=None, aws_access_key_id=self.aws_access_key,\n aws_secret_access_key=self.aws_secret_token, aws_session_token=None,\n config=None)\n response = ec2.describe_vpcs()\n vpc_id = response.get('Vpcs', [{}])[0].get('VpcId', '')\n\n response = ec2.create_security_group(GroupName=sgid,\n Description=desc,\n VpcId=vpc_id)\n attachment = MessageAttachmentsClass()\n d = response[\"GroupId\"]\n attachment.title = d\n message.message_text = \"Security group created:\"\n message.attach(attachment)\n\n return message.to_json()",
"def post_security_group_create(self, resource_dict):\n pass",
"def build_cluster_template(\n cluster_config: BaseClusterConfig, bucket: S3Bucket, stack_name: str, log_group_name: str = None\n ):\n LOGGER.info(\"Importing CDK...\")\n from aws_cdk.core import App # pylint: disable=C0415\n\n # CDK import must be inside the redirect_stdouterr_to_logger contextmanager\n from pcluster.templates.cdk_artifacts_manager import CDKArtifactsManager # pylint: disable=C0415\n from pcluster.templates.cluster_stack import ClusterCdkStack # pylint: disable=C0415\n\n LOGGER.info(\"CDK import completed successfully\")\n LOGGER.info(\"Starting CDK template generation...\")\n with tempfile.TemporaryDirectory() as cloud_assembly_dir:\n output_file = str(stack_name)\n app = App(outdir=str(cloud_assembly_dir))\n ClusterCdkStack(app, output_file, stack_name, cluster_config, bucket, log_group_name)\n\n cloud_assembly = app.synth()\n LOGGER.info(\"CDK template generation completed successfully\")\n\n cdk_artifacts_manager = CDKArtifactsManager(cloud_assembly)\n assets_metadata = cdk_artifacts_manager.upload_assets(bucket=bucket)\n generated_template = cdk_artifacts_manager.get_template_body()\n\n return generated_template, assets_metadata",
"def createMainGroup(self):\n\t\tmc.group( n = self.grp.name, em = True )",
"def create_group():\n groupname = request.get_json().get(\"name\")\n description = request.get_json().get(\"description\")\n grp = admin.create_group(current_app.scoped_session(), groupname, description)\n if grp:\n response = admin.get_group_info(current_app.scoped_session(), groupname)\n else:\n response = {\"result\": \"group creation failed\"}\n response = jsonify(response)\n return response"
] | [
"0.65889084",
"0.5891975",
"0.5885409",
"0.58008415",
"0.5717402",
"0.55526215",
"0.5502991",
"0.5459517",
"0.5453945",
"0.53172046",
"0.53113604",
"0.5268771",
"0.5266061",
"0.5265724",
"0.5209222",
"0.5186905",
"0.51733065",
"0.5149206",
"0.5142596",
"0.51349574",
"0.51007265",
"0.5083314",
"0.5069509",
"0.50648785",
"0.5058751",
"0.5030799",
"0.50131583",
"0.49955955",
"0.49943578",
"0.49829373"
] | 0.635483 | 1 |
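A schematic of the create-and-merge pattern in the record above: the caller's input properties are deep-copied and then updated with the server's response, so returned fields win on overlap. The fake_post helper is an invented stand-in for the real session call, not part of any library API.

import copy

def fake_post(uri, body):
    # Invented stand-in for the REST call; a real server would return
    # generated fields such as the new object's URI.
    return {"object-uri": uri + "/sgt-1"}

def create_sketch(properties=None):
    properties = properties or {}
    result = fake_post("/api/storage-group-templates", properties)
    props = copy.deepcopy(properties)
    props.update(result)     # response overrides the input on overlapping keys
    return props

print(create_sketch({"name": "template-a", "cpc-uri": "/api/cpcs/cpc-1"}))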
Adds the given log string to the database | def add_log_entry_string(self, logstring):
parsed = self.parse_log_entry(logstring)
self.add_log_entry(parsed) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_to_db(self, loglines):\n self.database = self.database.append(loglines, ignore_index=True)",
"def InsertLog():",
"def add_log(self, log):\n try:\n if log.name in self.existing_logs:\n raise Exception(\"A log with the name already exists\")\n # if len(log) == 0:\n if not log:\n raise Exception(\"No valid data in log\")\n if self.__len__() < len(log):\n raise Exception(\"length does not match\")\n # add new row to curves table\n with sqlite3.connect(self.db_file) as conn:\n cur = conn.cursor()\n cur.execute(\"SELECT COUNT(*) FROM curves\")\n index = cur.fetchone()[0] + 1\n curvesTuple = (index, log.name, log.units, log.descr)\n cur.execute(\"INSERT INTO curves VALUES (?, ?, ?, ?)\",\n curvesTuple)\n # add new column to data table\n with sqlite3.connect(self.db_file) as conn:\n cur = conn.cursor()\n cur.execute(\"ALTER TABLE data \\\n ADD COLUMN {} REAL\".format(log.name.lower()))\n dataList = [(a,) for a in log.data]\n for de, da in zip(log.depth, dataList):\n cur.execute(\"UPDATE data \\\n SET {} = ?\\\n WHERE dept = {}\".format(\n log.name.lower(), de), da)\n except Exception as inst:\n print(inst.args[0])",
"def add_account(self, log, pword):\r\n #Placeholder : insert variables in sqlite3\r\n self.curs.execute(f\"\"\"INSERT INTO main_table VALUES (?, ?)\"\"\", (log, pword))\r\n self.conn.commit()",
"def add_log(self, text, user=None):\n entry = GameLog(game=self, text=text, player=user).save()\n return entry",
"def add_log(self, text, user=None):\n entry = GameLog(game=self, text=text, player=user).save()\n return entry",
"def writing_log(self, conn, message):\n c = conn.cursor()\n c.execute(\"INSERT INTO logs VALUES('\" + str(datetime.now()) + \"', ' \" + message + \"')\")\n conn.commit()",
"def log(self, my_string):\n ## Open/Close each call is ridiculously inefficient.\n ## This was just a quick solution to build from\n ## TODO: Improve the logging mechanism\n logto = open(self.logfile, 'a')\n logto.write(my_string)\n logto.close()",
"def add_log(conn, task, start_time):\n cursor = conn.cursor()\n cursor.execute('INSERT INTO timelogs (task, start_time) VALUES (?, ?);', (task, start_time))",
"def __add_log(self, logType: int, message: str) -> None:\n\n if isinstance(message, BaseException):\n ex: BaseException = message\n if hasattr(ex, 'message'):\n message = ex.message\n else:\n message = ex.__str__()\n\n message += f'\\n{traceback.format_exc().__str__()}'\n\n if message is None:\n return\n\n if isinstance(message, str) and message.strip().__len__() == 0:\n return\n\n st = stack()\n caller: Traceback = getframeinfo(st[2][0])\n log = LogModel()\n log.log_level = logType\n log.filename = caller.filename\n log.function = caller.function\n log.line_number = caller.lineno\n log.message = message\n log.creation_date = datetime.now()\n\n self.__logs.append(log)",
"def add_log(self, log, name=None, unit=None):\n log_name = log.descr.replace(' ', '_')\n log_unit = log.units\n if name is not None:\n log_name = name\n if unit is not None:\n log_unit = unit\n if log_name not in self.logs:\n temp_dataframe = pd.DataFrame(\n data={\n 'Depth(m)':log.depth,\n '{}({})'.format(\n log_name, log_unit): log.data})\n self.data_frame = self.data_frame.join(\n temp_dataframe.set_index(\"Depth(m)\"), on=\"Depth(m)\")\n else:\n raise Warning(\"{} already exists in well {}\".format(\n log_name, self.well_name))",
"def store(**kwargs):\r\n stored = AppLog(**kwargs)\r\n DBSession.add(stored)",
"def write_log_to_db(data, error):\n data.content = error\n data.timestamp = Timestamp.timestamp()\n data.dataset = data.name\n log_to_db(data)",
"def add_log(self, log):\n log = str(datetime.datetime.now()) + \": \"+log+\"\\n\"\n print(log)\n self.logs.append(log)\n if len(self.logs) > 10:\n self.append_to_logfile()",
"def addLog(log_info,seed_keyword=\"\",meta_keyword=\"\"):\n payload = {\n \"user\" : os.getlogin(),\n \"seed_keyword\":seed_keyword,\n \"meta_keyword\":meta_keyword,\n \"log_info\":log_info\n }\n res = requests.post('{}add/issue/'.format(base_url),data=payload)\n return res.status_code",
"def add_log_exec(self, script, exec_error):\n # Prepare variables\n err_v = 1 if exec_error else 0\n # Insert log \n cursor = self.dbcon.cursor()\n query = \"INSERT INTO log_exec (script, was_error) VALUES (%s, %s)\"\n cursor.execute(query, (script, err_v))\n # Commit remaining queries\n self.dbcon.commit()\n cursor.close()",
"async def add_log(self, value):\n log_string = value\n print(log_string)\n self.embed.title = log_string\n self.embed.timestamp = datetime.datetime.now()\n self.embed.description = \"\"",
"def insertLog(data=None):\n query = \"\"\"INSERT INTO log_gps (name, address, evento, fecha, posicion, ubicacion, grados, altura, satelites, estado_data, trama) \n VALUES (%(id)s, %(address)s, \n %(codEvent)s, %(datetime)s, \n %(position)s, %(geocoding)s, \n %(course)s, %(altura)s, %(gpsSource)s, \n %(ageData)s, %(data)s)\n \"\"\"\n db = PgSQL()\n return db.exe(query, data)",
"def append_log_message(self, text):\n self._new_logs.append(text)",
"def insert_data(self, json_string):\n json_ob = json.loads(json_string)\n fields = ', '.join([f for f in json_ob])\n values = ', '.join([str(json_ob[u.unicode_to_string(f)])\n for f in json_ob])\n sql = (\"INSERT INTO snapshot_log (timestamp, \" +\n fields + \") VALUES (NOW(), \" + values +\n \")\")\n cur = self.cursor()\n try:\n cur.execute(sql)\n #self.conn.commit()\n except sqlc.Error as e:\n print (\"Error #{0}: {1}\\nCouldn't insert\\nsql={2}\".\n format(e.errno, e.msg, sql))\n except Exception as e:\n print (\"Error: {0}\\nCouldn't insert\\nsql={1}\".\n format(e.message, sql))\n finally:\n self.close()",
"def add_log_entry(self, log_entry):\n self.log_entries.append(log_entry)",
"def Log(self, msg):\n self.DBExecute(\"INSERT INTO Log (class, instance, event) VALUES (%s, %s, %s)\",\n self.__class__.__name__, self._instance, msg)\n print '%s/%s: %s' % (self.__class__.__name__, self._instance, msg)",
"def DB_Insert(hit, logfile, count, timestamp):\n items = { 'hit' : hit.replace(\"'\",\"''\"),\n 'logfile' : logfile,\n 'count' : count,\n 'date' : timestamp }\n cursor.execute('''INSERT INTO master (hit, logfile, count, date)\n VALUES (\n '%(hit)s',\n '%(logfile)s',\n %(count)s,\n '%(date)s')''' % items)\n con.commit()",
"def log_request(req: 'Flask_Request', results: str) -> None:\n\n with UseDatabase(app.config['dbconfig']) as cursor:\n _SQL_INSERT = \"\"\"insert into log_table\n (phrase, letters, ip, browser_string, results)\n values\n (?, ?, ?, ?, ?)\"\"\"\n\n cursor.execute(_SQL_INSERT,\n (req.form['phrase'], req.form['letter'], req.remote_addr, req.user_agent.browser,\n results,))",
"def sql_insert(self, sqlstr):\n get_connection().insert_raw(sqlstr)\n return 1",
"def insert(self, string: str):\n features = self._ngram.get_features(string)\n # NOTE: Skip short strings that do not produce any features.\n if features:\n self._db.set(\n {\n 'term': string,\n 'sz': len(features),\n 'ng': features,\n },\n # NOTE: Unique document key for database with pipeline enabled.\n key=(len(features), features),\n )",
"def add_record(self, record):\n logging.debug('Adding new entry to table')\n if not self._dbconnect or not self._cursor:\n raise Exception('Invalid call to Context Manager method!')\n\n date = record.get('date', '')\n time = record.get('time', '')\n location = record.get('location', '')\n node_id = record.get('nodeID', '')\n\n if '' in (date, time, node_id, location):\n raise Exception('Invalid SecuritySystemDB record!')\n\n self._cursor.execute(\"insert into {} values(?, ?, ?, ?)\".format(self._name),\n (date, time, location, node_id))",
"def register_log(self, log):\n self._log = log",
"def add_log(self, logType: int, message: str) -> None:\n\n if logType not in self.__log_levels:\n logType = self.NOTSET\n\n self.__add_log(logType, message)",
"def putLog(self, log):\n \n moduleCoordinator.ModuleCoordinator().addEvent(moduleCoordinator.LOG_EVENT, log, self.hash, self.config)"
] | [
"0.74086714",
"0.6943016",
"0.67869294",
"0.6622376",
"0.64174575",
"0.64174575",
"0.6402143",
"0.630372",
"0.62612665",
"0.620834",
"0.61886513",
"0.6175429",
"0.61297804",
"0.61234784",
"0.61194706",
"0.61027837",
"0.60944754",
"0.60650307",
"0.6048371",
"0.60200405",
"0.5979549",
"0.59399265",
"0.5919508",
"0.58837235",
"0.58742267",
"0.5835112",
"0.5820944",
"0.5820469",
"0.58093363",
"0.5806628"
] | 0.7881291 | 0 |
parses the given logstring into MAC source, MAC dest, IP source, IP dest, IP source port, IP dest port, timestamp | def parse_log_entry(self, logstring):
splitLogInfo = logstring.partition(self.LOGFILE_PREFIX)
if len(splitLogInfo[1]) == 0:
raise errorhandler.LogDatabaseError("separator {} not found in log entry".format(self.LOGFILE_PREFIX))
str2 = splitLogInfo[2]
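        # Determine the entry type from the known postfix that immediately follows the prefix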
entrytype = None
for k, v in self.validpostfixes.items():
if splitLogInfo[2][0:len(k)] == k:
entrytype = v
break
if entrytype is None:
raise errorhandler.LogDatabaseError("Invalid log type: {}".format(splitLogInfo[2][0:10]))
try:
timestringtrimmed = logstring.partition(".")[0]
timestamp = datetime.datetime(*time.strptime(timestringtrimmed, "%Y-%m-%dT%H:%M:%S")[:6])
except ValueError:
raise errorhandler.LogDatabaseError("Value error parsing timestamp out of log entry")
mactokens = {
"MAC source": "MAC source = ",
"MAC dest": "MAC dest = ",
}
indices = []
lastidx = 0
for k, v in mactokens.items():
nextidx = str2.find(v, lastidx)
if nextidx < 0:
raise errorhandler.LogDatabaseError("{} not found in log entry".format(k))
indices.append(nextidx + len(v))
lastidx = nextidx
srcMAC = str2[indices[0] : indices[0] + 17]
dstMAC = str2[indices[1] : indices[1] + 17]
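        # Tokens preceding the IP addresses and ports; these fields exist only for IP-type entries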
iptokens = {
"IP source": "IP SRC=",
"IP dest": "IP DST=",
"IP source port": "SPT=",
"IP dest port": "DPT="
}
if entrytype == LogEntryType.UNKNOWN_IP or entrytype == LogEntryType.IP_TRAFFIC_IN \
or entrytype == LogEntryType.IP_TRAFFIC_OUT or entrytype == LogEntryType.DROP:
for k, v in iptokens.items():
nextidx = str2.find(v, lastidx)
if nextidx < 0:
raise errorhandler.LogDatabaseError("{} not found in log entry".format(k))
indices.append(nextidx + len(v))
lastidx = nextidx
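            # indices[2:6] now point just past the IP SRC/DST and SPT/DPT markers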
srcIP = extract_ip(str2, indices[2])
dstIP = extract_ip(str2, indices[3])
srcPort = str2[indices[4]:].partition(" ")[0]
dstPort = str2[indices[5]:]
else:
srcIP = ""
dstIP = ""
srcPort = ""
dstPort = ""
logdataentry = LogDataEntry(entry_type=entrytype, timestamp=timestamp, srcMAC=srcMAC, dstMAC=dstMAC, srcIP=srcIP, dstIP=dstIP,
srcPort=srcPort, dstPort=dstPort)
return logdataentry | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parseMonitorLog(log_file, attack_props):\n if not os.path.exists(log_file):\n return\n report = open(log_file, 'r')\n lines = report.readlines()\n #print lines\n report.close()\n \n readingStations = False\n readingAps = False\n for line in lines:\n line = line.strip()\n #print line\n if not readingStations and not readingAps:\n if line.startswith(\"BSSID\"):\n readingAps = True\n continue\n elif line.startswith(\"Station\"):\n readingStations = True\n continue\n elif readingAps:\n if len(line) < 4:\n readingAps =False\n else:\n fields = line.split(',')\n #print fields\n ap_mac = fields[0].strip()\n if attack_props.hasAP(ap_mac):\n ap = attack_props.getActiveAP(ap_mac)\n else:\n ap = AccessPoint(ap_mac, attack_props.log_path)\n attack_props.addActiveAP(ap)\n ap.update(fields)\n elif readingStations and len(line) > 4:\n fields = line.split(',')\n station_mac = fields[0].strip()\n ap_mac = fields[5].strip()\n if attack_props.hasAP(ap_mac):\n ap = attack_props.getAP(ap_mac) \n if ap.stations.has_key(station_mac):\n station = ap.stations[station_mac]\n else:\n station = Station(station_mac)\n ap.stations[station_mac] = station\n station.ap = station\n station.update(fields)",
"def parse(self, log_message):\n\n # try to resolve the IP address\n try:\n ipaddr = log_message[\"SOURCEIP\"].decode(\"utf-8\")\n\n hostname, aliaslist, ipaddrlist = socket.gethostbyaddr(ipaddr)\n # print(ipaddr)\n # print(hostname)\n parts = str(hostname).split(\".\")\n name = parts[0]\n # print(name)\n if len(parts) > 1:\n log_message[\"HOST\"] = name\n except:\n return False\n\n # return True, other way message is dropped\n return True",
"def process_log_line(self, line):\n int_map = self.int_map\n timestamp = line[0:26]\n if len(timestamp) >= 26:\n msg = {}\n try:\n # %Y-%m-%d %H:%M:%S.%f - 2017-06-27 13:46:10.048844\n day = int_map[timestamp[8:10]]\n hour = int_map[timestamp[11:13]]\n minute = int_map[timestamp[14:16]]\n second = int_map[timestamp[17:19]]\n usecond = int_map[timestamp[20:22]] * 10000 + \\\n int_map[timestamp[22:24]] * 100 + int_map[timestamp[24:26]]\n event_time = (hour * 3600.0 + minute * 60.0 + second) + (usecond / 1000000)\n if day == self.start_day:\n elapsed = event_time - self.start_time\n else:\n elapsed = event_time + (float(3600 * 24) - self.start_time)\n msg['timestamp'] = elapsed\n if msg['timestamp'] >= 0:\n offset = line.find(']: ', 32)\n if offset >= 0:\n try:\n thread = line[34:offset]\n separator = thread.find(':')\n if separator >= 0:\n thread = thread[separator + 1:].strip()\n msg['thread'] = thread\n msg['level'] = line[offset + 3:offset + 4]\n msg_start = line.find(' ', offset + 5)\n if msg_start >= 0:\n msg['category'] = line[offset + 5:msg_start]\n msg['message'] = line[msg_start + 1:]\n if msg['category'] == 'nsHttp':\n if msg['thread'] == 'Main Thread':\n self.main_thread_http_entry(msg)\n elif msg['thread'] == 'Socket Thread':\n self.socket_thread_http_entry(msg)\n elif msg['category'] == 'nsSocketTransport':\n self.socket_transport_entry(msg)\n elif msg['category'] == 'nsHostResolver':\n self.dns_entry(msg)\n except Exception:\n logging.exception('Error processing log line')\n except Exception:\n pass",
"def parse(self, line):\n try:\n (year, month, day, hour, minute, second, microseconds, offset_hour, offset_minute, source, process, logentry) = re.match('^(\\d\\d\\d\\d)-(\\d\\d)-(\\d\\d)T(\\d\\d):(\\d\\d):(\\d\\d)\\.([\\d]+)\\+(\\d\\d):(\\d\\d) ([a-z]+)\\[([a-zA-Z0-9_.]+)\\]: ([0-9a-z-A-Z\\-_\\.\\[\\]:\\?\\#\\\",/\\ ={}\\'\\(\\)<>]+)$', line).groups()\n except:\n pass\n \n try:\n parsed_data = dict()\n parsed_data['timestamp'] = \" \".join([\"-\".join([year, month, day]), \":\".join([hour, minute, second])])\n parsed_data['log_time'] = datetime.datetime(int(year), int(month), int(day), int(hour), int(minute), int(second))\n parsed_data['log_source'] = source\n parsed_data['log_type'] = process\n except (AttributeError, UnboundLocalError):\n PARSE_ERRORS.append(line)\n return False\n\n #TODO: This still needs work on spaces in values surrounded by \" \" \n if parsed_data['log_source'] == \"heroku\":\n if logentry.__len__() > 1:\n logentry = re.sub(', ', ',', logentry)\n line_chunks = re.split(' ', logentry)\n for chunk in line_chunks:\n line_chunks = re.split('=', chunk)\n if line_chunks.__len__() > 2:\n #fwd and path are a little clunky to parse\n pass\n elif line_chunks.__len__() > 1:\n parsed_data[line_chunks[0]] = line_chunks[1]\n else:\n pass\n else:\n return False\n else:\n # TODO: [app] \n # Needs parsing. Do that here.\n return False\n\n return parsed_data",
"def parseLog(self, log_lines):\n abstract",
"def parse_header(line):\n # 2015-09-27 14:55:41 UTC [192.0.2.1]:56721 -> [192.0.2.2]:443 (37):\n m = re.match(r'(\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2} \\S+) \\[(.+?)\\]:(\\d+) -> \\[(.+?)\\]:(\\d+) \\((\\d+|EOF)\\):?', line)\n if not m:\n raise LogSyntaxError(line)\n res = {}\n res['timestamp'] = m.group(1)\n res['src_addr'] = m.group(2)\n res['src_port'] = int(m.group(3))\n res['dst_addr'] = m.group(4)\n res['dst_port'] = int(m.group(5))\n if m.group(6) == 'EOF':\n res['eof'] = True\n else:\n res['eof'] = False\n res['size'] = int(m.group(6))\n return res",
"def _parse_run_script_raw_logs(log_string):\n pattern = r'((?=\\[(?:Log-Start|Log-End|Output|Info|Warning|Error|Debug) ' \\\n r'[0-9][0-9]/[0-9][0-9]/[0-9][0-9][0-9][0-9] [0-9][0-9]:[0-9][0-9]:[0-9][0-9]\\])|(?=\\[STATUS\\]))'\n log_list = _regex_split(pattern, log_string)\n # Remove empty strings in logs\n log_list = list(filter(None, log_list))\n # Format them in dictionary to parse out level\n logs_dict_list = []\n for log in log_list:\n log_dict = {}\n log_dict['message'] = log.strip('\\n')\n # Set log level property\n if '[STATUS]::' in log_dict['message']:\n log_dict['level'] = 'STATUS'\n else:\n log_dict['level'] = log_dict['message'].split(' ', 1)[0][1:]\n logs_dict_list.append(log_dict)\n\n return logs_dict_list",
"def parse_server_log(raw_lines):\n\n # from http://www.seehuhn.de/blog/52\n parts = [\n r'(?P<host>\\S+)', # host %h\n r'\\S+', # indent %l (unused)\n r'(?P<user>\\S+)', # user %u\n r'\\[(?P<time>.+)\\]', # time %t\n r'\"(?P<request>.+)\"', # request \"%r\"\n r'(?P<status>[0-9]+)', # status %>s\n r'(?P<size>\\S+)', # size %b (careful, can be '-')\n r'\"(?P<referer>.*)\"', # referer \"%{Referer}i\"\n r'\"(?P<agent>.*)\"', # user agent \"%{User-agent}i\"\n ]\n # eg\n # 66.249.75.112 - - [27/May/2013:00:00:00 +0100] \"GET /qin-xie/rss HTTP/1.1\" 200 3560 \"-\" \"Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_1 like Mac OS X; en-us) AppleWebKit/532.9 (KHTML, like Gecko) Version/4.0.5 Mobile/8B117 Safari/6531.22.7 (compatible; Googlebot-Mobile/2.1; +http://www.google.com/bot.html)\"\n\n log_cracker = re.compile(r'\\s+'.join(parts)+r'\\s*\\Z')\n\n for line in raw_lines:\n m = log_cracker.match(line)\n assert m is not None\n yield (m.group('host'),\n m.group('user'),\n m.group('time'),\n m.group('request'),\n m.group('status'),\n m.group('size'),\n m.group('referer'),\n m.group('agent'))",
"def parse_event_attlog(self):\n uid = ''\n ver_type = -1\n date_str = ''\n if self.last_event_code == DEFS.EF_ATTLOG:\n uid = self.last_payload_data[0:9].decode('ascii').\\\n replace('\\x00', '')\n ver_type = struct.unpack('<H', self.last_payload_data[24:26])[0]\n date_str = \"20%i/%i/%i %i:%i:%i\" %\\\n tuple(self.last_payload_data[26:32])\n\n return [uid, ver_type, date_str]",
"def parseLog(self, log):\n return 0",
"def parse_log_entry(line):\n\n line_pattern = r\"^(?P<host>.*) - - \\[(?P<timestamp>.*)\\] \" \\\n \"\\\"(?P<request>.*)\\\" (?P<http_code>\\d\\d\\d) (?P<bytes>.*)$\"\n line_groups = re.match(line_pattern, line)\n request_pattern = r\"^(?P<request_method>[A-Z]*) (?P<resource>\\S+) ?.*$\"\n request_groups = re.match(request_pattern, line_groups.group('request'))\n host = line_groups.group('host')\n timestamp = line_groups.group('timestamp')\n timestamp = parse_date(line_groups.group('timestamp'))\n http_code = int(line_groups.group('http_code'))\n num_bytes = line_groups.group('bytes')\n num_bytes = 0 if num_bytes == '-' else int(num_bytes)\n if request_groups:\n request_method = request_groups.group('request_method')\n resource = request_groups.group('resource')\n else:\n request_method = None\n resource = None\n return ParsedRequest(\n host, timestamp, request_method,\n resource, http_code, num_bytes)",
"def ParseLog(self, time_ranges):\r\n # Indicates whether it has detected a scan has started\r\n scan_status = 0\r\n\r\n for line in self.log_file:\r\n if re.search(self.karnak_error, line):\r\n self.FilterError(time_ranges, self.GetLogTime(line))\r\n continue\r\n\r\n if re.search(self.start_capture, line):\r\n if scan_status:\r\n self.FilterScan(time_ranges, start_time, None, None)\r\n scan_status = 1\r\n start_time = self.GetLogTime(line)\r\n continue\r\n\r\n if re.search(self.end_capture, line):\r\n if scan_status:\r\n end_time = self.GetLogTime(line)\r\n scan_status = 2\r\n continue\r\n\r\n if re.search(self.upload_capture, line):\r\n if scan_status == 2:\r\n scan_status = 0\r\n upload_time = self.GetLogTime(line)\r\n self.FilterScan(time_ranges, start_time, end_time, upload_time)",
"def seperate_data(string_log):\n seperated_data = []\n ending_len = len(STOP_DATA)+1\n starting_len = len(START_DATA)+1\n\n moving_pointer = 0\n chunk_end = 0\n while 1:\n moving_pointer = string_log.find(START_DATA, moving_pointer)\n chunk_end = string_log.find(STOP_DATA, moving_pointer)\n\n if moving_pointer == -1 or chunk_end == -1:\n break\n\n chunk_end += ending_len\n\n seperated_data.append(string_log[moving_pointer+starting_len+1:\n chunk_end-ending_len-1])\n moving_pointer = chunk_end - starting_len\n return seperated_data, moving_pointer",
"def parse_packets(pcap):\n # For each packet in the pcap process the contents\n flow_Info = []\n times = 0\n for timestamp, buf in pcap:\n times += 1\n tmp_flow_Info = {}\n\n # Unpack the Ethernet frame (mac src/dst, ethertype)\n eth = dpkt.ethernet.Ethernet(buf)\n # Unpack the data whthin the Ethernet frame (the IP packet)\n ip = eth.data\n\n # if protocol(ip.p) is not UDP(17) ,skip this packet\n if ip.p != 17:\n continue\n\n udp = ip.data\n # Temp_data = parse_data(eth.data.udp.data)\n # Filter CoAP by port\n if(udp.sport != 5683 or udp.dport != 5683):\n continue\n\n str_udp_data = parse_data(eth.data.udp.data)\n # skip packets of Non_confirmable\n if str_udp_data[0] == '5': \n continue\n\n cycle = 0\n index = 0\n Udp_data = []\n \n len_str_udp_data = len(str_udp_data)\n while cycle < (len_str_udp_data//3+1):\n # Udp_data.append(int('0x'+Str_Udp_data[index:index + 2], 16))\n Udp_data.append(int('0x' + str_udp_data[index:index + 2], 16))\n cycle += 1\n index += 3\n tmp_flow_Info['udp_data'] = (Udp_data)\n\n # confirmable or ack\n tmp_flow_Info['Coap_type'] = str_udp_data[0]\n #print(str_udp_data) \n \n # skip space and get \"Message ID\" \n HexMide = str_udp_data[6:8] + str_udp_data[9:11]\n tmp_flow_Info['Mid'] = int('0x'+HexMide, 16)\n\n tmp_flow_Info['Timestamp'] = str(datetime.datetime.fromtimestamp(timestamp))\n # print('Ethernet Frame: ', mac_addr(eth.src), mac_addr(eth.dst), eth.type)\n tmp_flow_Info['src'] = inet_to_str(ip.src)\n tmp_flow_Info['dst'] = inet_to_str(ip.dst)\n\n tmp_flow_Info['sport'] = udp.sport\n tmp_flow_Info['dport'] = udp.dport\n flow_Info.append(tmp_flow_Info)\n\n return flow_Info",
"def parse(self):\n i = 0\n while i < len(self.__lines):\n line = self.__lines[i]\n dt = re.match(r\"(\\d{4}-\\d{1,2}-\\d{1,2}\\s\\d{1,2}:\\d{1,2}:\\d{1,2})\", line)\n if not dt:\n i += 1\n continue\n log = {\n \"datetime\": dt.group()\n }\n line = line[dt.end()+1:].rstrip(\"\\n\")[::-1]\n qq_flag = line.find(\"(\")\n log[\"qq\"] = line[qq_flag-1:0:-1]\n log[\"name\"] = line[:qq_flag:-1].strip(\" \")\n i += 1\n log[\"content\"] = self.__lines[i].rstrip(\"\\n\")\n while self.__lines[i+1] != \"\\n\":\n i += 1\n log[\"content\"] += \" \" + self.__lines[i].rstrip(\"\\n\")\n self.__logs.append(log)\n i += 2",
"def parse_log_start_time(log_data):\n try:\n # Get the log starting time\n time_match = search(\n r\"Log Started at (\\w+, \\w+ \\d{2}, \\d{4} \\d{2}:\\d{2}:\\d{2})\",\n log_data)\n log_start_time = datetime.strptime(\n time_match.group(1), \"%A, %B %d, %Y %H:%M:%S\")\n\n # Get the timezone of the log\n timezone_match = search(\n r\"<\\d{2}:\\d{2}> \\w+ \\w+: [(]g_timezone,([^)]*)[)]\", log_data)\n timezone_info = timezone(timedelta(hours=int(timezone_match.group(1))))\n\n return log_start_time.replace(tzinfo=timezone_info)\n except Exception:\n print(\"Something is wrong with the log file!\")",
"def parse_log(driver, timestamp):\n log = driver.get_log('performance')\n log = log[bisect.bisect_left(\n [entry['timestamp'] for entry in log], timestamp):]\n log = [json.loads(entry['message'])['message'] for entry in log]\n\n requests = []\n data_received = []\n for message in log:\n if message['method'] == 'Network.requestWillBeSent':\n request_id = message['params']['requestId']\n request_url = message['params']['request']['url']\n initiator = message['params']['initiator']\n type_ = message['params']['type']\n if initiator['type'] == 'script':\n initiator = initiator['stack']\n while 'parent' in initiator:\n initiator = initiator['parent']\n initiator = initiator['callFrames'][-1]['url']\n elif initiator['type'] == 'parser':\n initiator = initiator['url']\n else:\n initiator = initiator['type']\n requests.append({'id': request_id, 'url': request_url,\n 'initiator': initiator, 'type': type_})\n elif message['method'] == 'Network.dataReceived':\n data_received.append(message['params']['requestId'])\n return [r for r in requests if r['id'] in data_received]",
"def processLogLine(logline):\n logline = logline.split()\n log = LogLine(logline[0], logline[1], logline[2], logline[4],\\\n float(logline[6]), float(logline[8]), float(logline[10]), logline[12])\n return log",
"def test_parsing_webserver_logs(file, host_ip):\n\n ip_regexp = r\"\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\" # regexp for searching ip in logfile\n client_error_regexp = r'\" 4\\d{2} ' # 4xx number\n server_error_regexp = r'\" 5\\d{2} ' # 5xx number\n request_time_regexp = r'] \\d+ \"' # any number between ] and \" (configured apache's /D (request time by\n # microseconds logging parameter to be between these symbols)\n timestamp_regexp = r'\\d{2}/\\D+/\\d{4}:\\d{2}:\\d{2}:\\d{2}'\n\n values = {}\n top_count = \"10\"\n\n def file_last_timestamp(file):\n with open(file) as f:\n last_line = list(f)[-1]\n print(last_line)\n timestamp = re.search(timestamp_regexp, last_line)\n if timestamp:\n return str(timestamp[0]).replace('/', '.').replace(':', '.')\n else:\n return path.basename(file)\n\n\n def copy_unique_elements(list_of_all):\n \"\"\"\n Copies unique elements from list to unique_list\n :param list_of_all:\n :return:\n \"\"\"\n unique_list = []\n for i in list_of_all:\n if i not in unique_list:\n unique_list.append(i)\n if len(unique_list) == int(top_count):\n break\n return unique_list\n\n def findall_unique_exps(regexp, data):\n \"\"\"\n Returns list of unique expressions in data (may be limited by length\n if copy_unique_elements function's used)\n :param regexp:\n :param data:\n :return:\n \"\"\"\n list_of_all = re.findall(regexp, data)\n count = Counter(list_of_all)\n sorted_list = sorted(list_of_all, key=lambda x: (count[x], x), reverse=True)\n return copy_unique_elements(sorted_list)\n\n def findall_unique_exps_lines(regexp, logfile):\n \"\"\"\n Returns a list of lines with unique expressions in logfile (may be limited by length\n if copy_unique_elements function's used)\n :param regexp:\n :param logfile:\n :return:\n \"\"\"\n list_of_all = []\n with open(logfile) as logfile:\n for line in logfile:\n exp = re.compile(regexp)\n if exp.search(line):\n list_of_all.append(line)\n return copy_unique_elements(list_of_all)\n\n def requests_count(regexp, data):\n \"\"\"\n Returns requests count containing expression like 'GET' or 'POST'\n :param regexp:\n :param data:\n :return:\n \"\"\"\n requests_list = re.findall(regexp, data)\n return int(list(Counter(requests_list).values())[0])\n\n def requests_exec_time_list(regexp, data, logfile):\n \"\"\"\n Returns lines with longest requests (number of lines is set in top_count variable)\n :param regexp:\n :param data:\n :param logfile:\n :return:\n \"\"\"\n # find every request time and add to collection\n request_time_list = re.findall(regexp, data)\n request_time_count = Counter(request_time_list)\n sorted_request_time_list = sorted(request_time_list, key=lambda x: (request_time_count[x], x), reverse=True)\n # convert strings in requests time integer values\n for index, item in enumerate(sorted_request_time_list):\n sorted_request_time_list[index] = int(''.join(c for c in item if c.isdigit()))\n # collect top <top_count> longest requests\n top_request_time_list = []\n for i in range(int(top_count)):\n max_request = max(sorted_request_time_list)\n top_request_time_list.append(max_request)\n sorted_request_time_list.remove(max_request)\n # search top <top_count> longest requests in a file and collect lines with them to a list\n top_request_lines = []\n with open(logfile) as f:\n for line in f:\n for i in top_request_time_list:\n if str(i) in line:\n top_request_lines.append(line)\n return top_request_lines\n\n def parse(log):\n\n timestamp = file_last_timestamp(log)\n json_file_name = 'log_parse_{}.json'.format(timestamp)\n 
open(json_file_name, 'w').close() # clears json-file for every test run\n\n print('\\n{}'.format(log)) # to see what log was parsed\n\n unique_client_error_lines_list = findall_unique_exps_lines(client_error_regexp, log)\n unique_server_error_lines_list = findall_unique_exps_lines(server_error_regexp, log)\n\n with open(log) as f:\n data = f.read()\n\n unique_ips_list = findall_unique_exps(ip_regexp, data)\n\n get_count = requests_count('GET', data)\n post_count = requests_count('POST', data)\n\n total_requests_count = get_count + post_count\n\n top_request_lines = requests_exec_time_list(request_time_regexp, data, log)\n\n with open(json_file_name, 'a') as json_file:\n values[\"{}\".format(log)] = {'TOP {} IPs'.format(top_count): unique_ips_list,\n 'Total requests count': total_requests_count,\n 'GET requests count': get_count,\n 'POST requests count': post_count,\n 'TOP {} client errors'.format(top_count): unique_client_error_lines_list,\n 'TOP {} server errors'.format(top_count): unique_server_error_lines_list,\n 'TOP {} longest requests'.format(top_count): top_request_lines}\n json.dump(values, json_file)\n\n if isinstance(file, list):\n for i in file:\n try:\n parse(i)\n except UnicodeDecodeError:\n print(\"File wasn't parsed (check format) ({})\".format(file))\n pass\n else:\n try:\n parse(file)\n except UnicodeDecodeError:\n print(\"File wasn't parsed (check format) ({})\".format(file))",
"def parse_log():\n error_dict = {}\n user_dict = {}\n pattern = r': (INFO|ERROR) (.*) \\((.*)\\)'\n\n with open('syslog.log', 'r') as log_file:\n for line in log_file.readlines():\n capture_groups = re.findall(pattern, line)\n main = capture_groups[0][0]\n detail = capture_groups[0][1]\n user = capture_groups[0][2]\n if user not in user_dict:\n user_dict[user] = {}\n if main == 'ERROR':\n error_dict[detail] = error_dict.get(detail, 0) + 1\n user_dict[user]['ERROR'] = user_dict[user].get('ERROR', 0) + 1\n else:\n user_dict[user]['INFO'] = user_dict[user].get('INFO', 0) + 1\n\n # cover use cases where users never have 'ERRORS' or 'INFO' events in their usage history.\n for user in user_dict:\n if 'INFO' not in user_dict[user]:\n user_dict[user]['INFO'] = user_dict[user].get('INFO', 0)\n if 'ERROR' not in user_dict[user]:\n user_dict[user]['ERROR'] = user_dict[user].get('ERROR', 0)\n\n sorted_errors = sorted(error_dict.items(), key=operator.itemgetter(1), reverse=True)\n sorted_users = sorted(user_dict.items())\n logging.debug(sorted_errors)\n logging.debug(sorted_users)\n\n return sorted_errors, sorted_users",
"def parse_log(path_to_log):\n regex_iteration = re.compile('Iteration (\\d+), loss = ([\\.\\deE+-]+)')\n regex_train_output = re.compile('Train net output #(\\d+): (\\S+) = ([\\.\\deE+-]+)')\n regex_learning_rate = re.compile('lr = ([\\.\\deE+-]+)')\n regex_test_output = re.compile('Test net output #(\\d+): detection_eval = ([\\.\\deE+-]+)')\n\n\n # Pick out lines of interest\n iteration = 0\n loss = -1\n learning_rate = 0.001\n train_dict_list = []\n train_row = None\n test_score=0.0\n\n logfile_year = extract_seconds.get_log_created_year(path_to_log)\n with open(path_to_log) as f:\n start_time = extract_seconds.get_start_time(f, logfile_year)\n last_time = start_time\n\n for line in f:\n iteration_match = regex_iteration.search(line)\n if iteration_match:\n iteration = float(iteration_match.group(1))\n loss = float(iteration_match.group(2))\n try:\n time = extract_seconds.extract_datetime_from_line(line,\n logfile_year)\n except:\n # Skip lines with bad formatting, for example when resuming solver\n continue\n\n # if it's another year\n if time.month < last_time.month:\n logfile_year += 1\n time = extract_seconds.extract_datetime_from_line(line, logfile_year)\n last_time = time\n\n seconds = (time - start_time).total_seconds()\n\n learning_rate_match = regex_learning_rate.search(line)\n\n if learning_rate_match:\n learning_rate = float(learning_rate_match.group(1))\n\n test_score_match = regex_test_output.search(line)\n if test_score_match:\n test_score = float(test_score_match.group(2))\n\n train_dict_list, train_row = parse_line_for_net_output(\n regex_train_output, train_row, train_dict_list,\n line, iteration, seconds, learning_rate,loss,test_score\n )\n\n\n return train_dict_list",
"def parse_raw_string(string):\n raw = string.split('\\n')\n # ignore time elapsed between the recording start\n # and the arrival of the first IR signal\n raw = raw[2:]\n return ''.join(raw).split()",
"def _parse_logs_for_results(self, logs):\n results = {}\n for line in logs.split(\"\\n\"):\n split_line = line.split(\":\")\n if len(split_line) == 2:\n results[split_line[0].strip()] = split_line[1].strip()\n if results == {}:\n results = None\n return results",
"def preprocess_log(self, log_file_full_path: str) -> Union[Dict, None]:\n\n try:\n with open(log_file_full_path, 'r') as log_file:\n\n switcher, coords = {}, {}\n\n self.monitor.info('-> Started to parse log file...')\n for line in log_file:\n try:\n if 'control_switch_on' in line:\n switch, ts = json.loads(line).values()\n switcher[ts] = int(switch)\n elif 'geo' in line:\n geo, ts = json.loads(line).values()\n coords[ts] = geo\n else:\n self.monitor.warning('-> Unknown happened on line while parsing:\\n', line)\n continue\n except Exception as e:\n self.monitor.exception(\" Something bad happened\", repr(e))\n self.monitor.info(f'-> Parsed log with {len(switcher)} switcher marks and {len(coords)} coords.')\n log_file.close()\n\n merged_log = {**switcher, **coords}\n sequence = {key: merged_log[key] for key in sorted(merged_log.keys())}\n self.monitor.info(f' -> Merged signal types and sorted by ts. Got a sequenced log with {len(sequence)} records.')\n\n return sequence\n\n except Exception as e:\n self.monitor.exception(f'-> Something bad happened. Details:\\n {repr(e)}')\n\n return None",
"def _readin_syslog(file, time_offset='+0000'):\n\tf = open(file, 'r')\n\tcounter = 0\n\tcontent = []\n\tsources = []\n\tp = re.compile(r'^(\\D{3}\\s+\\d+\\s\\d{2}:\\d{2}:\\d{2})\\s(\\S+)\\s([^\\][:]+)(\\[\\d+\\]){0,1}([^:])*:\\s(.*)$')\n\tp2 = re.compile(r'^.*---\\slast\\smessage\\srepeated\\s\\d+\\stime[s]{0,1}\\s---$')\n\tprecise_date = re.compile(r'^(\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}.\\d{1,6}\\+\\d{2}:\\d{2})\\s(\\S+)\\s([^\\][:]+)(\\[\\d+\\]){0,1}([^:])*:\\s(.*)$')\n\n\tfor x in f.readlines():\n\t\tcounter+=1\n\t\tm = p.search(x)\n\t\t_print_progress(counter)\n\t\tif m:\n\t\t\t# default syslog line was read, herre we assign the year 2017 to all timestamps\n\t\t\tformatted_date = datetime.datetime.strptime('2017 ' + m.group(1)+ time_offset,\"%Y %b %d %H:%M:%S%z\")\n\t\t\tcontent.append(logfile_entry(counter, file, m.group(6), m.group(0), formatted_date, m.group(2),m.group(3)))\n\t\t\tif not m.group(3) in sources:\n\t\t\t\tsources.append(m.group(3))\n\t\telif p2.search(x):\n\t\t\t# a message syaing \"last message repeated x times\" was read, here we simply ignore such lines\n\t\t\tcounter -= 1\n\t\telse:\n\t\t\tm3 = precise_date.search(x)\n\t\t\tif m3:\n\t\t\t\t# precise timestamps are detected\n\t\t\t\tunformatted_date = m3.group(1)\n\t\t\t\tunformatted_date = unformatted_date[:-3]+unformatted_date[-2:]\n\t\t\t\t# this hack around is not needed in Python 3.7, see https://bugs.python.org/issue15873\n\t\t\t\tformatted_date = datetime.datetime.strptime(unformatted_date,\"%Y-%m-%dT%H:%M:%S.%f%z\")\n\t\t\t\tcontent.append(logfile_entry(counter, file, m3.group(6), m3.group(0), formatted_date, m3.group(2), m3.group(3)))\n\t\t\t\tif not m3.group(3) in sources:\n\t\t\t\t\tsources.append(m3.group(3))\n\t\t\telse:\n\t\t\t\t# in case no prior regex matches, the line is added to the line read before\n\t\t\t\tif len(content) > 0:\n\t\t\t\t\tcontent[-1].message += x\n\t\t\t\t\tcontent[-1].structured_data += x\n\t\t\t\t\tcounter -= 1\n\t\t\t\telse:\n\t\t\t\t\tcounter -= 1\n\t\t\t\t\tpass\n\tf.close()\n\t_delete_print()\n\tlf = logfile(file, counter, 'syslog', content,sources)\n\treturn lf",
"def _linux_parse(line, s):\n output_line = {}\n\n if line.startswith('PING '):\n s.ipv4 = 'bytes of data' in line\n\n if s.ipv4 and line[5] not in string.digits:\n s.hostname = True\n # fixup for missing hostname\n line = line[:5] + 'nohost' + line[5:]\n elif s.ipv4 and line[5] in string.digits:\n s.hostname = False\n elif not s.ipv4 and ' (' in line:\n s.hostname = True\n else:\n s.hostname = False\n\n if s.ipv4 and not s.hostname:\n dst_ip, dta_byts = (2, 3)\n elif s.ipv4 and s.hostname:\n dst_ip, dta_byts = (2, 3)\n elif not s.ipv4 and not s.hostname:\n dst_ip, dta_byts = (2, 3)\n else:\n dst_ip, dta_byts = (3, 4)\n\n line = line.replace('(', ' ').replace(')', ' ')\n s.destination_ip = line.split()[dst_ip].lstrip('(').rstrip(')')\n s.sent_bytes = line.split()[dta_byts]\n\n return None\n\n if line.startswith('---'):\n s.footer = True\n return None\n\n if s.footer:\n if 'packets transmitted' in line:\n if ' duplicates,' in line:\n s.packets_transmitted = line.split()[0]\n s.packets_received = line.split()[3]\n s.packet_loss_percent = line.split()[7].rstrip('%')\n s.duplicates = line.split()[5].lstrip('+')\n s.time_ms = line.split()[11].replace('ms', '')\n return None\n\n s.packets_transmitted = line.split()[0]\n s.packets_received = line.split()[3]\n s.packet_loss_percent = line.split()[5].rstrip('%')\n s.duplicates = '0'\n s.time_ms = line.split()[9].replace('ms', '')\n return None\n\n split_line = line.split(' = ')[1]\n split_line = split_line.split('/')\n output_line = {\n 'type': 'summary',\n 'destination_ip': s.destination_ip or None,\n 'sent_bytes': s.sent_bytes or None,\n 'pattern': s.pattern or None,\n 'packets_transmitted': s.packets_transmitted or None,\n 'packets_received': s.packets_received or None,\n 'packet_loss_percent': s.packet_loss_percent or None,\n 'duplicates': s.duplicates or None,\n 'time_ms': s.time_ms or None,\n 'round_trip_ms_min': split_line[0],\n 'round_trip_ms_avg': split_line[1],\n 'round_trip_ms_max': split_line[2],\n 'round_trip_ms_stddev': split_line[3].split()[0]\n }\n\n return output_line\n\n # ping response lines\n\n # request timeout\n if 'no answer yet for icmp_seq=' in line:\n timestamp = False\n isequence = 5\n\n # if timestamp option is specified, then shift icmp sequence field right by one\n if line[0] == '[':\n timestamp = True\n isequence = 6\n\n output_line = {\n 'type': 'timeout',\n 'destination_ip': s.destination_ip or None,\n 'sent_bytes': s.sent_bytes or None,\n 'pattern': s.pattern or None,\n 'timestamp': line.split()[0].lstrip('[').rstrip(']') if timestamp else None,\n 'icmp_seq': line.replace('=', ' ').split()[isequence]\n }\n\n return output_line\n\n # normal responses\n if ' bytes from ' in line:\n\n line = line.replace('(', ' ').replace(')', ' ').replace('=', ' ')\n\n # positions of items depend on whether ipv4/ipv6 and/or ip/hostname is used\n if s.ipv4 and not s.hostname:\n bts, rip, iseq, t2l, tms = (0, 3, 5, 7, 9)\n elif s.ipv4 and s.hostname:\n bts, rip, iseq, t2l, tms = (0, 4, 7, 9, 11)\n elif not s.ipv4 and not s.hostname:\n bts, rip, iseq, t2l, tms = (0, 3, 5, 7, 9)\n elif not s.ipv4 and s.hostname:\n bts, rip, iseq, t2l, tms = (0, 4, 7, 9, 11)\n\n # if timestamp option is specified, then shift everything right by one\n timestamp = False\n if line[0] == '[':\n timestamp = True\n bts, rip, iseq, t2l, tms = (bts + 1, rip + 1, iseq + 1, t2l + 1, tms + 1)\n\n output_line = {\n 'type': 'reply',\n 'destination_ip': s.destination_ip or None,\n 'sent_bytes': s.sent_bytes or None,\n 'pattern': s.pattern or None,\n 'timestamp': 
line.split()[0].lstrip('[').rstrip(']') if timestamp else None,\n 'response_bytes': line.split()[bts],\n 'response_ip': line.split()[rip].rstrip(':'),\n 'icmp_seq': line.split()[iseq],\n 'ttl': line.split()[t2l],\n 'time_ms': line.split()[tms],\n 'duplicate': 'DUP!' in line\n }\n\n return output_line",
"def __init__(self, log_string):\n match_string = r'\\[([0-9]+) \\| ([a-z0-9]{8}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{12}) \\| \\((\\s*[0-9]+,\\s*[0-9]+)\\)\\] ([\\S\\s]*)' # noqa: E501\n groups = re.search(match_string, log_string).groups()\n self.time = int(groups[0]) # Event timestamp\n self.id = groups[1] # ID of the sprite that the event affects\n x, y = int(groups[2].split()[0][:-1]), int(groups[2].split()[1])\n self.point = Point(x, y) # The location of the event\n for action in Actions:\n if groups[3].strip() == action.name:\n self.action = action # The type of event\n break\n else:\n # Spawn because it is followed by what spawned\n self.action = Actions.Spawned\n self.sprite = groups[3][7:].strip() # The type of sprite that was spawned",
"def extract_connecting_ip(logpart):\n print logpart\n smtp_ip = logpart.split('[')[1].split(']')[0]\n return smtp_ip",
"def from_log_string(log_string):\n\n\t\tfirst_part = None\n\t\tsecond_part = None\n\n\t\tif not log_string.endswith(\"}\"):\n\t\t\t# Value error for later use\n\t\t\tvalue_error = ValueError(\"Given string has invalid format: {}\".format(log_string))\n\n\t\t\tbracket_idx = log_string.find(\"}\")\n\t\t\tlast_comma_idx = log_string.find(\",\", bracket_idx)\n\t\t\tif last_comma_idx != bracket_idx + 1:\n\t\t\t\traise value_error\n\n\t\t\t# The bracket is kept\n\t\t\tfirst_part = log_string[:bracket_idx + 1]\n\t\t\t# The comma is removed\n\t\t\tsecond_part = log_string[last_comma_idx + 1:]\n\t\t\tif \"}\" not in first_part or \"}\" in second_part or \"{\" in second_part:\n\t\t\t\traise value_error\n\n\t\tdata_dict = json.loads(first_part)\n\t\treturn LogEntry.from_data(data_dict, second_part)",
"def parse_line(log_line):\n\n logger = logging.getLogger(__name__)\n\n REGEX = [\n # universal-transcoder\n re.compile('.*GET\\s\\/music\\/:\\/transcode\\/universal\\/start\\.mp3.*metadata%2F(\\d+)\\&.*'),\n # stream based transcoder\n re.compile('.*\\sDEBUG\\s-\\sLibrary\\sitem\\s(\\d+)\\s\\'.*\\'\\sgot\\splayed\\sby\\saccount.*')\n ]\n\n for regex in REGEX:\n m = regex.match(log_line)\n\n if m:\n logger.info('Found played song and extracted library id \"{l_id}\" from plex log '.format(l_id=m.group(1)))\n return m.group(1)"
] | [
"0.64265484",
"0.59902716",
"0.5971159",
"0.57045716",
"0.5659824",
"0.5659817",
"0.5655153",
"0.5649358",
"0.56467295",
"0.5573839",
"0.55555034",
"0.5528865",
"0.55214393",
"0.5519047",
"0.55149513",
"0.5511338",
"0.54538894",
"0.5397081",
"0.5387243",
"0.5363179",
"0.5359764",
"0.5317096",
"0.53152424",
"0.5296047",
"0.5262153",
"0.52599686",
"0.5240256",
"0.52071124",
"0.51997054",
"0.5199607"
] | 0.7670208 | 0 |
Set speed limit of servo on a given channel. | def servo_set_speed_limit(ch, speed):
# Check to make sure speed is in range
speed = max(speed, speed_limit_min)
speed = min(speed, speed_limit_max)
# Send command to servo controller
servo_send_cmd(cmd_set_speed, ch, speed) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def servo_set_speed_limit(ch, accel):\n\n # Check to make sure speed is in range\n speed = max(accel, accel_limit_min)\n speed = min(accel, accel_limit_max)\n\n # Send command to servo controller\n servo_send_cmd(cmd_set_accel, ch, accel)",
"def set_param_motor():\n servo.setSpeed(0, 0) # max = 255\n servo.setAccel(0, 0)\n servo.setSpeed(1, 150) # max = 255\n servo.setAccel(1, 150)",
"def set_speed(self, speed=0):\n speed = clamp(speed)\n self._state.speed = speed\n self.send_command(Command.SET_SPEED, [int(speed)])",
"def set_speed(self, speed, motor):\n self.driver.set_speed(speed, motor)\n self.last_control = time.time()",
"def set_speed(self,value):\n if (value>self.get_max_speed()):\n print \"asked to set the speed to %f but the max speed is %f\\n\" % (value,self.get_max_speed())\n else:\n return self.put_par(\"slew_speed\",value)",
"def set_speed(self, v):\n self.v = v",
"def setWheelsSpeed(self, dc_motor_speed):\n self.dcmotorSpeed = dc_motor_speed # changed rightSpeed to dcmotorSpeed and right to\n self.updatePWM()",
"def changeSpeed(self, speed, accel):\n\t\t\n max_speed = 1000\n min_speed = 0\n \n # limit max speed\n if speed >= max_speed:\n speed = max_speed\n \n # limit min speed\n if speed <= min_speed:\n speed = min_speed\n \n command = struct.pack(\"<BHHB\", 0x24, speed, accel, 0x01)\n self.sendCommand(command)",
"def set_max_speed(self, value):\n if self.mot_type == 'ims':\n return self.put_par(\"max_speed\",value)\n elif self.mot_type == 'xps8p':\n print \"asked to set the max speed to %f but max speed is read only for %s motors\\n\" % (value,mot_type)\n else:\n return self.put_par(\"max_speed\",value)",
"def set_speed(self,speed):\n self.speed = speed",
"def set_speed(self, speed, ports='ABCD'):\n\n speed += self.avg_speed\n if self.inverted:\n speed = -speed\n\n if speed > self.margin:\n speed = self.margin\n elif speed < -self.margin:\n speed = self.margin\n\n for p in ports:\n if self.motors[p].connected:\n self.motors[p].run_forever(speed_sp=speed, speed_regulation=True)\n else:\n print(\"Cant run motor on\", p, \"- not connected\")",
"def setMotorSpeed(self,motorID,speed):\n speed = max(min(speed,1.0),-1.0) #range limit\n direction = speed < 0 # set reverse direction bit if speed less than 0\n bit8speed = self.params[1] & 1 #first bit of paramter 1 can be used to determin if its in 8 bit speed mode\n speedMultiplyer = 127 # speed is between 0-127 for 7bit speed mode\n if bit8speed:\n speedMultiplyer = 255 #speed is between 0-255 for 8bit speed mode\n speedByte = int(abs(speed)*speedMultiplyer)# covert floating speed to scaled byte\n \n cmd = speedByte >= 128 # bit 0 of command is used for 8th bit of speedbyte as speedbyte can only use 7 bits\n \n speedByte &= 127 #clear the 8th bit of the speedbyte as it can only use 7 bits\n \n cmd |= direction << 1 #shift direction into bit 1\n cmd |= motorID << 2 #shift motor id into bit 2\n cmd |= 1 << 3 # just set bit 3\n\n #send the speed command\n self.driver.sendReceive([0xaa,self.id,cmd,speedByte],0)",
"def set_speed(self, speed):\n self._kernel.set_speed(float(speed))",
"def set_speed(rpm):\n ret = _LIB.fan_click_set_speed(rpm)\n if ret < 0:\n raise Exception(\"fan click set speed failed\")",
"def setMotorSpeed(self, idMotor=0, sense=0, speed=0, board=0):\n msg = [idMotor, sense, int(speed / 256.0), speed % 256]\n return self.callModule('motors', board, 0, 'setvelmtr', msg)",
"def setSpeed(self, v):\n\t\tconverted = self.convertSpeed(v)\n\t\tprint(converted)\n\t\t# set both stage speeds\n\t\tself.zaberSend(self.translation[\"hor\"], self.cmd[\"setTargetSpeed\"], data = converted)\n\t\tself.zaberSend(self.translation[\"ver\"], self.cmd[\"setTargetSpeed\"], data = converted)",
"def change_motor_speed(self, speed=0.0):\r\n if not self.enabled:\r\n self.set_neutral(braked=False)\r\n return\r\n\r\n # logging.info(\"{} Motor Speed: {}\".format(self.motor_name, speed))\r\n self.current_speed = speed # Store current set speed\r\n\r\n # If speed is < 0.0, we are driving in reverse.\r\n self.forward = True\r\n if speed < 0.0:\r\n # Normalise speed value to be in range [0, 100]\r\n speed = -speed\r\n # Store direction\r\n self.forward = False\r\n\r\n # Apply a factor to the speed to limit speed\r\n speed *= self.speed_factor\r\n\r\n # Set motor directional pins\r\n if self.forward:\r\n if self.a_pin >= 0:\r\n self.GPIO.output(self.a_pin, 1)\r\n if self.b_pin >= 0:\r\n self.GPIO.output(self.b_pin, 0)\r\n else:\r\n if self.a_pin >= 0:\r\n self.GPIO.output(self.a_pin, 0)\r\n if self.b_pin >= 0:\r\n self.GPIO.output(self.b_pin, 1)\r\n\r\n # Convert speed into PWM duty cycle\r\n # and clamp values to min/max ranges.\r\n dutycycle = speed\r\n if dutycycle < 0.0:\r\n dutycycle = 0.0\r\n elif dutycycle > self.max_speed:\r\n dutycycle = self.max_speed\r\n\r\n # Change the PWM duty cycle based on fabs() of speed value.\r\n self.PWM.ChangeDutyCycle(dutycycle)",
"def set_cmd_velocity(self, speed):\n self.gripper_io.set_signal_value(\"speed_mps\", speed)",
"def set_motor_speed(self, speed=0.0):\r\n self.target_speed = speed",
"def adjustSpeed(self, speed):\n\t\tif self.timeout <= 0:\n\t\t\tself.speed = max(self.minimumSpeed, min(self.maximumSpeed, self.speed + speed))",
"def set_fan_speed(self, value):\n self.parent.fancoolers.set_speed(value)",
"def set_speed(self, speed):\n self.speed = speed",
"def set_speed(self, speed):\r\n speed = float(speed)\r\n speed = int(round(speed * 27.7778))\r\n return self.send_command('speed %s' % speed)",
"def set_speed(self,speed):\n self.speed_p = speed",
"def set_speed(self, speed):\n return self.bot_client.send_command(_Command.SetSpeed, speed)",
"def setspeed(speed):\n if speed is None:\n click.echo(\"speed value is required\")\n raise click.Abort()\n\n for fan in range(_wrapper_get_num_fans()):\n status = _wrapper_set_fan_speed(fan, speed)\n if not status:\n click.echo(\"Failed\")\n sys.exit(1)\n\n click.echo(\"Successful\")",
"def pwm_limit(self, value):\n self._write(MX_PWM_LIMIT, value)",
"def set_speed(self, speed):\n # create the MAV_CMD_DO_CHANGE_SPEED command\n msg = self.message_factory.command_long_encode(0, 0,mavutil.mavlink.MAV_CMD_DO_CHANGE_SPEED,0,0,speed,0, 0, 0, 0, 0)\n\n # send command to vehicle\n self.send_mavlink(msg)\n self.flush()",
"def set_speed(self, speed):\n self.device.set_speed(speed)\n return \"OK\"",
"def setCutoffSpeed(self, pulses_per_sec):\n cmd_string = 'c{0}'.format(pulses_per_sec)\n self.sim_speed_change = True\n self.cmd_chain += cmd_string"
] | [
"0.8116307",
"0.7313522",
"0.6900925",
"0.68464",
"0.683779",
"0.6770115",
"0.6756657",
"0.6677769",
"0.6668522",
"0.6620393",
"0.66193795",
"0.6602939",
"0.65876895",
"0.6579509",
"0.65753126",
"0.6565928",
"0.6559673",
"0.6551852",
"0.6544445",
"0.6532947",
"0.6528034",
"0.64930624",
"0.64821726",
"0.64782494",
"0.6433047",
"0.6393382",
"0.6388397",
"0.6384769",
"0.63785833",
"0.63768154"
] | 0.8289777 | 0 |
Set accel limit of servo on a given channel. | def servo_set_speed_limit(ch, accel):
    # Clamp accel to the allowed range
    accel = max(accel, accel_limit_min)
    accel = min(accel, accel_limit_max)
# Send command to servo controller
servo_send_cmd(cmd_set_accel, ch, accel) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_param_motor():\n servo.setSpeed(0, 0) # max = 255\n servo.setAccel(0, 0)\n servo.setSpeed(1, 150) # max = 255\n servo.setAccel(1, 150)",
"def servo_set_speed_limit(ch, speed):\n\n # Check to make sure speed is in range\n speed = max(speed, speed_limit_min)\n speed = min(speed, speed_limit_max)\n\n # Send command to servo controller\n servo_send_cmd(cmd_set_speed, ch, speed)",
"def setChannel(self, channel='A', coupling=\"AC\", VRange=2.0,\n VOffset=0.0, enabled=True, BWLimited=0,\n probeAttenuation=1.0):\n if enabled:\n enabled = 1\n else:\n enabled = 0\n\n if not isinstance(channel, int):\n chNum = self.CHANNELS[channel]\n else:\n chNum = channel\n\n if not isinstance(coupling, int):\n coupling = self.CHANNEL_COUPLINGS[coupling]\n\n # finds the next largest range\n VRangeAPI = None\n for item in self.CHANNEL_RANGE:\n if item[\"rangeV\"] - VRange / probeAttenuation > -1E-4:\n if VRangeAPI is None:\n VRangeAPI = item\n # break\n # Don't know if this is necessary assuming that it will iterate\n # in order\n elif VRangeAPI[\"rangeV\"] > item[\"rangeV\"]:\n VRangeAPI = item\n\n if VRangeAPI is None:\n raise ValueError(\n \"Desired range %f is too large. Maximum range is %f.\" %\n (VRange, self.CHANNEL_RANGE[-1][\"rangeV\"] * probeAttenuation))\n\n # store the actually chosen range of the scope\n VRange = VRangeAPI[\"rangeV\"] * probeAttenuation\n\n if not isinstance(BWLimited, int):\n BWLimited = self.BW_LIMITS[BWLimited]\n\n if BWLimited == 3:\n BWLimited = 3 # 1MHz Bandwidth Limiter for PicoScope 4444\n elif BWLimited == 2:\n BWLimited = 2 # Bandwidth Limiter for PicoScope 6404,\n # 100kHz Bandwidth Limiter for PicoScope 4444\n elif BWLimited == 1:\n BWLimited = 1 # Bandwidth Limiter for PicoScope 6402/6403,\n # 20kHz Bandwidth Limiter for PicoScope 4444\n else:\n BWLimited = 0\n\n self._lowLevelSetChannel(chNum, enabled, coupling,\n VRangeAPI[\"apivalue\"],\n VOffset / probeAttenuation, BWLimited)\n\n # if all was successful, save the parameters\n self.CHRange[chNum] = VRange\n self.CHOffset[chNum] = VOffset\n self.CHCoupling[chNum] = coupling\n self.ProbeAttenuation[chNum] = probeAttenuation\n\n return VRange",
"def changeSpeed(self, speed, accel):\n\t\t\n max_speed = 1000\n min_speed = 0\n \n # limit max speed\n if speed >= max_speed:\n speed = max_speed\n \n # limit min speed\n if speed <= min_speed:\n speed = min_speed\n \n command = struct.pack(\"<BHHB\", 0x24, speed, accel, 0x01)\n self.sendCommand(command)",
"def set_accel(self, accel):\n \"\"\" Accel is pixel per second second \"\"\"\n self.accel = accel",
"def setCurrentAccel(self):\n if self.currentAccel != self.accel:\n if self.currentAccel != round(self.accel,1):\n if self.currentAccel < self.accel:\n self.currentAccel += 0.01\n elif self.currentAccel > self.accel:\n self.currentAccel -= 0.01",
"def Add_Analog_Voltage_Channel(self,channel,min=-10.0,max=10.0):\n self.num_channels += 1\n self.channel = self.device + \"/\" + channel\n self._CHK(nidaq.DAQmxCreateAIVoltageChan(self.task_handle,self.channel,\"\",\n DAQmx_Val_NRSE,\n float64(min),float64(max),\n DAQmx_Val_Volts,None))",
"def set_channel_v_unit(self , channel_v_unit:float): \n self.__channel_v_unit = channel_v_unit",
"def setComp( self, c, name, voltage ):\n self.validateChannel( name )\n self.validateVoltage( name, voltage )\n channel = self.d[name]\n channel.setValue(voltage)\n message = channel.comstring\n self.tryToSend( message )\n self.notifyOtherListeners(c, (name,voltage))",
"def dmx_slider_change(self, channel, value):\n self.dmx.set_channel(channel, value)",
"def set_ao(self,channel,value = 0):\n import numpy as np\n sf = 32767/10.0\n if ((value < 0) and (value >= -10)):\n val_bin = int(np.ceil(65536 + value*sf))\n elif (value <= 10):\n val_bin = np.floor(value*sf)\n else:\n raise ValueError\n self.msg.command = 'set_ao'\n self.clear_args()\n self.msg.arg1 = channel\n self.msg.arg2 = val_bin\n self.pub.publish(self.msg)",
"def acceleration_limit(self, value):\n self._write(MX_ACCELERATION_LIMIT, value)",
"def __motorLimitHit(self, channelValue, channelName):\n if channelValue:\n if channelName.endswith('low_lim_hit'):\n self.limit = self.limit | LOWLIMIT\n self.__changeMotorState(ONLIMIT)\n else:\n self.limit = self.limit | HIGHLIMIT\n self.__changeMotorState(ONLIMIT)",
"def __init__(self, channel):\n self.servo = wpilib.PWM(channel)\n self.close_value = 0\n #self.setBounds(1.0, 1.48, 1.5, 1.52, 2.0)\n self.setBounds(2.0, 1.65, 1.5, 1.35, 1.0)",
"def motorLimitsChanged(self):\n pass",
"def set_channel(self, channel_value):\n error_message = \"Value '{chan}' is not \".format(chan=channel_value)\n # Make sure channel_value is an int before getting the number of keys to send\n try:\n channel_value = int(channel_value)\n assert(0<=channel_value<=999)\n except AssertionError:\n raise TVError(error_message + \"in the channel range\", 'set_channel')\n except ValueError:\n raise TVError(error_message + \"an integer\", 'set_channel')\n except:\n raise TVError(\"Unexpected error !\", 'set_fm')\n for char in str(channel_value).zfill(3):\n self.send_key('BTN_' + char)\n time.sleep(0.1)",
"def set_servo(name,servo,value):\n name = _lookup(name)\n servo_data = list(name) + [-1,-1,-1,-1]\n servo_data[servo + 1] = value\n mc.set('servo_values',servo_data)",
"def setAngle(channel, angle, delta=170, min_delay=0.02):\n delay = max(delta * 0.003, min_delay)\n zero_pulse = (servoMin + servoMax) / 2 # half-way == 0 degrees\n pulse_width = zero_pulse - servoMin \n pulse = zero_pulse + (pulse_width * angle / 80)\n pwm.setPWM(channel, 0, int(pulse))\n time.sleep(delay)",
"def setComp( self, c, devChannel, voltage ):\n dev = 'comp'\n self.validateDevChannel( dev, devChannel )\n self.validateInput( dev, voltage )\n channel = self.dcDict[dev]['devChannels'][devChannel]['channel']\n self.tryToSend( channel, voltage )",
"def pwm_limit(self, value):\n self._write(MX_PWM_LIMIT, value)",
"def set_accel_range(self, accel_range):\n # First change it to 0x00 to make sure we write the correct value later\n self.bus.write_byte_data(self.address, self.ACCEL_CONFIG, 0x00)\n\n # Write the new range to the ACCEL_CONFIG register\n self.bus.write_byte_data(self.address, self.ACCEL_CONFIG, accel_range)",
"def set_accel_range(self, accel_range):\n\t\t# First change it to 0x00 to make sure we write the correct value later\n\t\tself.bus.write_byte_data(self.address, self.ACCEL_CONFIG, 0x00)\n\n\t\t# Write the new range to the ACCEL_CONFIG register\n\t\tself.bus.write_byte_data(self.address, self.ACCEL_CONFIG, accel_range)",
"def servo_gainupdate(self, *args, **kwargs) -> Any:\n pass",
"def set_channel(self, c, channel):\n try:\n self.binding.set_switcher_channel(channel)\n except Exception, e:\n self.handle_wavemeter_error(e)\n return False\n\n return True",
"def set_accel_range(self, accel_range):\r\n # First change it to 0x00 to make sure we write the correct value later\r\n self.bus.write_byte_data(self.deviceaddress, self.accel_config, 0x00)\r\n\r\n # Write the new range to the ACCEL_CONFIG register\r\n self.bus.write_byte_data(self.deviceaddress, self.accel_config, accel_range)",
"def set_servo_angle(self, channel: int, angle: float):\n if channel < 0 or channel > 15:\n raise ValueError('Channel must be between 0 and 15')\n\n if channel not in self._servos:\n raise KeyError('There is no servo registered on channel %d' % channel)\n \n servo = self._servos[channel]\n servo.set_angle(angle)",
"def set_channel(self, channel, coupling='dc', scale='10V', offset=0.0, bandwidth='full', enabled=True):\n channel = self.convert_to_enum(channel, self.enChannel, to_upper=True)\n coupling = self.convert_to_enum(coupling, self.enCoupling, to_upper=True)\n scale = self.convert_to_enum(scale, self.enRange, prefix='R_', to_upper=True)\n\n try: # not all PicoScopes have a BandwidthLimiter enum\n bandwidth = self.convert_to_enum(bandwidth, self.enBandwidthLimiter, prefix='BW_', to_upper=True)\n except:\n bandwidth = None\n\n if self.IS_PS2000 or self.IS_PS3000 or self.IS_PS4000 or self.IS_PS5000:\n self.SetChannel(self._handle, channel, enabled, coupling, scale)\n elif self.IS_PS6000:\n self.SetChannel(self._handle, channel, enabled, coupling, scale, offset, bandwidth)\n else:\n self.SetChannel(self._handle, channel, enabled, coupling, scale, offset)\n\n # get the voltage range as a floating-point number\n voltage_range = float(re.findall(r'\\d+', scale.name)[0])\n if 'M' in scale.name:\n voltage_range *= 1e-3 # milli volts\n\n # create/update the PicoScopeChannel in the dictionary\n self._channels_dict[channel.name] = PicoScopeChannel(channel, bool(enabled), coupling, voltage_range,\n offset, bandwidth, self.maximum_value())",
"def _set_dac(self, channel, mvoltage):\n proceed = True\n\n if self.check_setpoints():\n cur_val = self.get('dac{}'.format(channel))\n # dac range in mV / 16 bits FIXME make range depend on polarity\n byte_res = self.full_range / 2**16\n # eps is a magic number to correct for an offset in the values\n # the IVVI returns (i.e. setting 0 returns byte_res/2 = 0.030518\n # with rounding\n eps = 0.0001\n\n proceed = False\n\n if (mvoltage > (cur_val + byte_res / 2 + eps) or\n mvoltage < (cur_val - byte_res / 2 - eps)):\n proceed = True\n\n if self.dac_set_sleep() > 0.0:\n time.sleep(self.dac_set_sleep())\n\n # only update the value if it is different from the previous one\n # this saves time in setting values, set cmd takes ~650ms\n if proceed:\n polarity_corrected = mvoltage - self.pol_num[channel - 1]\n byte_val = self._mvoltage_to_bytes(polarity_corrected)\n message = bytes([2, 1, channel]) + byte_val\n\n reply = self.ask(message)\n self._time_last_update = 0 # ensures get command will update\n\n return reply",
"def servo_gainfactor(self, *args, **kwargs) -> Any:\n pass",
"def set_voltage(self, v, ch): \n self.write(\"VSET\" + str(ch) + \":\" + str(v) + \"\\n\")"
] | [
"0.6756529",
"0.66715246",
"0.6317015",
"0.6237403",
"0.6211317",
"0.6124698",
"0.6102591",
"0.6064866",
"0.60071737",
"0.5985983",
"0.59721977",
"0.59433496",
"0.5912166",
"0.5886724",
"0.58355105",
"0.5830066",
"0.58292514",
"0.58101195",
"0.577613",
"0.577541",
"0.57703537",
"0.57699114",
"0.57371455",
"0.5713823",
"0.57111514",
"0.5704697",
"0.56701106",
"0.5655753",
"0.56442505",
"0.56229234"
] | 0.7922705 | 0 |
Looks up or creates the gsutil tracker file directory. This is the configured directory where gsutil keeps its resumable transfer tracker files. This function creates it if it doesn't already exist. | def CreateTrackerDirIfNeeded():
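  # Fall back to '<gsutil state dir>/tracker-files' when resumable_tracker_dir is not configured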
tracker_dir = config.get(
'GSUtil', 'resumable_tracker_dir',
os.path.join(GetGsutilStateDir(), 'tracker-files'))
CreateDirIfNeeded(tracker_dir)
return tracker_dir | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_directory(tracking_id):\n upload_path = os.path.join(app.config['DRS_UPLOADS'], '{0}'.format(tracking_id))\n if not os.path.isdir(upload_path):\n os.mkdir(upload_path)",
"def create_base_temp_dir(cls):\n if cls._thread_local.state.temp_dirs:\n base_temp_dir = os.path.join(cls._thread_local.state.temp_dirs[-1],\n cls._TEMP_SUBDIR)\n else:\n raise ValueError(\n 'A tf.Transform function that required a temp dir was called but no '\n 'temp dir was set. To set a temp dir use the impl.Context context '\n 'manager.')\n tf.gfile.MakeDirs(base_temp_dir)\n return base_temp_dir",
"def mklocdir(self):\n dir = self._localpath+\"/%s.%s\" % (self._jobname, self._jobid)\n os.mkdir(dir)",
"def create_directory(path):\n if path.startswith('gs://'):\n return\n if os.path.isdir(path):\n return\n if os.path.isfile(path):\n raise ValueError('Unable to create location. \"%s\" exists and is a file.' %\n path)\n\n try:\n os.makedirs(path)\n except: # pylint: disable=broad-except\n raise ValueError('Unable to create location. \"%s\"' % path)",
"def _make_unique_temp_dir(base_temp_dir):\n return os.path.join(base_temp_dir, uuid.uuid4().hex)",
"def tmpdir(self):\n dir_ = os.path.dirname(self.filename)\n try:\n path = At_code_checker.Dir_Map[dir_.lower()]\n if not os.path.isdir(path):\n create_dir(path)\n except KeyError:\n path = self.get_temp_dir()\n At_code_checker.Dir_Map[dir_.lower()] = path\n finally:\n return path",
"def create_cache_dir(self) -> None:\n try:\n os.makedirs(self.cache_folder)\n except FileExistsError:\n pass",
"def init_work_dir(\n request: pytest.FixtureRequest, base_path: Path, name: Optional[str]\n) -> Tuple[Path, Union[FileCounter, NullCounter]]:\n target_dir = base_path / name if name else base_path\n target_dir.mkdir(exist_ok=True)\n\n counter: Union[FileCounter, NullCounter]\n if using_xdist(request):\n counter = FileCounter(target_dir / \"counter\")\n else:\n counter = NullCounter()\n\n return target_dir, counter",
"def setup_log_dir():\n log_dir = get_log_dir()\n if log_dir.endswith('latest'):\n shutil.rmtree(log_dir, ignore_errors=True)\n mkdirs(log_dir)\n return log_dir",
"def create_temp_dir():\n\n try:\n temp_dir = os.getenv('TEMP_FILE_DIR')\n\n if not isinstance(temp_dir, type(None)):\n if os.path.exists(temp_dir):\n LOGGER.warning('Temp Directory Already Exists.')\n else:\n temp_dir = tempfile.mkdtemp()\n os.environ['TEMP_FILE_DIR'] = temp_dir\n else:\n temp_dir = tempfile.mkdtemp()\n os.environ['TEMP_FILE_DIR'] = temp_dir\n\n LOGGER.debug(f'Temp Dir: {temp_dir}')\n except Exception as ex:\n LOGGER.exception(ex)\n raise ex",
"def init_sharing_path(self):\n if not os.path.isdir(self.cfg['sharing_path']):\n try:\n os.makedirs(self.cfg['sharing_path'])\n except OSError:\n self.stop(1, '\\nImpossible to create \"{0}\" directory! Check sharing_path value contained in the following file:\\n\"{1}\"\\n'\n .format(self.cfg['sharing_path'], Daemon.CONFIG_FILEPATH))",
"def create_working_directory(self):\n os.makedirs(self.working_directory, exist_ok=True)",
"def _get_directory(self):\n directory = os.environ.get(\"EEMETER_WEATHER_CACHE_DIRECTORY\",\n os.path.expanduser('~/.eemeter/cache'))\n if not os.path.exists(directory):\n os.makedirs(directory)\n return directory",
"def _check_or_create_dir(directory):\n if not tf.gfile.Exists(directory):\n tf.gfile.MakeDirs(directory)",
"def mk_work_dir():\n return tempfile.mkdtemp(prefix='pentaho-aws-', suffix='')",
"def makeTempDir(self):\n try:\n os.mkdir(self.temp_dir)\n except FileExistsError:\n pass",
"def on_created(self, event):\n \n file_name = os.path.basename(event.src_path)\n parent = os.path.dirname(event.src_path)\n parents_id = self.filesystem[parent][\"id\"]\n\n if event.is_directory:\n if file_name not in self.ignore_dirs:\n file_id = self.gapy.create_file(file_name, path=parent, parents_id=[parents_id], isFolder=True)\n self.filesystem[file_name.rstrip(\"/\")] = file_id \n self.gapy.logger.info(\"The directory {} was created with id {}\".format(file_name, file_id))\n else:\n if file_name not in self.ignore_files:\n with open(event.src_path, \"w\") as empty_file:\n empty_file.write(\"\\t\")\n file_id = self.gapy.create_file(file_name, path=parent, parents_id=[parents_id])\n self.filesystem[parent.rstrip(\"/\")][\"files\"].append({\"name\": file_name, \"id\": file_id})\n self.gapy.logger.info(\"The file {} was created with id {}\".format(file_name, file_id))\n print(f\"\\nFile created: {file_name} at {datetime.now()}\")\n\n self.update_fs()",
"def make_tempdir(self):\n self.tempdir_path = self.dst_path + '_temp'\n if not os.path.exists(self.tempdir_path):\n os.makedirs(self.tempdir_path)\n return self.tempdir_path",
"def create_directories(working_dir, timesteps):\n\tfor time in timesteps:\n\t\tpath = working_dir + str(time) + '_ps_snapshot'\n\t\t\n\t\t# Make sure no old snapshot dir of same name exists - if it does, remove it.\n\t\tif os.path.isdir(path):\n\t\t\tprint('{} alread exists. Deleting it ...'.format(path))\n\t\t\tshutil.rmtree(path)\n\n\t\t# Copy the template\n\t\tshutil.copytree(working_dir + '.template_snapshot', path, symlinks=True)\n\t\tprint('{} initialized.'.format(path))\n\n\treturn path",
"def store_file(file, tracking_id):\n errors = {}\n try:\n upload_path = os.path.join(app.config['DRS_UPLOADS'], '{0}'.format(tracking_id))\n Utilities.create_directory(tracking_id)\n file_path = os.path.join(upload_path, file.filename)\n file.save(file_path)\n if os.path.getsize(file_path) == 0:\n errors['size'] = ['The file should not be Empty']\n return errors\n except IOError:\n raise Exception",
"def createDirectories(self):\n # -- LOG\n thepath = os.path.dirname(self.settings.logfile)\n distutils.dir_util.mkpath(thepath)\n\n # -- SESSION \n thepath = self.settings.sessionpath\n distutils.dir_util.mkpath(thepath)\n\n # -- DATABASE\n thepath = self.settings.dbpath\n distutils.dir_util.mkpath(thepath)",
"def fs_create_dir(self, path):\n\t\treturn Job(SDK.PrlSrv_FsCreateDir(self.handle, path)[0])",
"def create(self):\n if os.path.isdir(self.repodir):\n if os.listdir(self.repodir):\n raise EmtError('%s is not empty' % self.repodir)\n else:\n os.makedirs(self.repodir)\n self.git_cmd('init')\n self.initialized = True",
"def _save_tracker_uri_to_file(self):\r\n if not self.tracker_file_name:\r\n return\r\n f = None\r\n try:\r\n f = open(self.tracker_file_name, 'w')\r\n f.write(self.tracker_uri)\r\n except IOError, e:\r\n raise ResumableUploadException(\r\n 'Couldn\\'t write URI tracker file (%s): %s.\\nThis can happen'\r\n 'if you\\'re using an incorrectly configured upload tool\\n'\r\n '(e.g., gsutil configured to save tracker files to an '\r\n 'unwritable directory)' %\r\n (self.tracker_file_name, e.strerror),\r\n ResumableTransferDisposition.ABORT)\r\n finally:\r\n if f:\r\n f.close()",
"def create_run_tracker(info_dir=None):\r\n # TODO(John Sirois): Rework uses around a context manager for cleanup of the info_dir in a more\r\n # disciplined manner\r\n info_dir = info_dir or safe_mkdtemp()\r\n run_tracker = RunTracker(info_dir)\r\n report = Report()\r\n run_tracker.start(report)\r\n return run_tracker",
"def create_directory():\n global dirName\n dirName = 'Downloaded Files'\n global folder_path\n if os.path.isdir(dirName) == True:\n print(\"This folder already exists, path:\", os.path.abspath(dirName))\n else:\n os.mkdir(dirName)\n global folder_path\n folder_path = os.path.abspath(dirName)\n print(\"Directory \" , dirName , \" Created \")",
"def _use_temp_directory(self):\n if not self._is_temp_dir:\n self._orig_base_data_dir = self._base_data_dir\n self._orig_base_logs_dir = self._base_logs_dir\n temp_dir = Path(tempfile.mkdtemp())\n self._base_data_dir = temp_dir / \"data\"\n self._base_logs_dir = temp_dir / \"logs\"\n self.db.change_path(\":memory:\")\n self.set_current(\"default\", update=False)\n self._is_temp_dir = True\n return temp_dir",
"def make_dir (path, empty=False):\n\n\n # check if google_cloud is set\n if not path[0:5] == 'gs://':\n\n # if already exists but needs to be empty, remove it first\n if isdir(path) and empty:\n shutil.rmtree(path)\n\n # do not check if directory exists, just try to make it; changed this\n # after racing condition occurred on the ilifu Slurm cluster when\n # reducing flatfields, where different tasks need to make the same\n # directory\n os.makedirs(path, exist_ok=True)\n\n\n return",
"def create_duplicates_directory(self) -> None:\n dups_path = os.path.join(self.get_directory(), \"duplicates\")\n if not self.directory_exists(dups_path): os.mkdir(dups_path)",
"def _setup_dir(self):\n if not os.path.exists(self._save_dir):\n logger.info(\"save_dir {} does not exist, \"\n \"creating it\".format(self._save_dir))\n os.makedirs(self._save_dir)\n\n # Log the run parameters.\n logger.info(\"Writing logs to {}\".format(self._log_dir))\n\n if not os.path.exists(self._log_dir):\n logger.info(\"log path {} does not exist, \"\n \"creating it\".format(self._log_dir))\n os.makedirs(self._log_dir)"
] | [
"0.5943972",
"0.5766383",
"0.57206106",
"0.57040995",
"0.5617084",
"0.5609605",
"0.5540115",
"0.5536164",
"0.55042833",
"0.5486796",
"0.5483267",
"0.54412764",
"0.54346126",
"0.54261184",
"0.5397497",
"0.53728074",
"0.5369626",
"0.5355074",
"0.534489",
"0.5343588",
"0.53029925",
"0.5294086",
"0.52812076",
"0.52630746",
"0.52409655",
"0.5226005",
"0.5210844",
"0.52105284",
"0.52097887",
"0.51988846"
] | 0.827229 | 0 |
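
As a quick illustration of the positive snippet above — read a configured location with a default, then create the directory if it is missing — here is a minimal standalone sketch. The function name, the ~/.gsutil default, and the absence of the Boto config lookup are illustrative assumptions, not gsutil's actual API:

import os

def create_tracker_dir_if_needed(state_dir=None):
    # Illustrative default; the real helper reads the path from the gsutil
    # Boto config ('GSUtil' / 'resumable_tracker_dir') instead.
    state_dir = state_dir or os.path.expanduser(os.path.join("~", ".gsutil"))
    tracker_dir = os.path.join(state_dir, "tracker-files")
    os.makedirs(tracker_dir, exist_ok=True)  # no-op when it already exists
    return tracker_dir
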
Creates an MD5 hex digest of the parameters for a rewrite call. Resuming rewrites requires that the input parameters are identical. Thus, the rewrite tracker file needs to represent the input parameters. For easy comparison, hash the input values. If a user performs a same-source/same-destination rewrite via a different command (for example, with a changed ACL), the hashes will not match and we will restart the rewrite from the beginning. | def HashRewriteParameters(
src_obj_metadata, dst_obj_metadata, projection, src_generation=None,
gen_match=None, meta_gen_match=None, canned_acl=None, fields=None,
max_bytes_per_call=None):
if (not src_obj_metadata or
not src_obj_metadata.bucket or
not src_obj_metadata.name or
not src_obj_metadata.etag or
not dst_obj_metadata or
not dst_obj_metadata.bucket or
not dst_obj_metadata.name or
not projection):
return
md5_hash = hashlib.md5()
for input_param in (
src_obj_metadata, dst_obj_metadata, projection, src_generation,
gen_match, meta_gen_match, canned_acl, fields, max_bytes_per_call):
md5_hash.update(str(input_param))
return md5_hash.hexdigest() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def generate_hash(*args):\n key = bytes(' '.join(args), 'utf_8')\n hashh = hashlib.md5()\n hashh.update(key)\n return hashh.hexdigest()",
"def MD5(self) -> _n_0_t_3[_n_0_t_9]:",
"def hash_args(self, args, secret=None):\n for a in args:\n if isinstance(args[a], list): args[a] = json.dumps(args[a])\n\n args_joined = ''\n for a in sorted(args.keys()):\n if isinstance(a, unicode):\n args_joined += a.encode('utf-8')\n else:\n args_joined += str(a)\n\n args_joined += '='\n\n if isinstance(args[a], unicode):\n args_joined += args[a].encode('utf-8')\n else:\n args_joined += str(args[a])\n\n hash = hashlib.md5(args_joined)\n\n if secret:\n hash.update(secret)\n elif self.api_secret:\n hash.update(self.api_secret)\n return hash.hexdigest()",
"def get_request_digest(self, prepared_request):\n d = hashlib.md5()\n d.update(prepared_request.method)\n d.update(\":\")\n d.update(prepared_request.path_url)\n d.update(\":\")\n if prepared_request.body:\n d.update(prepared_request.body)\n return d.hexdigest()",
"def checksum(**kwargs):\n\n # remove secretkey from kwargs, lookup if missing\n secretkey = kwargs.pop('secretkey', resolve_secretkey())\n\n # sort the args, and concatenate them\n param_string = ''.join([''.join([str(x), str(y)])\n for x, y in sorted(kwargs.items())])\n\n return b64encode(str(new_hmac(secretkey, param_string, sha1).digest()))",
"def _hash_5tuple(ip_A, ip_B, tp_src, tp_dst, proto):\n if ip_A > ip_B:\n direction = 1\n elif ip_B > ip_A:\n direction = 2\n elif tp_src > tp_dst:\n direction = 1\n elif tp_dst > tp_src:\n direction = 2\n else:\n direction = 1\n hash_5t = hashlib.md5()\n if direction == 1:\n flow_tuple = (ip_A, ip_B, tp_src, tp_dst, proto)\n else:\n flow_tuple = (ip_B, ip_A, tp_dst, tp_src, proto)\n flow_tuple_as_string = str(flow_tuple)\n hash_5t.update(flow_tuple_as_string)\n return hash_5t.hexdigest()",
"def checksum(self, **kwargs):\n try:\n # if a secretkey is in **kwargs, use it, and remove it\n secretkey = kwargs['secretkey']\n del kwargs['secretkey']\n except KeyError:\n # if the kwargs lookup fails, get secretkey elsewhere\n secretkey = self.secretkey or resolve_secretkey()\n args = kwargs.items()\n args.sort()\n\n param_string = ''\n for key, value in args:\n param_string += str(key)\n param_string += str(value)\n return b64encode(str(new_hmac(secretkey, param_string, sha1).digest()))",
"def GenerateHash(params):\n exp_params = params.ConvertToDict()\n return hashlib.sha1(\n repr(sorted(exp_params.items())).encode('utf-8')).hexdigest()",
"def calculate_rule_hash(self, rule):\r\n hash_strings = []\r\n condition_string_prefaces = (\"$\", \"!\", \"#\", \"@\")\r\n # dictionary for substitutions\r\n string_substitutions = {}\r\n all_strings = []\r\n # original code used md5\r\n # m = hashlib.md5()\r\n m = hashlib.sha3_256()\r\n # Adding all string contents to the list\r\n if 'strings' in rule:\r\n for s in rule['strings']:\r\n if s['type'] == \"byte\":\r\n # original code just needed to append the converted hex code as a string. We need to create the dictionary entries for substitutions as well\r\n # hash_strings.append(re.sub(r'[^a-fA-F\\?0-9]+', '', s['value']))\r\n byte_code_string = re.sub(r'[^a-fA-F\\?0-9]+', '', s['value'])\r\n dict_entry = {s['name']: byte_code_string}\r\n string_substitutions.update(dict_entry)\r\n hash_strings.append(byte_code_string)\r\n else:\r\n # The following line was the only portion of this else statement in the original code\r\n # This change takes modifiers into account for string arguments\r\n # hash_strings.append(s['value'])\r\n string_and_modifiers = []\r\n string_and_modifiers.append(s['value'])\r\n if 'modifiers' in s:\r\n for modifier in s['modifiers']:\r\n string_and_modifiers.append(modifier)\r\n string_and_modifiers = \" \".join(string_and_modifiers)\r\n all_strings.append(\"$\"+string_and_modifiers)\r\n dict_entry = {s['name']: string_and_modifiers}\r\n string_substitutions.update(dict_entry)\r\n #hash_strings.append(\"$\"+string_and_modifiers)\r\n all_strings = self.resort_stings_add_commas(all_strings)\r\n # Adding the components of the condition to the list (except the variables)\r\n all_wild_card_1 = \"\\$\\*\"\r\n all_wild_card_2 = \"them\"\r\n for e in rule['condition_terms']:\r\n if re.match(all_wild_card_1, e) or re.match(all_wild_card_2, e):\r\n hash_strings.extend(all_strings)\r\n elif e.startswith(condition_string_prefaces):\r\n if len(e) > 1:\r\n string_preface, string_name = e[:1], e[1:]\r\n string_name = \"$\" + string_name\r\n if e.endswith(\"*\"):\r\n hash_strings.extend(self.resort_stings_add_commas(self.regex_match_string_names_for_values(string_preface, string_name, string_substitutions)))\r\n #hash_strings.extend(\"Pull all the matching strings\")\r\n else:\r\n if string_name in string_substitutions:\r\n substituted = string_preface + string_substitutions[string_name]\r\n hash_strings.append(substituted)\r\n else:\r\n hash_strings.append(e)\r\n else:\r\n hash_strings.append(e)\r\n else:\r\n hash_strings.append(e)\r\n # Generate a hash from the sorted contents\r\n #hash_strings.sort()\r\n m.update(\"\".join(hash_strings).encode(\"ascii\"))\r\n return m.hexdigest()",
"def _md5(input):\n m = hashlib.md5()\n m.update(input)\n return m.hexdigest()",
"def _fingerprint(self):\n hasher = hashlib.md5()\n source = inspect.getsource(self._func)\n hasher.update(source.encode('utf-8'))\n\n return hasher.hexdigest()",
"def hexdigest(self, *args, **kwargs): # real signature unknown\n pass",
"def hexdigest(self, *args, **kwargs): # real signature unknown\n pass",
"def hexdigest(self, *args, **kwargs): # real signature unknown\n pass",
"def hexdigest(self, *args, **kwargs): # real signature unknown\n pass",
"def hexdigest(self, *args, **kwargs): # real signature unknown\n pass",
"def hexdigest(self, *args, **kwargs): # real signature unknown\n pass",
"def seed_hash(*args):\n args_str = str(args)\n return int(hashlib.md5(args_str.encode(\"utf-8\")).hexdigest(), 16) % (2**31)",
"def str_to_hash(self, param):\n param = param.encode('utf-8')\n my_hash = hashlib.md5(param)\n return my_hash.hexdigest()",
"def checksum(*objects):\n hasher = hashlib.md5()\n _checksum(hasher, objects)\n return hasher.hexdigest()",
"def svn_fs_file_md5_checksum(*args):\r\n return _fs.svn_fs_file_md5_checksum(*args)",
"def create_config_hash(config):\n value_str = \"\"\n for section in config.sections:\n for key in section.keys():\n value_str += str(config[section][key])\n value_hash = hashlib.md5(value_str.encode('utf-8')).hexdigest()\n\n return value_hash",
"def hash(self):\n hash_properties = self.artifacts\n return hashlib.md5(','.join(hash_properties).encode()).hexdigest()",
"def get_path_from_args(args):\n a = copy.copy(args)\n del a.rerun\n a = str(a)\n path = hashlib.md5(a.encode()).hexdigest()\n return path",
"def get_path_from_args(args):\n a = copy.copy(args)\n del a.rerun\n a = str(a)\n path = hashlib.md5(a.encode()).hexdigest()\n return path",
"def calc_md5(string):\n\treturn md5(string).hexdigest()",
"def create_hash(*match_props):\n fingerprint = hashlib.md5()\n # fingerprint is a md5 HASH object\n if isinstance(match_props, tuple):\n for p in match_props:\n fingerprint.update(str(p).encode())\n hash_value = fingerprint.hexdigest()\n return hash_value\n else:\n print(\"The input to the create_hash function must be a tuple.\")\n return None",
"def memoize_key(prefix, *args, **kwargs):\n key = hashlib.md5()\n for arg in itertools.chain(args, sorted(kwargs.items())):\n key.update(str(arg))\n return '%s:memoize:%s:%s' % (settings.CACHE_PREFIX,\n prefix, key.hexdigest())",
"def __hash__(self):\n if self._hash is None:\n self._hash = hash(self._scheme) ^ hash(self._host) ^ hash(self._port) ^ hash(self._path) ^ hash(self._query) ^ hash(self._isRegularURI)\n return self._hash",
"def crack_md5(cand_len, b_values):\n global s, K # `s` and `K` are global\n\n slv = z3.Solver()\n \n inp = [z3.BitVec(f'inp_{i}', 32) for i in range(16)]\n\n add_inp_constraint(cand_len, inp, slv)\n\n # MD5 implementation using symbolic variables.\n a0 = 0x67452301 # A\n b0 = 0xefcdab89 # B\n c0 = 0x98badcfe # C\n d0 = 0x10325476 # D\n\n A, B, C, D = a0, b0, c0, d0\n \n for i in range(64):\n if 0 <= i and i <= 15:\n F = (B & C) | (~B & D)\n g = i\n elif 16 <= i and i <= 31:\n F = (D & B) | (~D & C)\n g = (5*i + 1) % 16\n elif 32 <= i and i <= 47:\n F = B ^ C ^ D\n g = (3*i + 5) % 16\n elif 48 <= i <= 63:\n F = C ^ (B | ~D)\n g = (7*i) % 16\n\n F &= 0xFFFFFFFF\n F = (F + A + K[i] + inp[g]) & 0xFFFFFFFF \n A = D\n D = C\n C = B\n\n # NOTE: rol DOES NOT WORK! WE HAVE TO USE z3's `RotateLeft`.\n B = (B + z3.RotateLeft(F, s[i])) & 0xFFFFFFFF\n\n slv.add(B & 0x3FF == b_values[i])\n\n \n # Check for solutions\n def to_ascii(x):\n return chr(x & 0xFF) + chr((x >> 8) & 0xFF) + chr((x >> 16) & 0xFF) + chr(x >> 24)\n\n while slv.check() == z3.sat:\n mdl = slv.model()\n\n print('[+] Solution FOUND!')\n \n flag = ''\n for i, j in enumerate(inp):\n yy = mdl.evaluate(j).as_long() \n print(f'[+] {i:2d} ~~> {yy:08X} ~~> {repr(to_ascii(yy))}')\n flag += to_ascii(yy)\n\n flag = flag[:cand_len]\n\n print('[+] FLAG IS: hxp{%s}' % flag)\n return 1\n else:\n print('[+] Cannot find satisfiable solution :\\\\')\n return -1"
] | [
"0.5853579",
"0.58514607",
"0.5819284",
"0.5770626",
"0.5658538",
"0.55982107",
"0.5581722",
"0.55005234",
"0.5434314",
"0.54289603",
"0.5340615",
"0.53380597",
"0.53380597",
"0.53380597",
"0.53380597",
"0.53380597",
"0.53380597",
"0.53021276",
"0.5255513",
"0.52426136",
"0.5234174",
"0.52038324",
"0.5181833",
"0.5171993",
"0.5171993",
"0.5156983",
"0.5151853",
"0.5150697",
"0.5147734",
"0.5141758"
] | 0.693234 | 0 |
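
A minimal sketch of the hashing idea in the snippet above: fold every call parameter into one MD5 digest so that any change in the inputs invalidates a saved resume token. The function name and the UTF-8 encoding step are assumptions added so the sketch runs on Python 3; they are not part of the original helper:

import hashlib

def hash_call_parameters(*params):
    # Same idea as above: any change in the inputs changes the digest, so a
    # stale tracker entry can be detected and discarded.
    md5_hash = hashlib.md5()
    for param in params:
        md5_hash.update(str(param).encode("utf-8"))
    return md5_hash.hexdigest()

# A changed canned ACL produces a different digest, so the rewrite restarts.
assert (hash_call_parameters("bkt/obj", "full", None)
        != hash_call_parameters("bkt/obj", "full", "public-read"))
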
Attempts to read a rewrite tracker file. | def ReadRewriteTrackerFile(tracker_file_name, rewrite_params_hash):
# Check to see if we already have a matching tracker file.
tracker_file = None
if not rewrite_params_hash:
return
try:
tracker_file = open(tracker_file_name, 'r')
existing_hash = tracker_file.readline().rstrip('\n')
if existing_hash == rewrite_params_hash:
# Next line is the rewrite token.
return tracker_file.readline().rstrip('\n')
except IOError as e:
# Ignore non-existent file (happens first time a rewrite is attempted.
if e.errno != errno.ENOENT:
print('Couldn\'t read Copy tracker file (%s): %s. Restarting copy '
'from scratch.' %
(tracker_file_name, e.strerror))
finally:
if tracker_file:
tracker_file.close() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def read(path):",
"def read(self, filename):\n pass",
"def read(self, filename):\n pass",
"def read_from_file(self, filename: str) -> None:",
"def read_file(self):\n try:\n with open(self.file_name, 'r') as ach_file:\n file_contents = ach_file.read().replace('\\n', '').replace('\\r', '')\n\n self._parse_ach_file(file_contents)\n except FileNotFoundError as err:\n print(\"File does not exist -> \" + str(err))",
"def read(self, filename):\n raise NotImplementedError",
"def read(self, file, path):\n pos, = struct.unpack('<Q', file.read(8))\n if pos == 0:\n raise VergeMLError(\"Invalid cache file: {}\".format(path))\n file.seek(pos)\n self.index, self.meta, self.info = pickle.load(file)",
"def read_file(path_to_file):\n 8",
"def read_relmotion_file(self, datadir='.'):\n\n filename = 'relmotion.txt'\n fullname = Path(datadir) / filename\n\n return self.read_file(fullname)",
"def __read_file(self):\n try:\n with open(self.filename) as fh:\n for line in fh:\n if self.__input_data_ok(line.strip()):\n timestamp, url = line.strip().split(\"|\")\n LOGGER.debug(\"%s %s\" %(timestamp, url))\n self.__create_record(self.__get_date_string(timestamp), url)\n else:\n LOGGER.warn(\"URLCrawler Malformed Line (Skipping): \\\"%s\\\"\" %line)\n\n LOGGER.debug(json.dumps(self.record_d, indent=4, separators=(',',':')))\n return True\n\n except Exception as e:\n LOGGER.error(\"URLCrawler File Read Exception: %s\" %(e))\n return False",
"def __read_cache_file_if_exists(self) -> None:\n if os.path.exists(self.__cache_file):\n self.__config.open_file(self.__cache_file, \"r\", self.__process_cache)",
"def read_cached_file(filename, cache_info, reload_func=None):\n mtime = os.path.getmtime(filename)\n if not cache_info or mtime != cache_info.get('mtime'):\n LOG.debug(_(\"Reloading cached file %s\") % filename)\n with open(filename) as fap:\n cache_info['data'] = fap.read()\n cache_info['mtime'] = mtime\n if reload_func:\n reload_func(cache_info['data'])\n return cache_info['data']",
"def read_cached_file(filename, cache_info, reload_func=None):\n mtime = os.path.getmtime(filename)\n if not cache_info or mtime != cache_info.get('mtime'):\n LOG.debug(\"Reloading cached file %s\" % filename)\n with open(filename) as fap:\n cache_info['data'] = fap.read()\n cache_info['mtime'] = mtime\n if reload_func:\n reload_func(cache_info['data'])\n return cache_info['data']",
"def read_redo_file(self, redo_file):\n self.GUI_log.write(\n \"-I- reading redo file and processing new temperature bounds\")\n self.redo_specimens = {}\n # first delete all previous interpretation\n for sp in list(self.Data.keys()):\n del self.Data[sp]['pars']\n self.Data[sp]['pars'] = {}\n self.Data[sp]['pars']['lab_dc_field'] = self.Data[sp]['lab_dc_field']\n self.Data[sp]['pars']['er_specimen_name'] = self.Data[sp]['er_specimen_name']\n self.Data[sp]['pars']['er_sample_name'] = self.Data[sp]['er_sample_name']\n # print sp\n # print self.Data[sp]['pars']\n self.Data_samples = {}\n self.Data_sites = {}\n\n fin = open(redo_file, 'r')\n lines = fin.readlines()\n fin.close()\n for Line in lines:\n line = Line.strip('\\n').split()\n specimen = line[0]\n tmin_kelvin = float(line[1])\n tmax_kelvin = float(line[2])\n if specimen not in list(self.redo_specimens.keys()):\n self.redo_specimens[specimen] = {}\n self.redo_specimens[specimen]['t_min'] = float(tmin_kelvin)\n self.redo_specimens[specimen]['t_max'] = float(tmax_kelvin)\n if specimen in list(self.Data.keys()):\n if tmin_kelvin not in self.Data[specimen]['t_Arai'] or tmax_kelvin not in self.Data[specimen]['t_Arai']:\n self.GUI_log.write(\n \"-W- WARNING: can't fit temperature bounds in the redo file to the actual measurement. specimen %s\\n\" % specimen)\n else:\n self.Data[specimen]['pars'] = thellier_gui_lib.get_PI_parameters(\n self.Data, self.acceptance_criteria, self.preferences, specimen, float(tmin_kelvin), float(tmax_kelvin), self.GUI_log, THERMAL, MICROWAVE)\n try:\n self.Data[specimen]['pars'] = thellier_gui_lib.get_PI_parameters(\n self.Data, self.acceptance_criteria, self.preferences, specimen, float(tmin_kelvin), float(tmax_kelvin), self.GUI_log, THERMAL, MICROWAVE)\n self.Data[specimen]['pars']['saved'] = True\n # write intrepretation into sample data\n sample = self.Data_hierarchy['specimens'][specimen]\n if sample not in list(self.Data_samples.keys()):\n self.Data_samples[sample] = {}\n if specimen not in list(self.Data_samples[sample].keys()):\n self.Data_samples[sample][specimen] = {}\n self.Data_samples[sample][specimen]['B'] = self.Data[specimen]['pars']['specimen_int_uT']\n site = thellier_gui_lib.get_site_from_hierarchy(\n sample, self.Data_hierarchy)\n if site not in list(self.Data_sites.keys()):\n self.Data_sites[site] = {}\n if specimen not in list(self.Data_sites[site].keys()):\n self.Data_sites[site][specimen] = {}\n self.Data_sites[site][specimen]['B'] = self.Data[specimen]['pars']['specimen_int_uT']\n\n except:\n print(\"-E- ERROR 1\")\n self.GUI_log.write(\n \"-E- ERROR. Can't calculate PI paremeters for specimen %s using redo file. Check!\\n\" % (specimen))\n else:\n self.GUI_log.write(\n \"-W- WARNING: Can't find specimen %s from redo file in measurement file!\\n\" % specimen)\n print(\n \"-W- WARNING: Can't find specimen %s from redo file in measurement file!\\n\" % specimen)\n if not fin.closed:\n fin.close()\n self.pars = self.Data[self.s]['pars']\n self.clear_boxes()\n self.draw_figure(self.s)\n self.update_GUI_with_new_interpretation()",
"def try_read_file():\n try:\n logging.info('open config file %s', config_file_path)\n with open(config_file_path) as f:\n logging.info('begin io %s', config_file_path)\n config_file = json.load(f)\n logging.info('end io %s', config_file_path)\n return config_file\n except (OSError, IOError) as error:\n logging.info('try_read error %s', error)\n return {}",
"def readFromFile(filename):\n raise NotImplementedError",
"def _load_tracker(self):\n\n if os.path.isfile(config.TRACKER_JSON):\n with self.__writelock, open(config.TRACKER_JSON, encoding='utf-8-sig') as f:\n d = json.loads(f.read())\n try:\n self.stats.previous_requests = d[self.maps.key]\n except KeyError:\n self.stats.previous_requests = 0\n else:\n self.stats.previous_requests = 0",
"def read_locations(db, openfile):\n pass",
"def reread(self) -> None:\n old = self.getSubgraph(self.uri)\n new = Graph()\n try:\n contents = open(self.path).read()\n if contents.startswith(\"#new\"):\n log.debug(\"%s ignoring empty contents of my new file\",\n self.path)\n # this is a new file we're starting, and we should not\n # patch our graph as if it had just been cleared. We\n # shouldn't even be here reading this, but\n # lastWriteTimestamp didn't work.\n return\n\n new.parse(location=self.path, format='n3')\n self.readPrefixes = dict(new.namespaces())\n except SyntaxError as e:\n print(e)\n traceback.print_exc()\n log.error(\"%s syntax error\", self.path)\n # todo: likely bug- if a file has this error upon first\n # read, I think we don't retry it right.\n return\n except IOError as e:\n log.error(\"%s rereading %s: %r\", self.path, self.uri, e)\n return\n\n old = inContext(old, self.uri)\n new = inContext(new, self.uri)\n\n p = Patch.fromDiff(old, new)\n if p:\n log.debug(\"%s applying patch for changes in file\", self.path)\n self.patch(p, dueToFileChange=True)\n else:\n log.debug(\"old == new after reread of %s\", self.path)",
"def readInServers(self):\n # we'll be using the global server tracker file\n global server_tracker_file\n # first, grab a list of all files in the current working directory\n current_dir = os.listdir('.')\n # verify that our server tracker file exists here\n if server_tracker_file not in current_dir:\n # if there's nothing to read in, simply return\n return\n \n # read in the csv\n with open(server_tracker_file, 'rb') as infile:\n # initialize the reader\n reader = csv.reader(infile)\n # verify that the header looks exactly as we expect\n header = reader.next()\n if header != ['Server','Ping Interval','Status']:\n # if this isn't the case, we won't try to read the file\n return\n else:\n # update our servers with the records we know about\n # while we update, we'll keep a count of how many\n # we can successfully read in\n server_count = 0\n for record in reader:\n # pull out the server name and ping interval\n server = record[0]\n try:\n interval = int(record[1])\n except ValueError:\n continue\n # ping the server to determine whether it is online\n # or offline\n status = sendPing(server)\n if status == 'Online':\n # allocate to online\n self.online_servers[server] = [0, interval]\n else:\n # allocate to offline\n self.offline_servers[server] = [0, interval]\n # udpate our count\n server_count += 1\n # repeat for every record from our pseudo memory dump file\n # report and return\n print 'Read in {0} known servers'.format(server_count)\n \n # file read complete\n return",
"def read_file(self, file_name):\n f = file(file_name, \"r\")\n temp = f.read()\n f.close()",
"def read(self, filename): # real signature unknown; restored from __doc__\n pass",
"def read_file(self):\n # This is quite ugly but works for now.\n self.header = read_csv(self.file_name, delim_whitespace=True,\n header=TrackData.header_line,\n nrows=1).to_dict(orient='index')[0]\n self.data = read_csv(self.file_name, delim_whitespace=True, \n header=TrackData.data_line)",
"def _file_read(fname):\n if not os.path.exists(fname):\n parser.error(\"File '{0}' not found.\".format(fname))\n return open(fname, 'r')",
"def read(*rnames):\n with open(os.path.join(os.path.dirname(__file__), *rnames)) as f:\n return f.read()",
"def read(self, file_name):\n msg = \"ReaderWriterBase::read called!\"\n raise NotImplementedError(msg)",
"def read(self):\n self.record_d = {}\n if self.__read_file():\n self.__print_report()",
"def read():\n global counter\n\n try:\n with open(BOOKS_FILE_NAME) as f:\n book_json = json.load(f)\n book_json_manipulation(book_json)\n except FileNotFoundError:\n # First time program has run. Assume no books.\n pass\n\n try:\n with open(COUNTER_FILE_NAME) as f:\n try:\n counter = int(f.read())\n except:\n counter = 0\n except:\n counter = len(book_list)",
"def read_rad_lost(filename, force_override=False, verbose=False):\n \n fpkl = filename + '.p'\n if not force_override and os.path.exists(fpkl) and \\\n os.path.getmtime(fpkl) > os.path.getmtime(filename):\n df = pd.read_pickle(fpkl)\n if verbose:\n print('[read_radiators]: reading from existing pickle.')\n\n if verbose:\n print('[read_radiators]: pickle does not exist or file updated.' + \\\n ' Reading {0:s}'.format(filename))\n\n df = pd.read_csv(filename, sep=' ', header=None, skiprows=0)\n\n # drop nan column (due to space at the end of line in output file)\n df = df.drop(labels=[df.columns[-1]], axis=1)\n col = {0:'time',1:'nfreq',2:'nsrc',3:'N_mu'}\n nfreq = df[1][0]\n N_mu = df[3][0]\n for i in range(4, 4 + nfreq):\n col[i] = 'L_tot{0:d}'.format(i-4)\n\n df = df.rename(columns=col)\n\n return df",
"def read_file(self) -> None:\n if not os.path.exists(self.location) or not os.path.isfile(\n self.location\n ):\n raise FileNotFoundError(self.location)\n\n self.graph = rdflib.Graph()\n try:\n if self.file_type is None:\n self.graph.parse(\n self.location, format=self.location.split(\".\")[-1]\n )\n else:\n self.graph.parse(self.location, format=self.file_type)\n except Exception:\n self.graph.parse(self.location)\n\n for (s, p, o) in self.graph:\n if p not in self.label_predicates:\n s_v = Vertex(str(s))\n o_v = Vertex(str(o))\n p_v = Vertex(str(p), predicate=True, vprev=s_v, vnext=o_v)\n self.add_vertex(s_v)\n self.add_vertex(p_v)\n self.add_vertex(o_v)\n self.add_edge(s_v, p_v)\n self.add_edge(p_v, o_v)"
] | [
"0.5381786",
"0.5348522",
"0.5348522",
"0.5151396",
"0.5106103",
"0.5066471",
"0.50565517",
"0.5037974",
"0.5034386",
"0.5010468",
"0.5004394",
"0.49670166",
"0.49628448",
"0.49199966",
"0.49115968",
"0.4866254",
"0.48558313",
"0.48332718",
"0.481736",
"0.4799835",
"0.47888243",
"0.4787947",
"0.47855544",
"0.4776181",
"0.47723836",
"0.4754657",
"0.47333592",
"0.4701616",
"0.46982276",
"0.46953094"
] | 0.7462509 | 0 |
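
For the two-line tracker layout assumed above (parameter hash, then resume token), a hedged standalone sketch of the read path; the function name is hypothetical and the missing-file handling mirrors the ENOENT check in the snippet:

import errno

def read_two_line_tracker(path, expected_hash):
    # Line 1: parameter hash, line 2: resume token (the layout assumed above).
    # A missing file simply means there is nothing to resume.
    try:
        with open(path, "r") as tracker:
            if tracker.readline().rstrip("\n") == expected_hash:
                return tracker.readline().rstrip("\n")
    except IOError as e:
        if e.errno != errno.ENOENT:
            print("Couldn't read tracker file (%s): %s." % (path, e.strerror))
    return None
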
Writes a rewrite tracker file. | def WriteRewriteTrackerFile(tracker_file_name, rewrite_params_hash,
rewrite_token):
_WriteTrackerFile(tracker_file_name, '%s\n%s\n' % (rewrite_params_hash,
rewrite_token)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def write(self, outfile, rebasings=None):\r\n raise NotImplementedError()",
"def _WriteTrackerFile(tracker_file_name, data):\n try:\n with os.fdopen(os.open(tracker_file_name,\n os.O_WRONLY | os.O_CREAT, 0600), 'w') as tf:\n tf.write(data)\n return False\n except (IOError, OSError) as e:\n raise RaiseUnwritableTrackerFileException(tracker_file_name, e.strerror)",
"def write(self, filename):\n pass",
"def write(self, filename):\n pass",
"def write_to_file(self, filename: str) -> None:",
"def write(self, fname):\n pass",
"def writefile():\n\n print(\"Writing to file...\")\n\n # Open the heartbeat file in append mode and save the current time.\n with open(settings.ROOT_DIR + \"/heartbeat\", \"a\") as f:\n f.write(str(time()))",
"def saveStatsFile(self):\n if not os.path.exists(\"stats\"):\n os.mkdir(\"stats\")\n now = datetime.datetime.now()\n parts = [now.year, now.month, now.day]\n parts = [\"%02d\"%x for x in parts]\n todaysFileName = \"-\".join(parts)+\".txt\" \n timeStamp = time.strftime(\"%y%m%d%H%M\", time.localtime())\n log = \",\".join(self.logLinesStats)\n fname = \"stats/\"+todaysFileName\n with open(fname, 'a') as f:\n f.write(timeStamp+\",\"+log+\"\\n\")\n self.log(\"wrote \"+fname)",
"def writeOutServers(self):\n # we'll be writing out to the server tracker file, overwriting\n # anything that may exist in it\n global server_tracker_file\n \n with open(server_tracker_file, 'wb') as outfile:\n # let's leverage the printStatus method we have\n outfile.write(self.printStatus())\n \n return",
"def filewrite(self, filename):\n io.write(self, filename)",
"def write_file(self):\n if self._write_file == None:\n return\n\n try:\n out = file(self._write_file, \"w\")\n except IOError, e:\n print e\n sys.exit(1)\n out.writelines(\"A cases\") \n out.close()",
"def rewrite_all_file(self, data):\r\n with open(self.file_name, 'w', encoding='utf-8') as self.file:\r\n self.file.write(data)",
"def write(self, filename): # real signature unknown; restored from __doc__\n pass",
"def saveLogFile(self, fname = \"data/status.txt\"):\n with open(fname, 'w') as f:\n f.write(\"<br>\\n\".join(self.logLines))\n self.log(\"wrote \"+fname)",
"def write_to_file(self, file, content):\n with open(file, 'a') as report_file:\n report_file.write('{}\\n'.format(content))",
"def write_to_file(self, filepath, mode = \"a\"): \n if \"r\" in mode: \n print(\"Only accepts write and append modes\")\n return \n with open(filepath, mode) as f: \n f.write(\"{}\\n\".format(self.title))\n verified, seen, ratio = self.get_verified_ratio()\n f.write(\"Verified Names: {}\\n\".format(str(verified)))\n f.write(\"Names: {}\\n\".format(str(seen)))\n f.write(\"Ratio: {}\\n\".format(str(ratio)))",
"def write(self, file):\n pos = file.tell()\n pickle.dump((self.index, self.meta, self.info), file)\n file.seek(0)\n\n # update the header with the position of the content index.\n file.write(struct.pack('<Q', pos))",
"def _file_writer(self, lines, filename):\n if self.MockRun:\n return\n\n if self.Verbose:\n print \"Writing file %s\" % filename\n\n updated_file = open(filename, 'w')\n updated_file.write(''.join(lines))\n updated_file.close()",
"def _write_cache_file(self, data):\n\n with open(self.cache_file, mode='wb') as f:\n f.write(data)\n\n self.log.info(f\"Cached facilities at {self.cache_file}\")",
"def write_to_file(info: List[str]) -> None:\n return",
"def _save_tracker_uri_to_file(self):\r\n if not self.tracker_file_name:\r\n return\r\n f = None\r\n try:\r\n f = open(self.tracker_file_name, 'w')\r\n f.write(self.tracker_uri)\r\n except IOError, e:\r\n raise ResumableUploadException(\r\n 'Couldn\\'t write URI tracker file (%s): %s.\\nThis can happen'\r\n 'if you\\'re using an incorrectly configured upload tool\\n'\r\n '(e.g., gsutil configured to save tracker files to an '\r\n 'unwritable directory)' %\r\n (self.tracker_file_name, e.strerror),\r\n ResumableTransferDisposition.ABORT)\r\n finally:\r\n if f:\r\n f.close()",
"def __write_measurement(self, measurement):\n with self.__filename.open(mode='a') as history_file:\n history_file.write(measurement + '\\n')",
"def file_write(stuff, file_path):\n with open(file_path, \"wt\") as fo:\n fo.write(stuff)",
"def write_sitemap ( self ):\n try:\n self.output_fd = open ( file=dflt_cfg.DFLT_CFG[ OUTPUT_PATH ], mode='w' )\n self.print_url_links ( self.root )\n except (PermissionError, AttributeError) as err:\n self.logger.error ( \"Error {0} occurred. Output file {1} cannot be created\".format ( err, \\\n dflt_cfg.DFLT_CFG[\n OUTPUT_PATH ] ) )\n except Exception as err:\n self.logger.error ( \"Error {0} occurred while writing sitemap in output file: {1}\".format ( err, \\\n dflt_cfg.DFLT_CFG[ OUTPUT_PATH ] ) )\n self.output_fd.close ( )\n else:\n print(\"Sitemap for {} is written in {}.\".format(dflt_cfg.DFLT_CFG[DOMAIN], dflt_cfg.DFLT_CFG[ OUTPUT_PATH ]))\n print( \"Logs (Broken or dead URLs along with application logs) for domain {0} are available in {1} directory.\".format ( dflt_cfg.DFLT_CFG[DOMAIN], \"./logs\" ) )\n self.output_fd.close ( )",
"def write(self):\n # # Sometimes file is not written properly. So delete and rewrite it\n # os.system('rm {}'.format(snip_dir + '/' + self.name))\n # if 'NUM_TIME_STEPS' not in self.define.keys():\n # warnings.warn('NUM_TIME_STEPS missing in header. Execution may hang!')\n with open(snip_dir + '/' + self.name, 'w') as f:\n f.write('/* Temporary generated file for snip process definitions before compilation */\\n')\n f.write(self.__str__())\n\n # os.system('ls {}'.format(snip_dir + '/' + self.name))",
"def writeShiftFile(self, filename=\"shifts.txt\"):\n lines = ['# frame: ', self['frame'], '\\n',\n '# refimage: ', self['refimage'], '\\n',\n '# form: ', self['form'], '\\n',\n '# units: ', self['units'], '\\n']\n\n for o in self['order']:\n ss = \" \"\n for shift in self[o]:\n ss += str(shift) + \" \"\n line = str(o) + ss + \"\\n\"\n lines.append(line)\n\n fshifts= open(filename, 'w')\n fshifts.writelines(lines)\n fshifts.close()",
"def write_file(self):\n rl_df, lift_df = self.create_df()\n\n number = re.findall('\\d+', self.url)[0]\n\n if self.write is True:\n with open('house_{}.csv'.format(number), 'w',\n encoding='utf-8-sig') as file:\n rl_df.to_csv(file, sep=';')\n with open('house_lifts_{}.csv'.format(number), 'w',\n encoding='utf-8-sig') as file2:\n lift_df.to_csv(file2, sep=';')",
"def writeToFile(self, basedir, write_code=0):",
"def write_file(self):\n\n running_time = str(self.running_time_end - self.running_time_start)\n rounded_running_time = '{:.10}'.format(running_time)\n output = 'path_to_goal: ' + str(self.path_to_goal) + '\\n'\n output += 'cost_of_path: ' + str(self.cost_of_path) + '\\n'\n output += 'nodes_expanded: ' + str(self.nodes_expanded) + '\\n'\n output += 'fringe_size: ' + str(self.fringe_size) + '\\n'\n output += 'max_fringe_size: ' + str(self.max_fringe_size) + '\\n'\n output += 'search_depth: ' + str(self.search_depth) + '\\n'\n output += 'max_search_depth: ' + str(self.max_search_depth) + '\\n'\n output += 'running_time: ' + rounded_running_time + '\\n'\n\n system_name = system()\n if system_name == 'Windows':\n output += 'max_ram_usage: (Not available on Windows OS)'\n elif system_name == 'Linux':\n output += 'max_ram_usage: ' + \\\n str(getrusage(RUSAGE_SELF).ru_maxrss / 1024) + '\\n'\n\n file = open('output.txt', 'w+')\n file.write(output)\n print(output)",
"def write(self, args, file_dat):\n assert self.checker_(file_dat)\n file_path = self.path(args)\n file_str = self.writer_(file_dat)\n autofile.write_file(file_path, file_str)"
] | [
"0.6788737",
"0.63134295",
"0.62709975",
"0.62709975",
"0.6243932",
"0.6146459",
"0.6125916",
"0.5933308",
"0.5903819",
"0.588318",
"0.5856907",
"0.5842018",
"0.579014",
"0.577316",
"0.57695144",
"0.57399994",
"0.5729899",
"0.5718736",
"0.56982774",
"0.5697997",
"0.568461",
"0.56658494",
"0.56544864",
"0.56486976",
"0.56408924",
"0.5631406",
"0.55907416",
"0.55608326",
"0.5541611",
"0.5525625"
] | 0.7979988 | 0 |
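
A companion sketch of the write path, assuming the same two-line layout; the 0o600 permissions and the O_TRUNC flag follow the _WriteTrackerFile negative listed above rather than any documented gsutil behavior:

import os

def write_two_line_tracker(path, params_hash, token):
    # Persist the '%s\n%s\n' layout used above with restrictive permissions,
    # truncating any stale contents from a previous attempt.
    fd = os.open(path, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o600)
    with os.fdopen(fd, "w") as tracker:
        tracker.write("%s\n%s\n" % (params_hash, token))
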
Checks for a download tracker file and creates one if it does not exist. | def ReadOrCreateDownloadTrackerFile(src_obj_metadata, dst_url,
api_selector):
if src_obj_metadata.size < ResumableThreshold():
    # Don't create a tracker file for small downloads; cross-process resumes
# won't work, but restarting a small download is inexpensive.
return False
assert src_obj_metadata.etag
tracker_file_name = GetTrackerFilePath(
dst_url, TrackerFileType.DOWNLOAD, api_selector)
tracker_file = None
# Check to see if we already have a matching tracker file.
try:
tracker_file = open(tracker_file_name, 'r')
etag_value = tracker_file.readline().rstrip('\n')
if etag_value == src_obj_metadata.etag:
return True
except IOError as e:
# Ignore non-existent file (happens first time a download
# is attempted on an object), but warn user for other errors.
if e.errno != errno.ENOENT:
print('Couldn\'t read URL tracker file (%s): %s. Restarting '
'download from scratch.' %
(tracker_file_name, e.strerror))
finally:
if tracker_file:
tracker_file.close()
# Otherwise, create a new tracker file and start from scratch.
_WriteTrackerFile(tracker_file_name, '%s\n' % src_obj_metadata.etag) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def download_agent_if_missing(filename):\n if file_missing(filename):\n print filename+'is missing, downloading it first'\n download(filename)",
"def download_if_needed(url, filename):\n if os.path.exists(filename):\n print \"already exists\"\n else:\n wget.download(url)",
"def CreateTrackerDirIfNeeded():\n tracker_dir = config.get(\n 'GSUtil', 'resumable_tracker_dir',\n os.path.join(GetGsutilStateDir(), 'tracker-files'))\n CreateDirIfNeeded(tracker_dir)\n return tracker_dir",
"def download_if_not_exists(filename, url):\n if not os.path.exists(filename):\n download_file(filename, url)\n return True\n return False",
"def download_if_not_exists(filename, url):\n if not os.path.exists(filename):\n download_file(filename, url)\n return True\n return False",
"def _download_if_needed(file_path, url, show_progress):\n if file_path.exists() and not file_path.is_file():\n raise NotAFileError(file_path)\n elif not file_path.exists():\n get_logger().info('Downloading %s ...', file_path)\n reporthook = None\n if show_progress:\n reporthook = _UrlRetrieveReportHook()\n urllib.request.urlretrieve(url, str(file_path), reporthook=reporthook)\n if show_progress:\n print()\n else:\n get_logger().info('%s already exists. Skipping download.', file_path)",
"def _maybe_download(self, filename, work_directory):\n if not os.path.exists(work_directory):\n os.mkdir(work_directory)\n filepath = os.path.join(work_directory, filename)\n if not os.path.exists(filepath):\n filepath, _ = urllib.urlretrieve(self.url + filename, filepath)\n statinfo = os.stat(filepath)\n log.info('Successfully downloaded', filename, statinfo.st_size,\n 'bytes.')\n return filepath",
"def _check_url_file (url, path_download, outfile) :\n if \"http://\" in url.lower () :\n dest = outfile if outfile != None else _get_file_url (url, path_download)\n down = False\n nyet = dest + \".notyet\"\n \n if os.path.exists (dest) and not os.path.exists (nyet) :\n try :\n fLOG(\"trying to connect\", url)\n f1 = urllib.urlopen (url)\n down = _first_more_recent (f1, dest)\n newdate = down\n f1.close ()\n except IOError :\n fLOG(\"unable to connect Internet, working offline for url\", url)\n down = False\n else : \n down = True\n newdate = False\n \n if down :\n if newdate : fLOG (\" downloading (updated) \", url)\n else : fLOG (\" downloading \", url)\n \n if len (url) > 4 and url [-4].lower () in [\".txt\", \".csv\", \".tsv\", \".log\"] :\n fLOG (\"creating text file \", dest)\n format = \"w\"\n else : \n fLOG (\"creating binary file \", dest)\n format = \"wb\"\n \n if os.path.exists (nyet) :\n size = os.stat (dest).st_size\n fLOG (\"resume downloading (stop at\", size, \") from \", url)\n request = urllib.request.Request(url) \n request.add_header(\"Range\", \"bytes=%d-\" % size)\n fu = urllib.request.urlopen (request) \n f = open (dest, format.replace (\"w\", \"a\"))\n else :\n fLOG (\"downloading \", url)\n request = urllib.request.Request(url) \n fu = urllib.request.urlopen (url)\n f = open (dest, format)\n \n open (nyet, \"w\").close ()\n c = fu.read (2**21)\n size = 0\n while len (c) > 0 :\n size += len (c)\n fLOG(\" size\", size)\n f.write (c)\n f.flush ()\n c = fu.read (2**21)\n fLOG (\"end downloading\")\n f.close ()\n fu.close ()\n os.remove (nyet)\n \n url = dest\n return url",
"def download_if_not_exist(self):\n for (fname, furl) in cornell_file_urls:\n # dir_path = os.path.dirname(os.path.realpath(__file__))\n input_folder = '{input_dir}/cornell'.format(input_dir=self.input_dir)\n full_dirname = input_folder\n full_fname = '/'.join([full_dirname, fname])\n if not file_exists(full_fname):\n remote_file = urlopen(furl)\n data = remote_file.read()\n remote_file.close()\n # Try creating the dir\n try_create_dir(full_dirname)\n print('download if not exist fname:', fname, 'url:', furl)\n # Write the file\n with open(full_fname, 'wb') as f:\n f.write(data)",
"def _check_file_exists_helper(self, report_path, filename):\n\n if not check_data_exists(report_path, [filename]):\n raise AssertionError(\n \"{} does not exist in location {}\".format(\n filename, report_path\n )\n )",
"def download_track(self, track = None, url = None):\n # check that track doesn't exist\n if url == None or track == None:\n return\n\n print \"Retrieving the name of the track.\"\n filename = self.get_track_filename(url)\n\n print \"Filename found: \" + filename\n \n if (filename, track.user[\"username\"]) in self.past_songs_db_data or \\\n (filename, \"\") in self.past_songs_db_data or \\\n os.path.isfile(filename): \n print \"File exists\"\n else:\n print \"Downloading\"\n filename = wget.download(url)\n self.set_track_metadata(track, filename, url)\n mp3_name = filename[:-4] + \".mp3\"\n\n # Save filename for future reference\n self.past_songs_db.write(filename + \"\\n\")\n self.past_songs_db_data.append((filename, track.user[\"username\"]))\n \n if not filename.endswith(\".mp3\"):\n self.past_songs_db.write(mp3_name + \"\\n\")\n self.past_songs_db_data.append((mp3_name, track.user[\"username\"]))\n \n print",
"def _save_tracker_uri_to_file(self):\r\n if not self.tracker_file_name:\r\n return\r\n f = None\r\n try:\r\n f = open(self.tracker_file_name, 'w')\r\n f.write(self.tracker_uri)\r\n except IOError, e:\r\n raise ResumableUploadException(\r\n 'Couldn\\'t write URI tracker file (%s): %s.\\nThis can happen'\r\n 'if you\\'re using an incorrectly configured upload tool\\n'\r\n '(e.g., gsutil configured to save tracker files to an '\r\n 'unwritable directory)' %\r\n (self.tracker_file_name, e.strerror),\r\n ResumableTransferDisposition.ABORT)\r\n finally:\r\n if f:\r\n f.close()",
"def check_exists(self, name):\n if self.pyload.config.get(\"download\", \"skip_existing\"):\n download_folder = self.pyload.config.get(\n 'general', 'download_folder')\n dest_file = fsjoin(download_folder,\n self.pyfile.package().folder if self.pyload.config.get(\n \"general\", \"folder_per_package\") else \"\",\n name)\n if exists(dest_file):\n self.pyfile.name = name\n self.skip(_(\"File exists.\"))",
"def test_DL_export_existing_file(self):\n filepath = '3.txt'\n existing_file = open(filepath, 'x')\n existing_file.write(\"This file is existing.\")\n existing_file.close()\n dl = flow_processing_input.DetectorsLocation(2021)\n dl.detectors_location_dict = createDLDataset(5).dataset\n # Check if warning was raised for existing file\n with warnings.catch_warnings(record=True) as w:\n dl.export_to_file(filepath)\n self.assertTrue(len(w) == 1)\n os.remove(filepath)",
"def maybe_download(directory, filename, url):\n if not os.path.exists(directory):\n print(\"Creating directory %s\" % directory)\n os.mkdir(directory)\n filepath = os.path.join(directory, filename)\n if not os.path.exists(filepath):\n print(\"Downloading %s to %s\" % (url, filepath))\n filepath, _ = urllib.request.urlretrieve(url, filepath)\n statinfo = os.stat(filepath)\n print(\"Succesfully downloaded\", filename, statinfo.st_size, \"bytes\")\n return filepath",
"def ensure_file_exists_and_open(filename):\n if not os.path.exists(filename):\n return open(filename, 'w')\n else:\n return open(filename, 'a')",
"def _ensure_attachment_exists(target):\n if target is not None:\n target = Path(target)\n if not target.exists():\n msg = f'COMMUNICATOR WARNING: The file specified for attachment to email does not exist'\n fancy_print(msg, fg=COMMUNICATOR_WARN_COLOR)\n return False\n return True",
"def maybe_download(url, dest):\n if not os.path.exists(dest):\n logger.info('Downloading %s to %s', url, dest)\n download(url, dest)",
"def maybe_download(url, dest):\n if not os.path.exists(dest):\n logger.info('Downloading %s to %s', url, dest)\n download(url, dest)",
"def get_gtfs(agency, fetch):\n if not fetch.get('filename') or not fetch.get('file_url'):\n print \"Feed reference incomplete!:\", fetch\n return\n makedirs(agency)\n filename = os.path.join(agency, fetch['filename'])\n if os.path.exists(filename) and os.stat(filename).st_size == fetch['size']:\n print \"Existing, skipping:\", fetch['file_url']\n else:\n print \"Downloading:\", fetch['file_url']\n urllib.urlretrieve(fetch['file_url'], filename)\n print \"Done\"",
"def maybe_download(directory, filename, url):\n if not os.path.exists(directory):\n print(\"Creating directory %s\" % directory)\n os.mkdir(directory)\n filepath = os.path.join(directory, filename)\n if not os.path.exists(filepath):\n print(\"Downloading %s to %s\" % (url, filepath))\n filepath, _ = urllib.request.urlretrieve(url, filepath)\n statinfo = os.stat(filepath)\n print(\"Successfully downloaded\", filename, statinfo.st_size, \"bytes\")\n return filepath",
"def maybe_download(directory, filename, url):\n if not os.path.exists(directory):\n print(\"Creating directory %s\" % directory)\n os.mkdir(directory)\n filepath = os.path.join(directory, filename)\n if not os.path.exists(filepath):\n print(\"Downloading %s to %s\" % (url, filepath))\n filepath, _ = urllib.request.urlretrieve(url, filepath)\n statinfo = os.stat(filepath)\n print(\"Succesfully downloaded\", filename, statinfo.st_size, \"bytes\")\n return filepath",
"def download_track(trackpath):\n\n if trackpath.startswith('gs://'):\n localpath = path.basename(trackpath)\n if not any([path.exists(localpath), path.isfile(localpath)]):\n subprocess.run(['gsutil', '-m', 'cp', trackpath, './'], check=True)\n\n elif any(trackpath.startswith(prefix) for prefix in 'http:// https:// ftp://'.split()):\n localpath = path.basename(trackpath)\n if not any([path.exists(localpath), path.isfile(localpath)]):\n subprocess.run(['wget', '--no-check-certificate', trackpath], check=True)\n\n else:\n localpath = trackpath\n\n # Correct for URL convention of replacing spaces with \"%20\"\n localpath = localpath.replace('%20', ' ')\n\n return localpath",
"def test_load_missing_file(self):\n # Technically there's a race condition here, but... I'm not\n # particularly fussed about it.\n\n filename = '/%s' % (uuid.uuid4())\n while os.path.exists(filename): # pragma: no cover\n filename = '/%s' % (uuid.uuid4())\n\n with self.assertRaises(Exception):\n track = Track.from_filename(filename)",
"def download_if_stale(filepath, fileurl):\n if not os.path.exists(filepath) or needs_refreshing(filepath):\n try:\n urllib.request.urlretrieve(fileurl, filepath)\n except urllib.error.HTTPError:\n print('The {0} is not reachable'.format(fileurl))",
"def _maybe_download(self, url):\n filename = os.path.basename(url)\n download_path = os.path.join(self._model_dir, filename)\n if os.path.exists(download_path):\n return download_path\n\n def _progress(count, block_size, total_size):\n sys.stdout.write(\n '\\r>> Downloading %s %.1f%%' %\n (filename, float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n\n urllib.request.urlretrieve(url, download_path, _progress)\n statinfo = os.stat(download_path)\n print('Succesfully downloaded', filename, statinfo.st_size, 'bytes.')\n return download_path",
"def maybe_download(filename, work_directory):\n if not os.path.exists(work_directory):\n os.mkdir(work_directory)\n filepath = os.path.join(work_directory, filename)\n if not os.path.exists(filepath):\n filepath, _ = urlretrieve(SOURCE_URL + filename, filepath)\n statinfo = os.stat(filepath)\n print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')\n return filepath",
"def maybe_download(filename):\n if not tf.gfile.Exists(WORK_DIRECTORY):\n tf.gfile.MakeDirs(WORK_DIRECTORY)\n filepath = os.path.join(WORK_DIRECTORY, filename)\n if not tf.gfile.Exists(filepath):\n filepath, _ = urllib.request.urlretrieve(SOURCE_URL + filename, filepath)\n with tf.gfile.GFile(filepath) as f:\n size = f.size()\n print('Successfully downloaded', filename, size, 'bytes.')\n return filepath",
"def maybe_download(filename):\n if not tf.gfile.Exists(WORK_DIRECTORY):\n tf.gfile.MakeDirs(WORK_DIRECTORY)\n filepath = os.path.join(WORK_DIRECTORY, filename)\n if not tf.gfile.Exists(filepath):\n filepath, _ = urllib.request.urlretrieve(SOURCE_URL + filename, filepath)\n with tf.gfile.GFile(filepath) as f:\n size = f.size()\n print('Successfully downloaded', filename, size, 'bytes.')\n return filepath",
"def maybe_download(filename):\n\n if not tf.gfile.Exists(WORK_DIRECTORY):\n tf.gfile.MakeDirs(WORK_DIRECTORY)\n filepath = os.path.join(WORK_DIRECTORY, filename)\n if not tf.gfile.Exists(filepath):\n filepath, _ = urllib.request.urlretrieve(SOURCE_URL + filename, filepath)\n with tf.gfile.GFile(filepath) as f:\n size = f.Size()\n print('Successfully downloaded', filename, size, 'bytes.')\n return filepath"
] | [
"0.6273756",
"0.60826844",
"0.597988",
"0.59786946",
"0.59786946",
"0.59084964",
"0.577573",
"0.5752773",
"0.5725254",
"0.56218076",
"0.5591753",
"0.55877703",
"0.55799156",
"0.55631",
"0.55320764",
"0.5526527",
"0.5502081",
"0.5462428",
"0.5462428",
"0.5461796",
"0.54582834",
"0.54351085",
"0.5433621",
"0.5406951",
"0.5375907",
"0.5353336",
"0.53446496",
"0.53425723",
"0.53425723",
"0.5335696"
] | 0.69397694 | 0 |
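
A condensed sketch of the etag check performed above: return True when an existing tracker already records the object's etag, otherwise rewrite the tracker and report that the download must start over. The name and the simplified signature are illustrative assumptions:

import errno

def tracker_matches_or_reset(path, etag):
    # True -> a tracker with this etag already exists, so the download can
    # resume; False -> the tracker was (re)written and we start from scratch.
    try:
        with open(path, "r") as tracker:
            if tracker.readline().rstrip("\n") == etag:
                return True
    except IOError as e:
        if e.errno != errno.ENOENT:
            print("Couldn't read tracker file (%s): %s." % (path, e.strerror))
    with open(path, "w") as tracker:
        tracker.write("%s\n" % etag)
    return False
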
Appends a line to a file if it is not already present, creating the file and its parent directories if needed. | def add_line_to_file(line, filepath):
filepath = os.path.realpath(filepath)
if not os.path.isdir(os.path.dirname(filepath)):
os.makedirs(os.path.dirname(filepath))
found = False
if os.path.isfile(filepath):
with open(filepath, 'r+') as myfile:
lst = myfile.readlines()
for existingline in lst:
if line in existingline:
print("line already present")
found = True
if not found:
myfile = open(filepath, 'a+')
myfile.write(line+"\n")
myfile.close() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def write_line(self, line):\n if self.status:\n with open(self.file_out_name,'a') as fout:\n fout.write(line)",
"def append_line(self, file_path, line):\n try:\n with open(file_path, \"a\") as file:\n file.write(line + \"\\n\")\n return True\n except FileNotFoundError:\n logging.error(\n \"Could not write {}, maybe the parent directory is missing.\".format(\n file_path\n )\n )\n return False\n except PermissionError:\n logging.error(\n \"Could not write {}, you don't have sufficient permissions to write here.\".format(\n file_path\n )\n )\n return False",
"def append_line_to_log(line = '\\n'):\n with open(logPath, 'a') as f:\n f.write(line + '\\n')",
"def new_write_line(self, line):\n if self.status:\n with open(self.file_out_name,'w') as fout:\n fout.write(line)",
"def _write_line(self, line):\n self._file.write(line + '\\n')",
"def line_prepender(file_path: str, line: str) -> None:\n with open(file_path, 'r+') as f:\n content = f.read()\n f.seek(0, 0)\n f.write(line.rstrip('\\r\\n') + '\\n' + content)",
"def write_to_file(self, line):\r\n self.file.write(line)\r\n self.file.write(NEW_LINE)",
"def append_new_line(file_name, text_to_append):\n # Open the file in append & read mode ('a+')\n with open(file_name, \"a+\") as file_object:\n # Move read cursor to the start of file.\n file_object.seek(0)\n # If file is not empty then append '\\n'\n data = file_object.read(100)\n if len(data) > 0:\n file_object.write(\"\\n\")\n # Append text at the end of file\n file_object.write(text_to_append)",
"def append_file(file_path, contents):\n logger.debug(f'append to file:{file_path}')\n with open(file_path, 'r+') as outfile:\n for line in outfile:\n if contents.strip() in line:\n break\n else:\n outfile.write(contents)",
"def append(self, line):\n self.buffer.append(line)",
"def add_to_output(line: str) -> None:\n with open(OUTPUT, \"a+\") as problem_output:\n problem_output.write(line + \"\\n\")",
"def prepend_line(file_name, line):\n # define name of temporary dummy file\n dummy_file = file_name + '.bak'\n # open original file in read mode and dummy file in write mode\n with open(file_name, 'r') as read_obj, open(dummy_file, 'w') as write_obj:\n # Write given line to the dummy file\n write_obj.write(line + '\\n')\n # Read lines from original file one by one and append them to the dummy file\n for line in read_obj:\n write_obj.write(line)\n # remove original file\n os.remove(file_name)\n # Rename dummy file as the original file\n os.rename(dummy_file, file_name)",
"def file_append(filepath,contents):\n with open(filepath, 'a') as f:\n f.write(contents+'\\n')",
"def write_file(data):\n with open(FILE_NAME, 'w') as f:\n if not (data in read_file()):\n f.write(data)",
"def make_append_file(fpath):\n\n if fpath is None:\n return None\n\n file(fpath, 'w')\n return file(fpath, 'a')",
"def add_import_line(self, line: str) -> None:\n if line not in self._import_lines:\n self._import_lines.append(line)",
"def write_file(a_file, lines):\r\n return append_file(a_file, lines, append=False)",
"def write_file(a_file, lines):\r\n return append_file(a_file, lines, append=False)",
"def append_write(filename=\"\", text=\"\"):\n with open(filename, \"a\") as file:\n return file.write(text)",
"def append_write(filename=\"\", text=\"\"):\n with open(filename, 'a') as f:\n return f.write(text)",
"def append_write(filename=\"\", text=\"\"):\n with open(filename, mode=\"a\") as file:\n return file.write(text)",
"def append_write(filename=\"\", text=\"\"):\n with open(filename, mode=\"a+\") as f:\n return f.write(text)",
"def __appendToFile(self, st):\n fh = open(self.__fileName, \"a\")\n line = st.get_id_student() + \" \" + st.get_nume_student()\n fh.write(\"\\n\")\n fh.write(line)\n fh.close()",
"def append_end(self, data):\r\n with open(self.file_name, 'a', encoding='utf-8') as self.file:\r\n self.file.write(data)",
"def append(self, file):\n\t\tself.stream.next.append_file(file)",
"def Prepend(filepath, text):\n file_data = text\n if os.path.exists(filepath):\n file_data += open(filepath).read()\n f = open(filepath, 'w')\n f.write(file_data)\n f.close()",
"def append_write(filename=\"\", text=\"\"):\n with open(filename, \"a\", encoding=\"utf-8\") as file1:\n return file1.write(text)",
"def write(self, line):\r\n\r\n self.buffer.append(line)\r\n if (len(self.buffer) > self.buf_size):\r\n self._flush()",
"def append_write(filename=\"\", text=\"\"):\n with open(filename, \"a\", encoding=\"utf-8\") as content:\n return content.write(text)",
"def append_file(a_file, lines, append=True):\r\n try_times = 1\r\n a_file = os.path.abspath(a_file)\r\n while True:\r\n try:\r\n if append:\r\n a_ob = open(a_file, \"a\")\r\n else:\r\n a_ob = open(a_file, \"w\")\r\n if type(lines) is str:\r\n print(lines, file=a_ob)\r\n else:\r\n for item in lines:\r\n print(item, file=a_ob)\r\n a_ob.flush()\r\n a_ob.close()\r\n break\r\n except IOError:\r\n try_times += 1\r\n if try_times > 10:\r\n say_it(\"-- Warning: can not open %s\" % a_file)\r\n return 1\r\n time.sleep(5)\r\n say_it(\"-- Note: try to open %s %d times.\" % (a_file, try_times))"
] | [
"0.704986",
"0.6987986",
"0.6951259",
"0.6841392",
"0.673778",
"0.67046696",
"0.6702362",
"0.65841836",
"0.6418238",
"0.6222445",
"0.61815375",
"0.6160046",
"0.61536825",
"0.6147683",
"0.6115943",
"0.60670424",
"0.60089946",
"0.60089946",
"0.59469897",
"0.594275",
"0.5923019",
"0.5864438",
"0.5856349",
"0.5853752",
"0.5842404",
"0.5825336",
"0.5822935",
"0.58147603",
"0.581226",
"0.58115107"
] | 0.7030425 | 1 |
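For reference, a lightly tidied, self-contained usage sketch of the add_line_to_file document above (not part of the dataset row itself): it adds the import os the stored snippet relies on, uses context managers for both file handles, and drops the diagnostic print for brevity; the path and line passed in are hypothetical. Note that the check "if line in existingline" is a substring match, so a line contained in a longer existing line also counts as already present.

import os

def add_line_to_file(line, filepath):
    # Same logic as the stored document, with the import added so the sketch runs standalone.
    filepath = os.path.realpath(filepath)
    if not os.path.isdir(os.path.dirname(filepath)):
        os.makedirs(os.path.dirname(filepath))
    found = False
    if os.path.isfile(filepath):
        with open(filepath, 'r+') as myfile:
            for existingline in myfile.readlines():
                if line in existingline:  # substring match, not an exact-line match
                    found = True
    if not found:
        with open(filepath, 'a+') as myfile:
            myfile.write(line + "\n")

# Hypothetical usage: the second call finds the line and appends nothing.
add_line_to_file("alias ll='ls -la'", "/tmp/example_aliases.sh")
add_line_to_file("alias ll='ls -la'", "/tmp/example_aliases.sh")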
Return gold labels as a list. | def gold(self):
return self.labels | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_labels(self):\n return []",
"def get_labels(self):\n return [token.label for token in self.tokens]",
"def get_labels(self) -> List[str]:\n return self.labels",
"def labels(self) -> list:\n return self._labels",
"def get_labels() -> list[Label]:\n\n labels_file = deepcopy(get_data(\"labels.yml\"))\n standard_labels = []\n for group_info in labels_file[\"groups\"]:\n labels = group_info.pop(\"labels\", [])\n group = LabelGroup(**group_info)\n for label_info in labels:\n label = Label(**label_info, group=group)\n standard_labels.append(label)\n for label_info in labels_file[\"standalone\"]:\n label = Label(**label_info)\n standard_labels.append(label)\n return standard_labels",
"def get_labels(self) -> List[str]:\n raise NotImplementedError()",
"def return_histogram_labels():\n return list(labels)",
"def labels(self):\n return self._labels",
"def get_labels(self):\n\n labels = list(self.meta_data[self.target_column])\n\n return labels",
"def get_labels(self):\n return self.labels",
"def labels(self) -> List[str]:\n\n return list(self.t0.keys())",
"def get_labels(self):\n return self.labels[1:]",
"def labels(self):\n return self._labels",
"def labels(self):\n return self._labels",
"def labels(self):\n return self._labels",
"def labels(self):\n return self._labels",
"def labels(self):\n return self._labels",
"def labels(self):\n return self._labels",
"def get_labels(self):\n resp = self._client.scan(TableName=self.LABELS_TABLE)\n return [self._item_to_label(item) for item in resp['Items']]",
"def get_labels(self) -> Set[str]:",
"def labels_all(self):\n return self._labels_all",
"def get_all_labels(self):\n labels = self.wls_board.get_labels\n return labels",
"def labels(self):\n return self._get_labels(self.label_vector)",
"def output_labels(self):\n return list(self._output_labels)",
"def output_labels(self):\n return list(self._output_labels)",
"def getLabels(self) -> List[str]:\n\n results = self.service.users().labels().list(userId='me').execute()\n labels = results.get('labels', [])\n\n return labels",
"def labels(self) -> list[\"Label\"]:\n _args: list[Arg] = []\n _ctx = self._select(\"labels\", _args)\n _ctx = Label(_ctx)._select_multiple(\n _name=\"name\",\n _value=\"value\",\n )\n return _ctx.execute_sync(list[Label])",
"def list_labels(self):\n # Create empty list\n label_names = []\n \n # For every name in training directory\n for name in os.listdir(self.train_data):\n # If it does not start with . (which hidden files do)\n if not name.startswith('.'):\n label_names.append(name)\n \n return label_names",
"def getLabels(self):\n labels = np.empty(self.numSites, dtype=np.intc)\n _cgco.gcoGetLabels(self.handle, labels)\n return labels",
"def get_labels_decomposed(self) -> List[List[str]]:\n return [list(label) for label in self.labels]"
] | [
"0.73611605",
"0.7302142",
"0.72801894",
"0.713625",
"0.7063405",
"0.7038624",
"0.70314384",
"0.69941795",
"0.6881636",
"0.68506217",
"0.6839455",
"0.6778665",
"0.6774362",
"0.6774362",
"0.6774362",
"0.6774362",
"0.6774362",
"0.6774362",
"0.6681522",
"0.6644164",
"0.66342205",
"0.6617143",
"0.6584692",
"0.6554749",
"0.6554749",
"0.65106076",
"0.650403",
"0.64726347",
"0.64705753",
"0.6446894"
] | 0.8026147 | 0 |
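A minimal illustration of the gold accessor above; the container class, its fields, and the sample labels are assumptions made for the sketch, not taken from the dataset.

class Sentence:
    def __init__(self, tokens, labels):
        self.tokens = tokens
        self.labels = labels  # gold (ground-truth) labels, one per token

    def gold(self):
        # Return gold labels as a list.
        return self.labels

sent = Sentence(["Paris", "is", "lovely"], ["B-LOC", "O", "O"])
print(sent.gold())  # ['B-LOC', 'O', 'O']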
Randomly dropout tokens (IDs) and replace them with <UNK> tokens. | def word_dropout(tokens, dropout):
return [constant.UNK_ID if x != constant.UNK_ID and np.random.random() < dropout \
else x for x in tokens] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def replace_tokens(self, tokens):\n details = []\n idx = 0\n if len(tokens) >= min_token_num:\n for i in range(len(tokens)):\n old_token = tokens[i]\n if old_token in self.vocab and self.get_random_prob() < self.token_prob:\n tokens[i] = self.get_delete_token()\n details.append((old_token, tokens[i], idx, idx + len(tokens[i])))\n idx += len(tokens[i])\n return tokens, details",
"def replace_tokens(self, tokens):\n details = []\n idx = 0\n if len(tokens) >= min_token_num:\n for i in range(len(tokens)):\n old_token = tokens[i]\n if old_token in self.vocab and self.get_random_prob() < self.token_prob:\n tokens[i] = self.get_insert_token(tokens[i])\n details.append((old_token, tokens[i], idx, idx + len(tokens[i])))\n idx += len(tokens[i])\n return tokens, details",
"def reset_token_list(self):\n self.token_list = list(self.vocab.keys())\n self.token_ptr = len(self.token_list) - 1\n np.random.shuffle(self.token_list)",
"def reset_token_list(self):\n self.token_list = list(self.vocab.keys())\n self.token_ptr = len(self.token_list) - 1\n np.random.shuffle(self.token_list)",
"def reset_token_list(self):\n self.token_list = list(self.vocab.keys())\n self.token_ptr = len(self.token_list) - 1\n np.random.shuffle(self.token_list)",
"def replace_tokens(self, tokens, replace_prob):\n details = []\n idx = 0\n for i in range(len(tokens)):\n old_token = tokens[i]\n if old_token in self.idf and self.get_random_prob() < replace_prob[i]:\n tokens[i] = self.get_replace_token(tokens[i])\n if tokens[i] != old_token:\n details.append((old_token, tokens[i], idx, idx + len(tokens[i])))\n idx += len(tokens[i])\n return tokens, details",
"def replace_tokens(self, tokens, replace_prob):\n details = []\n idx = 0\n for i in range(len(tokens)):\n old_token = tokens[i]\n if old_token in self.idf and self.get_random_prob() < replace_prob[i]:\n # Use Tfidf find similar token\n tokens[i] = self.get_similar_token(tokens[i])\n if tokens[i] != old_token:\n details.append((old_token, tokens[i], idx, idx + len(tokens[i])))\n idx += len(tokens[i])\n return tokens, details",
"def get_replace_token(self, word):\n r_prob = np.random.rand()\n # Similar choose prob\n if r_prob < self.similar_prob:\n word = self.get_similar_token(word)\n elif r_prob - self.similar_prob < self.random_prob:\n word = self.get_random_token()\n elif r_prob - self.similar_prob - self.random_prob < self.delete_prob:\n word = self.get_delete_token()\n else:\n word = self.get_insert_token(word)\n return word",
"def regenerate(self):\n self.secret_code = random.randint(self.min, self.max)",
"def randomize_tokens(tokens, mask, tokenizer):\n targets = torch.ones_like(tokens) * -1\n\n # get random data\n p = torch.rand_like(tokens.float()) * mask.float()\n random_tokens = torch.randint_like(tokens, len(tokenizer.vocab))\n\n # set targets for masked tokens\n thresh = 0.85\n targets[p >= thresh] = tokens[p >= thresh]\n\n # progressively overwrite tokens while increasing the threshold\n\n # replace 80% with '[MASK]' token\n tokens[p >= thresh] = tokenizer.vocab[\"[MASK]\"]\n\n # replace 10% with a random word\n thresh = 0.85 + 0.15 * 0.8\n tokens[p >= thresh] = random_tokens[p >= thresh]\n\n # keep 10% unchanged\n thresh = 0.85 + 0.15 * 0.9\n tokens[p >= thresh] = targets[p >= thresh]\n\n return tokens, targets",
"def synonym_token_replace(tokens, ignored_tokens=stopwords.words('english'), excluded_token_regex=None,\n max_frequency=None, min_occurrences=None,\n # min_frequency=None, max_occurrences=None,\n # min_document_frequency=None, max_document_frequency=None,\n # min_document_occurrences=None, max_document_occurrences=None,\n num_candidates=25, replace_probability=0.5, tokens_to_replace=None):\n\n if not tokens_to_replace:\n # flatten tuple of tuples and get token dictionary\n token_dict, token_freq, n_tokens = vectorizers._vectorizers.construct_token_dictionary_and_frequency(\n vectorizers.utils.flatten(tokens))\n\n # prune token dictionary depending on parameters supplied by user\n # returns a dictionary of candidate tokens for replacement\n candidate_dict, candidate_freq = vectorizers._vectorizers.prune_token_dictionary(\n token_dict,\n token_freq,\n ignored_tokens=ignored_tokens,\n excluded_token_regex=excluded_token_regex,\n min_frequency=(min_occurrences / n_tokens),\n max_frequency=max_frequency,\n # min_occurrences=min_occurrences,\n # max_occurrences=max_occurrences,\n # min_document_frequency=min_document_frequency,\n # max_document_frequency=max_document_frequency,\n # min_document_occurrences=min_document_occurrences,\n # max_document_occurrences=max_document_occurrences,\n total_tokens=n_tokens,\n total_documents=len(tokens),\n )\n\n # take a random sample of tokens from the candidate dictionary\n tokens_to_replace = random.sample(list(candidate_dict.keys()), num_candidates)\n\n print(\"Tokens for replacement:\")\n print(tokens_to_replace)\n\n # normalize replacement_probability\n norm_prob = np.array([replace_probability, 1 - replace_probability]).reshape(1, -1)\n norm_prob = normalize(norm_prob, axis=1, norm='l1').flatten().tolist()\n\n new_doc_list = []\n for doc in tokens:\n new_doc = []\n for token in doc:\n if token not in tokens_to_replace:\n new_doc.append(token) # new_doc.append(f\"{token}_$$orig\")\n else:\n synonyms = []\n for idx, _ in enumerate(norm_prob):\n synonyms.append(f\"{token}_$${idx}\")\n synonym = np.random.choice(synonyms, p=norm_prob)\n # logging.info(\"replacing '{}' with '{}'\".format(token,synonym)) # print(synonym)\n new_doc.append(str(synonym))\n new_doc_list.append(new_doc)\n\n # change dataset back to tuple of tuples before returning\n new_doc_tuple = tuple(tuple(doc) for doc in new_doc_list)\n return tokens_to_replace, new_doc_tuple",
"def random_swap(sentence, distance=1):\n # lis = sent.split(' ') # split by spaces\n tokens = tokenize(sentence)\n tokens_length = len(tokens)\n assert tokens_length >= 2\n index1 = random.randint(0, tokens_length - 1)\n # canidates pool\n candidates = set(range(index1 - distance, index1 + distance + 1)) & set(range(tokens_length))\n candidates.remove(index1)\n # randomly sample another index\n index2 = random.sample(candidates, 1)[0]\n # swap two elements\n tokens[index1], tokens[index2] = tokens[index2], tokens[index1]\n # n_sen = ' '.join(lis)\n n_sentence = untokenize(tokens)\n # return new sentence\n return n_sentence",
"def token_drop(self, labels, force_drop_ids=None):\n if force_drop_ids is None:\n drop_ids = torch.rand(labels.shape[0], device=labels.device) < self.dropout_prob\n else:\n drop_ids = torch.tensor(force_drop_ids == 1)\n labels = torch.where(drop_ids, self.num_classes, labels)\n return labels",
"def random_mask_tokens(inputs, alphabet, mlm_probability=0.15):\n labels = inputs.clone()\n device = inputs.device\n # We sample a few tokens in each sequence for MLM training\n # (with probability `self.mlm_probability`)\n probability_matrix = torch.full(labels.shape, mlm_probability,\n device=device)\n special_tokens_mask = (inputs == alphabet.padding_idx)\n probability_matrix.masked_fill_(special_tokens_mask, value=0.0)\n masked_indices = torch.bernoulli(probability_matrix).bool()\n labels[~masked_indices] = -100 # We only compute loss on masked tokens\n\n # 80% of the time, we replace masked input tokens with mask_token ([MASK])\n indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8,\n device=device)).bool() & masked_indices\n inputs[indices_replaced] = alphabet.mask_idx\n\n # 10% of the time, we replace masked input tokens with random word\n indices_random = torch.bernoulli(torch.full(labels.shape, 0.5,\n device=device)).bool() & masked_indices & ~indices_replaced\n random_AAs = torch.randint(len(alphabet.prepend_toks),\n len(alphabet.standard_toks), labels.shape,\n dtype=torch.long, device=device)\n inputs[indices_random] = random_AAs[indices_random]\n\n # The rest of the time (10% of the time)\n # we keep the masked input tokens unchanged\n return inputs, labels, masked_indices",
"def test_token_replacement(eventgen_test_helper):\n events = eventgen_test_helper(\"eventgen_token_replacement.conf\").get_events()\n # assert the events size is 10 since end = 1\n assert len(events) == 10\n\n with open(os.path.join(base_dir, \"sample\", \"id.csv\"), \"rt\") as f:\n id_content = f.read()\n with open(os.path.join(base_dir, \"sample\", \"ip.csv\"), \"rt\") as f:\n ip_content = f.read()\n with open(os.path.join(base_dir, \"sample\", \"cp.csv\"), \"rt\") as f:\n cp_content = f.read()\n with open(os.path.join(base_dir, \"sample\", \"city.csv\"), \"rt\") as f:\n reader = csv.reader(f)\n country = []\n city = []\n latitude = []\n longitude = []\n for row in reader:\n country.append(row[0])\n city.append(row[1])\n latitude.append(row[3])\n longitude.append(row[4])\n\n integer_id_seed = 1\n for event in events:\n try:\n event_obj = json.loads(event)\n except ValueError:\n raise Exception(\"Token replacement error\")\n\n # assert replacementType = integerid\n assert int(event_obj[\"ppcustomdata\"][\"receiver_id\"]) == integer_id_seed\n integer_id_seed += 1\n\n # assert replacementType = file\n assert event_obj[\"id\"] in id_content\n assert event_obj[\"cp\"] in cp_content\n assert event_obj[\"message\"][\"cliIP\"] in ip_content\n\n # assert replacementType = static\n assert event_obj[\"netPerf\"][\"lastByte\"] == \"0\"\n\n # assert replacementType = random and replacement = integer[<start>:<end>]\n assert 5000 >= int(event_obj[\"message\"][\"bytes\"]) > 40\n\n # assert replacementType = random and replacement = float[<start>:<end>]\n assert 3.0 >= float(event_obj[\"netPerf\"][\"lastMileRTT\"]) >= -3.0\n\n # assert replacementType = random and replacement = ipv4 | ipv6 | mac\n ipv4_pattern = re.compile(r\"^(?:[0-9]{1,3}\\.){3}[0-9]{1,3}$\")\n ipv6_pattern = re.compile(r\"^([A-Fa-f0-9]{1,4}:){7}[A-Fa-f0-9]{1,4}$\")\n mac_pattern = re.compile(r\"^([0-9A-Fa-f]{2}[:-]){5}([0-9A-Fa-f]{2})$\")\n\n assert ipv4_pattern.match(event_obj[\"akadebug\"][\"Ak_IP\"]) is not None\n assert (\n ipv6_pattern.match(event_obj[\"akadebug\"][\"forward-origin-ip\"]) is not None\n )\n assert mac_pattern.match(event_obj[\"akadebug\"][\"end-user-ip\"]) is not None\n\n # assert replacementType = file | mvfile and replacement = <replacement file name>:<column number>\n assert event_obj[\"geo\"][\"city\"] in city\n assert event_obj[\"geo\"][\"country\"] in country\n assert event_obj[\"geo\"][\"lat\"] in latitude\n assert event_obj[\"geo\"][\"long\"] in longitude",
"def ScrambleMutation(item):\n item=copy.deepcopy(item)\n countryNo = len(item)\n [start,end] = sorted(random.sample(range(1,countryNo+1),2))\n shuffle_slice(item,start,end)\n return item",
"def __insert_random_synonym(self, tokenized_sentence: list) -> str:\n sentence_length = len(tokenized_sentence)\n # Initialize the return string\n new_sentence = \"\"\n # Some variables to keep track of changes and attempted changes\n has_changed = False\n attempts = 0\n # Keep trying to make a change until either:\n # 1) You've made a change, OR\n # 2) You've tried to make a change for half the words in the sentence with no success\n while has_changed is not True and attempts <= sentence_length/2:\n # Grab a random word from the tokenized sentence\n index_to_get_word_from = random.randint(0, sentence_length-1)\n pair_to_get_word_from = tokenized_sentence[index_to_get_word_from]\n # Get the list of synonyms based off of that (word, POS) pair from the tokenized sentence\n list_of_syns = nltk_methods.list_of_syns_from_pos_pair(pair_to_get_word_from)\n # ...but what if it's a word that doesn't have any synonyms matching the POS tag? \n if len(list_of_syns) < 1: \n # Failed synonym swap, so bump up the attempts tracker by one\n attempts += 1\n continue\n # Else, the word does have synonyms we can swap the word for\n else:\n # Randomly pick a word from the synonym list\n random_pick = random.randint(0, len(list_of_syns)-1)\n new_word = list_of_syns[random_pick]\n new_word_pair = (new_word, \"NA\") # \"NA\" is a dummy POS tag\n # Now randomly find a spot to put the new word\n index_to_place_new_word = random.randint(0, sentence_length-1)\n # Now update the tokenized sentence with the new word\n tokenized_sentence.insert(index_to_place_new_word, new_word_pair)\n sentence_length += 1\n # Pull the sentence back together\n new_sentence = nltk_methods.put_string_together_from_pos_tagged_list(tokenized_sentence)\n # Now let's clean up our brand new sentence really quickly\n new_sentence = nltk_methods.clean_sentence(new_sentence)\n # BUT WAIT, what if this is a duplicate? We don't want that!\n if new_sentence in self.return_augmented_sentences():\n # Bump up the attempts and skip this sentence\n attempts += 1\n continue\n # Update the flags\n has_changed = True\n return new_sentence",
"def _random_id(n):\n ''.join(choice(alphanums) for i in range(n))",
"def gen_random_fightID():\n pass",
"def random_word_wwm(tokens, ref_ids, tokenizer):\n output_label = []\n\n for i, (token, ref_id) in enumerate(zip(tokens, ref_ids)):\n\n prob = random.random()\n\n if ref_id == 1:\n\n # 80% randomly change token to mask token\n if prob < 0.8:\n tokens[i] = \"[MASK]\"\n\n # 10% randomly change token to random token\n elif prob < 0.9:\n tokens[i] = random.choice(list(tokenizer.vocab.items()))[0]\n\n # -> rest 10% randomly keep current token\n\n # append current token to output (we will predict these later)\n try:\n output_label.append(tokenizer.vocab[token])\n except KeyError:\n # For unknown words (should not occur with BPE vocab)\n output_label.append(tokenizer.vocab[\"[UNK]\"])\n else:\n # no masking token (will be ignored by loss function later)\n output_label.append(-100)\n\n return tokens, output_label",
"def generate_sample(seed_phrase=\" \",max_length=MAX_LENGTH): \n x_sequence = [token_to_id[token] for token in seed_phrase]\n s.run(tf.assign(h_t,h_t.initial_value))\n \n # Feed the seed phrase, if any.\n for ix in x_sequence[:-1]:\n s.run(tf.assign(h_t,next_h),{x_t:[ix]})\n \n # Generate.\n for _ in range(max_length-len(seed_phrase)):\n x_probs,_ = s.run([next_probs,tf.assign(h_t,next_h)],{x_t:[x_sequence[-1]]})\n x_sequence.append(np.random.choice(n_tokens,p=x_probs[0]))\n \n return \"\".join([tokens[ix] for ix in x_sequence])",
"def replace_tokens(the_string, sequence_switched, suggestion_list, indexer, tokenizer, model, suggestion_num):\n # Get top tokens\n top_tokens = get_top_tokens(sequence_switched, indexer, tokenizer, model, suggestion_num)\n\n iterator = 0\n for token in top_tokens:\n suggestion_list[iterator] += the_string.replace(tokenizer.mask_token, tokenizer.decode([token]))\n iterator += 1\n\n return suggestion_list",
"def generate_new_token(self):\n self.access_token = random_auth_key()",
"def degenerate2(s):\n from lasagna.utils import base_repr\n\n n = s.count('N')\n seed = hash(s) % (2**32 - 1)\n rng = random.Random(seed)\n random_base_ix = lambda: base_repr(rng.randint(0, 4**(n + 1) - 1), 4, n + 1)[::-1]\n while True:\n bases = ['ACTG'[int(j)] for j in random_base_ix()]\n s2 = s\n for b in bases:\n s2 = s2.replace('N', b, 1)\n yield s2",
"def skip_and_replace_phrases(text):\n\n # For each text in [], replace it with '' with probability 0.5.\n matches = re.findall('(\\[[ \\w]*\\])', text)\n for match in matches:\n if random.uniform(0, 1) > 0.5:\n text = text.replace(match, '')\n else:\n text = text.replace(match, match[1:-1])\n\n # Remove empty spaces, if any.\n text = re.sub(' +', ' ', text)\n # Search for synonyms, replace at uniformly random.\n text = text.lower()\n for key, values in gvars.METAINFO['synonym_keys']:\n if key in text:\n text = text.replace(key, random.choice(values))\n return text",
"def _clear_secret_token_map():\n global _secret_token_map\n _secret_token_map = None",
"def mask_tokens(self, sequence):\n n_tokens = len(sequence)\n n_masked_tokens = int(self.masking_proportion*n_tokens/100)\n indexes = [random.randint(0, n_tokens-1) for i in range(n_masked_tokens)]\n while len(set(indexes))!=n_masked_tokens:\n indexes = [random.randint(0, n_tokens-1) for i in range(n_masked_tokens)]\n sequence = np.array(sequence)\n sequence[indexes] = 4\n return list(sequence)",
"def token_dropout(tokens: torch.LongTensor,\n oov_token: int,\n exclude_tokens: List[int],\n p: float = 0.2,\n training: float = True) -> torch.LongTensor:\n if training and p > 0:\n # This creates a mask that only considers unpadded tokens for mapping to oov\n padding_mask = tokens.new_ones(tokens.size(), dtype=torch.bool)\n for pad in exclude_tokens:\n padding_mask &= (tokens != pad)\n\n # Create a uniformly random mask selecting either the original words or OOV tokens\n dropout_mask = (tokens.new_empty(tokens.size(), dtype=torch.float).uniform_() < p)\n oov_mask = dropout_mask & padding_mask\n\n oov_fill = tokens.new_empty(tokens.size(), dtype=torch.long).fill_(oov_token)\n\n result = torch.where(oov_mask, oov_fill, tokens)\n\n return result\n else:\n return tokens",
"def replace_special_token(self, tokenizer_fields: Dict[str, torch.Tensor], positions: List[int], replace_id: int):\n token_ids, offsets = tokenizer_fields[\"token_ids\"], tokenizer_fields[\"offsets\"]\n for pos in positions:\n offset = offsets[pos].numpy().tolist()\n if offset[0] != offset[1]:\n warnings.warn(f\"replace normally expect token in `positions` has not been split to pieces.\"\n f\"This warning should NOT happen unless during batch prediction at evaluation\")\n token_ids[offset[0]] = replace_id",
"def clean_user_tokens() -> None:\n asyncio.run(clean_old_user_tokens())"
] | [
"0.64954305",
"0.6290737",
"0.6272901",
"0.6272901",
"0.6272901",
"0.6148785",
"0.59451646",
"0.5890986",
"0.5751701",
"0.5737682",
"0.57351816",
"0.563494",
"0.56142646",
"0.55326474",
"0.54899573",
"0.5485592",
"0.547473",
"0.542751",
"0.54229766",
"0.53995466",
"0.5398629",
"0.5379253",
"0.537731",
"0.5352172",
"0.53437024",
"0.5343184",
"0.5338396",
"0.5330951",
"0.5328233",
"0.5319483"
] | 0.7062326 | 0 |
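A self-contained sketch of the word_dropout document above. UNK_ID = 1 stands in for constant.UNK_ID, which the stored snippet imports from elsewhere, so that value is an assumption; the trick itself is standard word-level dropout, used during training so a model sees unknown-word IDs and copes better with out-of-vocabulary tokens at test time.

import numpy as np

UNK_ID = 1  # assumption: plays the role of constant.UNK_ID from the snippet above

def word_dropout(tokens, dropout):
    # With probability `dropout`, replace each non-UNK token ID with UNK_ID;
    # IDs that are already UNK_ID are left unchanged.
    return [UNK_ID if x != UNK_ID and np.random.random() < dropout else x
            for x in tokens]

print(word_dropout([5, 9, 1, 42, 7], dropout=0.3))
# Output varies per run; on average about 30% of the non-UNK IDs come back as UNK_ID.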
This function pulls dalys for specified cause IDs from GBD | def pull_dalys(cause_ids, nonfatal_cause_ids, location_ids, ages, sexes, index_cols):
if len(cause_ids) + len(nonfatal_cause_ids) == 0:
raise Exception("Must select at least one fatal or nonfatal cause_id")
#init empty dfs
ylds, ylls = pd.DataFrame(), pd.DataFrame()
if len(nonfatal_cause_ids)>0:
ylds = get_draws(
gbd_id_type='cause_id',
gbd_id=cause_ids,
source='como',
measure_id=3,
metric_id=3, # only available as rate
location_id=location_ids,
year_id=2019,
age_group_id=ages,
sex_id=sexes,
gbd_round_id=6,
status='best',
decomp_step='step5',
).set_index(index_cols + ['cause_id'])
ylds = ylds.drop(columns=[c for c in ylds.columns if 'draw' not in c])
#convert rate to count
pop = get_population(
location_id=location_ids,
year_id=2019,
age_group_id=ages,
sex_id=sexes,
gbd_round_id=6,
decomp_step='step4').set_index(index_cols)
for i in list(range(0, 1000)):
ylds[f'draw_{i}'] = ylds[f'draw_{i}'] * pop['population']
else:
print("No nonfatal ids selected; returning ylls only")
if len(cause_ids)>0:
ylls = get_draws(
gbd_id_type='cause_id',
gbd_id=cause_ids,
source='codcorrect',
measure_id=4,
metric_id=1,
location_id=location_ids,
year_id=2019,
age_group_id=ages,
sex_id=sexes,
gbd_round_id=6,
status='latest',
decomp_step='step5',
).set_index(index_cols + ['cause_id']).replace(np.nan, 0)
ylls = ylls.drop(columns=[c for c in ylls.columns if 'draw' not in c])
else:
print("No fatal ids selected; returning ylds only")
return ylls + ylds | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_adhd(caseids,mydb=None):\n\tif mydb is None:\n\t\tmydb = db_manager('vrclassroomdata')\n\tcids = ','.join([str(x) for x in caseids])\n\tthis_df = mydb.sql_query_fetch_df(\"select a.Id, b.ADHDDiagnose from `case` a, patient b where b.Id=a.SubjectId and a.Id in (%s) order by field (a.Id,%s)\"%(cids,cids),primary_key='Id')\n\tthis_df.ADHDDiagnose = this_df.ADHDDiagnose>0\n\treturn this_df",
"def populate_agdds(start_date, end_date, source, source_id, stations):\r\n # possibly grab ACIS station data (for entire date range)\r\n if source == 'ACIS':\r\n station_ids = []\r\n for station in stations:\r\n station_ids.append(station['char_network_id'])\r\n acis_data = get_acis_climate_data(\",\".join(station_ids), 'mint,maxt,gdd32,gdd50', start_date, end_date)\r\n\r\n for station in stations:\r\n print(station['char_network_id'])\r\n # grab previous days tmin, tmax, and agdd for both bases from mysql agdds table and start over at year breaks\r\n day_before_start_date = start_date - timedelta(days=1)\r\n if day_before_start_date.year == start_date.year:\r\n prev_tmin = get_element_from_qc_table(station['station_id'], source_id, day_before_start_date, 32, 'tmin')\r\n prev_tmax = get_element_from_qc_table(station['station_id'], source_id, day_before_start_date, 32, 'tmax')\r\n agdd32 = get_element_from_qc_table(station['station_id'], source_id, day_before_start_date, 32, 'agdd')\r\n agdd50 = get_element_from_qc_table(station['station_id'], source_id, day_before_start_date, 50, 'agdd')\r\n else:\r\n prev_tmin = None\r\n prev_tmax = None\r\n agdd32 = None\r\n agdd50 = None\r\n\r\n if prev_tmin is None or prev_tmin == 'M':\r\n prev_tmin = 0\r\n if prev_tmax is None or prev_tmax == 'M':\r\n prev_tmax = 0\r\n if agdd32 is None or agdd32 == 'M':\r\n agdd32 = 0\r\n if agdd50 is None or agdd50 == 'M':\r\n agdd50 = 0\r\n\r\n # possibly find station of interest from ACIS retrieved data\r\n acis_station = None\r\n if source == 'ACIS':\r\n station_found = False\r\n for a_station in acis_data['data']:\r\n if station_found:\r\n break\r\n for sid in a_station['meta']['sids']:\r\n # print(sid)\r\n # print(station['char_network_id'])\r\n if station['char_network_id'] in sid:\r\n station_found = True\r\n acis_station = a_station\r\n break\r\n if not station_found:\r\n print(\"Could not find station \" + station['char_network_id'])\r\n\r\n previous_year = start_date.year\r\n delta = end_date - start_date\r\n for i in range(delta.days + 1):\r\n day = start_date + timedelta(days=i)\r\n doy = day.timetuple().tm_yday\r\n\r\n # reset the agdd to 0 if we go into a new year\r\n if previous_year != day.year:\r\n agdd32 = 0\r\n agdd50 = 0\r\n previous_year = day.year\r\n\r\n missing_data = False\r\n print(day.strftime(\"%Y-%m-%d\"))\r\n\r\n # see if we already have tmin and tmax from local db\r\n # tmin = None\r\n # tmax = None\r\n tmin = get_element_from_qc_table(station['station_id'], source_id, day, 32, 'tmin')\r\n tmax = get_element_from_qc_table(station['station_id'], source_id, day, 32, 'tmax')\r\n\r\n already_retrieved = False\r\n if tmin is not None and tmin != 'M' and tmax is not None and tmax != 'M' and source != 'PRISM':\r\n already_retrieved = True\r\n\r\n # don't already have tmin and tmax locally so grab from URMA postgis db or ACIS data\r\n if not already_retrieved:\r\n if source == 'URMA':\r\n if station['char_value'] == 'AK':\r\n tmin = get_urma_climate_data(station['longitude'], station['latitude'], day, 'tmin', 'alaska')\r\n tmax = get_urma_climate_data(station['longitude'], station['latitude'], day, 'tmax', 'alaska')\r\n else:\r\n tmin = get_urma_climate_data(station['longitude'], station['latitude'], day, 'tmin', 'conus')\r\n tmax = get_urma_climate_data(station['longitude'], station['latitude'], day, 'tmax', 'conus')\r\n # URMA and PRISM are in celsius in our postgis db everything else is Fer so convert here\r\n if tmin is not None:\r\n tmin = tmin * 1.8 + 32\r\n if tmax is not None:\r\n 
tmax = tmax * 1.8 + 32\r\n elif source == 'PRISM':\r\n tmin = get_prism_climate_data(station['longitude'], station['latitude'], day, 'tmin')\r\n tmax = get_prism_climate_data(station['longitude'], station['latitude'], day, 'tmax')\r\n if tmin is not None:\r\n tmin = tmin * 1.8 + 32\r\n if tmax is not None:\r\n tmax = tmax * 1.8 + 32\r\n elif acis_station is not None:\r\n tmin = acis_station['data'][i][0]\r\n tmax = acis_station['data'][i][1]\r\n\r\n # if tmin or tmax is missing, set to previous day's and mark as missing\r\n if tmin is not None and tmin != 'M':\r\n tmin = float(tmin)\r\n prev_tmin = tmin\r\n else:\r\n missing_data = True\r\n tmin = prev_tmin\r\n if tmax is not None and tmax != 'M':\r\n tmax = float(tmax)\r\n prev_tmax = tmax\r\n else:\r\n missing_data = True\r\n tmax = prev_tmax\r\n\r\n # compute gdd and agdd for both bases\r\n gdd32 = compute_gdd(tmin, tmax, 32)\r\n gdd50 = compute_gdd(tmin, tmax, 50)\r\n\r\n agdd32 += gdd32\r\n agdd50 += gdd50\r\n\r\n if not already_retrieved:\r\n # do an insert or update\r\n add_agdd_row(station['station_id'], source_id, gdd32, agdd32, day.year, doy, day, 32, missing_data, tmin, tmax)\r\n add_agdd_row(station['station_id'], source_id, gdd50, agdd50, day.year, doy, day, 50, missing_data, tmin, tmax)",
"def find_in_simbad(sources, desig_prefix, source_id_index = None, verbose = False):\n\n n_sources = len(sources)\n\n Simbad.reset_votable_fields()\n Simbad.add_votable_fields('typed_id') # keep search term in result table\n Simbad.add_votable_fields('ids') # add all SIMBAD identifiers as an output column\n print(\"simbad query started\")\n result_table = Simbad.query_objects(sources)\n print(\"simbad query ended\")\n\n ind = result_table['SCRIPT_NUMBER_ID'] > 0 # find indexes which contain results\n\n simbad_ids = result_table['TYPED_ID', 'IDS'][ind] # .topandas()\n\n db_names = []\n simbad_designations = []\n if source_id_index is not None:\n source_ids = []\n\n for row in simbad_ids:\n db_name = row['TYPED_ID']\n ids = row['IDS'].split('|')\n designation = [i for i in ids if desig_prefix in i]\n\n if designation:\n verboseprint(db_name, designation[0])\n db_names.append(db_name)\n simbad_designations.append(designation[0])\n if source_id_index is not None:\n source_id = designation[0].split()[source_id_index]\n source_ids.append(int(source_id)) #convert to int since long in Gaia\n\n n_matches = len(db_names)\n print('Found', n_matches, desig_prefix, ' sources for', n_sources, ' sources')\n\n result_table = Table([db_names, simbad_designations, source_ids],\n names=('db_names', 'designation', 'source_id'))\n\n return result_table",
"def get_sources(queue, args, gedcom_data, dbid_map, apid_image_map):\n sources = {}\n dbid_list = []\n source_list = []\n logging.info(\"Generating updated source records\")\n gedcom = StringIO(gedcom_data)\n line = gedcom.readline()\n while line:\n if \"0 @S\" not in line:\n if \" _APID \" in line:\n dbid = line.split(\",\")[1].split(\":\")[0]\n if dbid not in dbid_list:\n dbid_list.append(dbid)\n if \" SOUR \" in line:\n source = line.split(\" \")[2].strip()\n if source not in source_list:\n source_list.append(source)\n line = gedcom.readline()\n continue\n apid = \"\"\n source = []\n source_data = [line]\n source_id = line.split(\" \")[1]\n if source_id not in source_list:\n logging.error(\"Found unreferenced source record %s\", source_id)\n line = gedcom.readline()\n continue\n line = gedcom.readline().strip()\n while line[0] != \"0\":\n source_data.append(line)\n if \"_APID\" in line:\n apid = line.strip().split(\" \")[2]\n dbid = apid.split(\":\").pop(0).split(\",\").pop(1)\n if dbid not in dbid_list:\n logging.error(\n \"Found unreferenced DBID record %s in source record %s\",\n dbid,\n source_id,\n )\n line = gedcom.readline()\n continue\n line = gedcom.readline().strip()\n if apid == \"\":\n sources.update({source_id: source_data})\n continue\n original = []\n publisher = []\n description = []\n if dbid in dbid_map:\n if \"publisher\" in dbid_map[dbid] and dbid_map[dbid][\"publisher\"] != \"\":\n publisher = build_note(dbid_map[dbid][\"publisher\"], keyword=\"PUBL\")\n if \"original\" in dbid_map[dbid] and dbid_map[dbid][\"original\"] != \"\":\n original = build_note(\n \"Original Data: {0}\".format(dbid_map[dbid][\"original\"]),\n keyword=\"NOTE\",\n )\n if \"description\" in dbid_map[dbid]:\n if dbid_map[dbid][\"description\"] not in [\"\", \"Learn more...\"]:\n description = build_note(\n dbid_map[dbid][\"description\"], keyword=\"NOTE\"\n )\n else:\n logging.error(\"Found DBID record %s with no data\", dbid)\n in_title = False\n in_publisher = False\n short_title = apid = \"\"\n for entry in source_data:\n if \" _APID \" in entry:\n apid = entry\n continue\n if in_title:\n if \" CONC \" in entry or \" CONT \" in entry:\n source.append(entry)\n continue\n in_title = False\n if short_title != \"\":\n source.append(\"1 ABBR {0}\".format(short_title))\n if in_publisher:\n if \" CONC \" in entry or \" CONT \" in entry:\n source.append(entry)\n continue\n in_publisher = False\n if args.source_url:\n source.append(\n \"1 NOTE https://search.ancestry.com/search/db.aspx?dbid={0}\".format(\n dbid\n )\n )\n if \"NOTE\" in entry and len(entry) < 8:\n continue\n if \"CONC\" in entry and len(entry) < 8:\n continue\n if \" PUBL \" in entry:\n if publisher != []:\n for item in publisher:\n source.append(item)\n else:\n source.append(entry)\n in_publisher = True\n continue\n if \" TITL \" in entry:\n if len(entry[7:].strip()) <= 60:\n short_title = entry[7:].strip()\n in_title = True\n source.append(entry)\n if original != []:\n for item in original:\n source.append(item)\n if description != []:\n for item in description:\n source.append(item)\n search = apid.split(\":\").pop(0) + \"::\"\n for entry in apid_image_map:\n if search in entry:\n source.append(\"1 OBJE {0}\".format(apid_image_map[entry]))\n if args.keep_apid:\n source.append(\"1 _APID {0}\".format(apid))\n sources.update({source_id: source})\n logging.info(\"Updated source records generated\")\n queue.put(sources)",
"def get_el_targets(params):\n data = queryDevice.queryDevice(\"\"\"\n SELECT DISTINCT dc.tid, dc.target_type, dc.dc, COUNT(DISTINCT act.assay_id), COUNT(DISTINCT activity_id)\n FROM assays ass\n JOIN(\n SELECT td.tid, td.target_type, COUNT(cd.domain_id) as dc\n FROM target_dictionary td\n JOIN target_components tc\n ON tc.tid = td.tid\n\t\t JOIN component_sequences cs\n\t\t\tON cs.component_id = tc.component_id\n JOIN component_domains cd\n \t\t\tON cd.component_id = cs.component_id\n WHERE td.target_type IN('SINGLE PROTEIN', 'PROTEIN COMPLEX')\n GROUP BY td.tid\n ) as dc\n ON dc.tid = ass.tid\n JOIN activities act\n ON act.assay_id = ass.assay_id\n WHERE act.standard_type IN('Ki','Kd','IC50','EC50', 'AC50')\n AND ass.relationship_type = 'D'\n AND assay_type IN('B')\n AND act.standard_relation IN('=')\n AND standard_units = 'nM'\n AND standard_value <= %s\n GROUP BY dc.tid ORDER BY COUNT(activity_id)\"\"\" % (int(params['threshold']) * 1000) , params)\n print \"retrieved data for \", len(data), \"tids.\"\n return data",
"def getDetIDs():\n from ROOT import TFile, GATDataSet, MJTChannelMap\n run = bkg.getRunList(5,1)[0]\n gds = GATDataSet(run)\n chMap = gds.GetChannelMap()\n # chMap.DumpDetectorNames()\n\n dets = det.allDets\n for d in dets:\n detName = chMap.GetDetectorName(int(d[0]), int(d[1]), int(d[2]))\n\n # now match it to the IDs used in DataSetInfo.cc::Load(X)DetectorMap\n detID = '1' if detName[0]==\"P\" else '2'\n detID += detName[1:]\n tmp = list(detID)\n if detID[0] == '1':\n if tmp[-1] == \"A\": tmp[-1] = '0'\n if tmp[-1] == \"B\": tmp[-1] = '1'\n if tmp[-1] == \"C\": tmp[-1] = '2'\n detID = ''.join(tmp)\n\n print(\"'%s':%s,\" % (d, detID))",
"def getThreshDB():\n calDB = db.TinyDB(\"%s/calDB-v2.json\" % dsi.latSWDir)\n pars = db.Query()\n bkg = dsi.BkgInfo()\n\n # loop over datasets\n # for ds in [0,1,2,3,4,5,6]:\n for ds in [6]:\n dsNum = ds if isinstance(ds, int) else 5\n goodChans = det.getGoodChanList(dsNum)\n\n for bkgIdx in bkg.getRanges(ds):\n\n # ==== loop over sub-ranges (when TF was run) ====\n rFirst, rLast = bkg.getRanges(ds)[bkgIdx][0], bkg.getRanges(ds)[bkgIdx][-1]\n\n subRanges = bkg.GetSubRanges(ds,bkgIdx)\n if len(subRanges) == 0: subRanges.append((rFirst, rLast))\n\n for subIdx, (runLo, runHi) in enumerate(subRanges):\n\n key = \"thresh_ds%d_bkg%d_sub%d\" % (dsNum, bkgIdx, subIdx)\n\n thD = dsi.getDBRecord(key, False, calDB, pars)\n print(key)\n for ch in thD:\n print(ch,\":\",thD[ch])\n print(\"\")",
"def get_odds(event_sids, source=utils.get_native_source):\n if not isinstance(source, games.models.Source):\n source = source()\n logger.info(\"getting odds for %s events from source %s...\", len(event_sids), source)\n pre_winners, pre_ovuns, pre_dcs, pre_hds, unsups = [], [], [], [], []\n if not source:\n return pre_winners, pre_ovuns, pre_dcs, pre_hds, unsups\n if not event_sids:\n logger.info(\"There are no event sids to process!\")\n return pre_winners, pre_ovuns, pre_dcs, pre_hds, unsups\n\n all_markets = defaultdict(int)\n counter = -1\n # with open('./sportmonks/response_texts/odds_by_id.json', 'w') as outfile:\n # odds_list = []\n for event_sid in event_sids:\n counter += 1\n data, meta, status_code = sportmonks.odds.by_fixture_id(event_sid)\n # odds_list.append({\"event id\": int(event_sid), \"data\": data})\n if status_code == 429:\n logger.warning(\"Maximum number of allowed calls for sportmonks.odds.by_fixture_id has been reached!\")\n remaining_sids = event_sids[counter:]\n chunks = gutils.utils.to_chunks(remaining_sids, constants.calls_limit)\n gutils.utils.schedule_chunks(chunks, constants.interval, utils.get_and_create_odd_trees_wrapper_sids, source_name=source.name)\n break\n if not data:\n continue\n try:\n temp_market_types = parse_odd_data(data, event_sid, source)\n for temp_market_type in temp_market_types:\n market_type = temp_market_type.market_type\n all_markets[market_type] += 1\n temp_market_type.log()\n winners, ovuns, dcs, hds, uns = temp_market_type.create_pre_markets()\n pre_winners.extend(winners)\n pre_ovuns.extend(ovuns)\n pre_dcs.extend(dcs)\n pre_hds.extend(hds)\n unsups.extend(uns)\n except Exception as e:\n logger.error('%s Unexpected problem with sportmonks.odds.by_fixture_id for event %s from source %s', repr(e), event_sid, source)\n # json.dump(odds_list, outfile, indent=4)\n logger.info('all markets (excluding those with data parsing errors): (len %s) %s', len(all_markets.keys()), all_markets.items())\n logger.info('%s pre winners, %s pre over under, %s pre double chance, %s pre handicap were created, '\n '%s unsupported market types', len(pre_winners), len(pre_ovuns), len(pre_dcs), len(pre_hds), len(unsups))\n return pre_winners, pre_ovuns, pre_dcs, pre_hds, unsups",
"def getDSids(startID, myDict, ids, stop=None):\n try:\n for key, value in myDict.iteritems():\n if value[0] == myDict[startID][1] and str(key) != stop:\n ids.append(key)\n return(getDSids(key, myDict, ids))\n if str(key) == stop:\n break\n return ids\n\n\n except Exception as e:\n tb = sys.exc_info()[2]\n print (\"Problem getting downstream IDs on line {} at {}\".format(tb.tb_lineno, stamp()))\n print(str(e))",
"def test_get_linearizer_bad_detectorIds(self):\n if self.butler_get_data.linearizer_type is unittest.SkipTest:\n self.skipTest('Skipping %s as requested' % (inspect.currentframe().f_code.co_name))\n\n for badccd in self.butler_get_data.bad_detectorIds:\n with self.assertRaises(RuntimeError):\n self.butler.get(\"linearizer\", dataId=dict(ccd=badccd), immediate=True)",
"def getMultiple(source_ids, classname='unknown'):\n if type(source_ids) == str:\n f = open(source_ids, 'r')\n ids = f.read().split()\n f.close()\n elif type(source_ids) == list:\n ids = source_ids\n\n # assuming dotAstro IDs:\n sources = []\n for id in ids:\n lc = getLcInfo(id, classname)\n if lc: # getLcInfo returns False if no data found\n sources.append(lc)\n\n return sources",
"def needs_by_district(cls):\n\n T = current.T\n db = current.db\n s3db = current.s3db\n\n table = s3db.need_line\n ntable = s3db.need_need\n etable = s3db.event_event\n ltable = s3db.event_event_need\n\n status = table.status\n number = table.id.count()\n location = ntable.location_id\n\n base_query = (etable.closed == False) & \\\n (etable.id == ltable.event_id) & \\\n (ltable.need_id == ntable.id) & \\\n (ntable.id == table.need_id) & \\\n (table.deleted == False)\n\n # Get the top-5 locations by number of need lines\n query = base_query & (location != None)\n rows = db(query).select(location,\n number,\n groupby = location,\n orderby = ~(number),\n limitby = (0, 5),\n )\n locations = [row[location] for row in rows]\n\n data = []\n if locations:\n # Get labels for locations\n location_represent = S3Represent(lookup = \"gis_location\",\n fields = [\"L2\"],\n )\n location_labels = location_represent.bulk(locations)\n\n # Count need lines per status and location\n query = base_query & (location.belongs(locations))\n rows = db(query).select(location,\n status,\n number,\n groupby = (status, location),\n )\n\n # Group results as {status: {location: number}}\n per_status = {}\n for row in rows:\n row_status = row[status]\n if row_status in per_status:\n per_status[row_status][row[location]] = row[number]\n else:\n per_status[row_status] = {row[location]: row[number]}\n\n # Build data structure for chart renderer\n # - every status gives a series\n # - every district gives a series entry\n for code, label, color in cls.REQ_STATUS:\n series = {\"key\": s3_str(T(label)),\n \"color\": color,\n \"filterKey\": code,\n }\n values = []\n per_location = per_status.get(code)\n for location_id in locations:\n if per_location:\n value = per_location.get(location_id)\n else:\n value = None\n location_label = location_labels.get(location_id)\n item = {\"label\": location_label,\n \"value\": value if value else 0,\n \"filterKey\": location_label,\n }\n values.append(item)\n series[\"values\"] = values\n data.append(series)\n\n return data",
"def test_drugs_id_get(self):\n pass",
"def find_causes(sv):\r\n for nam in sv.Object_list:\r\n nod=sv.Object[nam]\r\n nod.causes=[]\r\n # make name a cause for count(name) and show(name) - needed for display\r\n counted=applied(nam, Count) \r\n if counted: nod.causes=[counted]\r\n showed=applied(nam, Show)\r\n if showed: nod.causes+=[showed]\r\n # ordinary causes\r\n found=nod.causes \r\n for (c,v) in nod.clauses:\r\n found+=get_subtree_names(sv, c) # objects in condition\r\n found+=get_subtree_names(sv, v) # objects in value\r\n nod.causes=unique(found) # remove duplicates, keeping order\r\n if nam in nod.causes: nod.causes.remove(nam) # exclude self\r",
"def getDrops(did, dbn='core', env=None):\n global gDbEnv\n\n if env is None:\n env = gDbEnv\n\n if env is None:\n raise DatabaseError(\"Database environment not set up\")\n\n drip = \"{}/drop/\".format(did)\n dripb = drip.encode()\n entries = []\n subDb = gDbEnv.open_db(dbn.encode(\"utf-8\"), dupsort=True) # open named sub db named dbn within env\n with gDbEnv.begin(db=subDb) as txn: # txn is a Transaction object\n with txn.cursor() as cursor:\n if cursor.set_range(dripb): # first key >= dripb\n while cursor.key().startswith(dripb): # something left in inbox\n try:\n ddid, drop, sdid, muid = cursor.key().decode().split(\"/\")\n except ValueError as ex: # skip entry\n pass\n else:\n if drop == \"drop\":\n entry = ODict()\n entry['from'] = sdid\n entry['uid'] = muid\n entries.append(entry)\n\n if not cursor.next(): # next key in database if any\n break\n\n return entries",
"def show_discdds_geo_main(config, parser):\n parser.add_option(\"-o\", \"--output\", help=\"Output directory\",\n default='out/dp-show-discdds-geo/')\n parser.add_option(\"-t\", \"--tolerance\", help=\"Normalized tolerance\",\n default=0.3, type='float')\n options, which = parser.parse()\n \n outdir = options.output \n \n if not which:\n todo = config.discdds.keys() \n else:\n todo = config.discdds.expand_names(which)\n\n\n for id_dds in todo:\n dds = config.discdds.instance(id_dds) \n report = Report(id_dds)\n \n show_diffeo_structure(dds, report, tolerance=options.tolerance) \n \n write_report_files(report, basename=os.path.join(outdir, id_dds))",
"def dycause_param_search(entry_point, true_root_cause, data_inwindow, data_head, window_start):\n result_list = []\n runtime_debug = True\n for pre_length in [0]:\n for post_length in [200]:\n # Skip experiments that takes less than 200 seconds data\n if pre_length + post_length < 200:\n continue\n for step in [50]:\n for lag in [15]:\n for thres in [0.5]:\n tic = time.time()\n prks, acc = test_dycause(\n # Data params\n data_source=\"ibm_micro_service\",\n aggre_delta=1,\n start_time=None,\n before_length=pre_length,\n after_length=post_length,\n # Granger interval based graph construction params\n step=step,\n significant_thres=0.1,\n lag=lag, # must satisfy: step > 3 * lag + 1\n auto_threshold_ratio=thres,\n # Root cause analysis params\n testrun_round=1,\n frontend=entry_point,\n true_root_cause=true_root_cause,\n max_path_length=None,\n mean_method=\"harmonic\",\n topk_path=150,\n num_sel_node=3,\n # Debug params\n plot_figures=False,\n verbose=0,\n runtime_debug=runtime_debug,\n data=data_inwindow,\n data_head=data_head,\n disable_print=True,\n window_start=window_start\n )\n toc = time.time() - tic\n result_list.append({\n 'pre_len': pre_length,\n 'post_len': post_length,\n 'step': step,\n 'lag': lag,\n 'auto_threshold_ratio': thres,\n 'testrun_round': 1,\n 'runtime_debug': runtime_debug,\n 'time': toc,\n 'prks': prks,\n 'acc': acc\n })\n print(\"Granger extend pre:{:d} post:{:d} step:{:d} \"\n \"lag:{:d} thres:{:.2f} time:{:.4f} acc:{:.4f}\".format(\n pre_length, post_length, step, lag, thres, toc, acc))\n return result_list",
"def _get_diff_data(views_index, src_data, ea_index, ddi_data):\n\n def _add_and_del():\n \"\"\"Handles the add's and del import's.\"\"\"\n for add_or_del_row in src_data:\n # Add Check.\n if 'add' in add_or_del_row[0]:\n if add_or_del_row[1] in \\\n ddi_data[views_index[add_or_del_row[15]]]:\n errored_list.append(add_or_del_row)\n continue\n else:\n import_add.append(add_or_del_row)\n continue\n\n # delete check\n if 'del' in add_or_del_row[0] and add_or_del_row[1] in \\\n ddi_data[views_index[add_or_del_row[15]]][\n add_or_del_row[1]]:\n import_delete.append([add_or_del_row[15],\n add_or_del_row[1],\n add_or_del_row[14]])\n continue\n unused_list.append(add_or_del_row)\n\n def _ea_in_disposition_col0_and_empty_ipr_d_col():\n \"\"\"Disposition col0 check and an empty ipr disposition column.\"\"\"\n for disposition_row in unused_list:\n # Check disposition\n ddi_index = views_index[disposition_row[15]]\n # Checks disposition column value and checks for IPR D value.\n # If no IPR D in extattrs dict stores the src data for updates.\n if disposition_row[0] in ea_ipr_d_values and 'IPR Designation' not\\\n in ddi_data[ddi_index][disposition_row[1]]['extattrs']:\n import_merge_disposition.append(\n [disposition_row[15],\n disposition_row[1],\n disposition_row[14],\n disposition_row[0]])\n\n def _comment_check():\n \"\"\"Function for checking ipam comment attribute.\"\"\"\n for comment_row in unused_list:\n ddi_index = views_index[comment_row[15]]\n # Checks for empty src value and empty ddi data value.\n # Continues if True.\n if 'comment' not in ddi_data[ddi_index][comment_row[1]]\\\n and comment_row[12] == '':\n continue\n # Checks a non-empty src value and updates if an\n # empty ddi data value.\n if 'comment' not in ddi_data[ddi_index][comment_row[1]] and \\\n comment_row[12] != '':\n import_merge.append([comment_row[15],\n comment_row[1],\n comment_row[14],\n {'comment': comment_row[12]}])\n continue\n # Checks diff against src value and a populated value in the\n # ddi data and replaces with src value.\n if comment_row[12] != \\\n ddi_data[ddi_index][comment_row[1]]['comment']:\n import_override.append([comment_row[15],\n comment_row[1],\n comment_row[14],\n {'comment': comment_row[12]}])\n continue\n\n def _non_listed_ea_columns_check():\n \"\"\"Checks non-listable ea columns.\"\"\"\n for ea_row in unused_list:\n # dup Check in disposition\n ddi_index = views_index[ea_row[15]]\n for key, value in ea_index.items():\n # ea attributes that could be listed.\n if key == 'Datacenter' or key == 'IPR Designation':\n continue\n # Checks for empty src value and empty ddi data value.\n # Continues if True.\n if key not in ddi_data[ddi_index][ea_row[1]]['extattrs'] and \\\n ea_row[value] in ['', 'DDI']:\n continue\n # Checks a non-empty src value and updates if an\n # empty ddi data value.\n if key not in ddi_data[ddi_index][ea_row[1]]['extattrs'] \\\n and ea_row[value] not in ['', 'DDI']:\n import_merge.append([ea_row[15],\n ea_row[1],\n ea_row[14],\n {key: ea_row[value]}])\n continue\n # Checks diff against src value and a populated value in the\n # ddi data and replaces with src value.\n if ea_row[value] != \\\n ddi_data[ddi_index][\n ea_row[1]]['extattrs'][key]['value']:\n import_override.append([ea_row[15],\n ea_row[1],\n ea_row[14],\n {key: ea_row[value]}])\n continue\n\n def _listed_ea_column_check():\n \"\"\"Checks non-listable ea columns.\"\"\"\n for ea_row in unused_list:\n ddi_index = views_index[ea_row[15]]\n # This check is performed in\n # _ea_in_disposition_col0_and_empty_ipr_d_col\n 
if ea_row[0] in ea_ipr_d_values and \\\n 'IPR Designation' not in \\\n ddi_data[ddi_index][ea_row[1]]['extattrs']:\n continue\n # Update IPR D src column with ea_row[0] for processing.\n # WORK IN PROGRESS\n elif ea_row[0] in ea_ipr_d_values and 'IPR Designation' \\\n in ddi_data[ddi_index][ea_row[1]]['extattrs']:\n pass\n # Processing listable columns.\n for key, value in ea_index.items():\n # Skip's unused keys.\n if key not in ['Datacenter', 'IPR Designation']:\n continue\n # Check for blank column and blank source column.\n if key not in ddi_data[ddi_index][ea_row[1]]['extattrs'] and \\\n ea_row[value] in ['', 'DDI']:\n continue\n # Check for Disposition col, check for comma not in IPR D col\n # value, check value in IPR D col to ea ipr d attribute list,\n # check IPR D col value eq ddi value.\n # On not listed IPR D values.\n if key == 'IPR Designation':\n if ea_row[0] in ea_ipr_d_values \\\n and ',' not in ea_row[16] \\\n and ea_row[16] in ea_ipr_d_values:\n ea_row[16] = ea_row[16] + ',' + ea_row[0]\n import_override.append([ea_row[15].strip(),\n ea_row[1].strip(),\n ea_row[14].strip(),\n {key: ea_row[16]}])\n continue\n # Check for Disposition col, check for comma not in IPR D col\n # value, check value in IPR D col to ea ipr d attribute list,\n # check IPR D col value eq ddi value.\n # On not listed IPR D values.\n elif ea_row[0] in ea_ipr_d_values \\\n and ',' not in ea_row[16] \\\n and ea_row[16] not in ea_ipr_d_values:\n import_override.append([ea_row[15].strip(),\n ea_row[1].strip(),\n ea_row[14].strip(),\n {key: ea_row[0]}])\n continue\n# # Check Disposition col. and if IPR D listed value needs\n# # updating. On listed IPR D values.\n# if ea_row[0].lower().strip() in ea_ipr_d_values \\\n# and ',' in ea_row[16]:\n# temp_list = ea_row[16].split(',')\n# temp_list = [x.strip() for x in temp_list]\n# if ea_row[0].lower().strip() in temp_list:\n# continue\n# else:\n# temp_list.append(ea_row[0].lower().strip())\n# temp_dict_override.update({key: temp_list})\n# import_override.append([ea_row[15].strip(),\n# ea_row[1].strip(),\n# ea_row[14].strip(),\n# temp_dict_override])\n# continue\n\n # Builds dataset for non-listed values. Final Step.\n # If key not in ddi data and src value is not none.\n # Assign to merge.\n if key not in ddi_data[ddi_index][ea_row[1]]['extattrs'] \\\n and ea_row[value] not in ['', 'DDI']:\n import_merge.append([ea_row[15].strip(),\n ea_row[1].strip(),\n ea_row[14].strip(),\n {key: ea_row[value]}])\n continue\n # Checks diff against src value and a populated value in the\n # ddi data and replaces with src value.\n if ea_row[value] != \\\n ddi_data[ddi_index][\n ea_row[1]]['extattrs'][key]['value']:\n import_override.append([ea_row[15],\n ea_row[1],\n ea_row[14],\n {key: ea_row[value]}])\n continue\n\n # Local scope variables.\n import_add = []\n import_delete = []\n import_merge = []\n import_override = []\n import_merge_disposition = []\n unused_list = []\n errored_list = []\n # Check for extensible attribute in Disposition column[0].\n # If found and IPR D column is empty append for writing.\n ea_ipr_d_values = ['leaf', 'dup', 'followup', 'decom', 'adv', 'divest',\n 'ignore', 're-ip', 'parent', 'drop reserve']\n _add_and_del()\n _ea_in_disposition_col0_and_empty_ipr_d_col()\n _comment_check()\n _non_listed_ea_columns_check()\n _listed_ea_column_check()\n return import_add, \\\n import_delete, \\\n import_merge_disposition, \\\n import_merge, \\\n import_override",
"def get_seq_and_id(fasta_file, promoter_seq, promoter_ids, threshold, scores_file, delimiter):\n\n map_txt = \"DDB_DDB_G/DDB-GeneID-UniProt.txt\"\n df = pd.read_csv(map_txt, sep=\"\\t\")\n ddb_id = list(df['DDBDDB ID'].as_matrix())\n ddb_g_id = list(df['DDB_G ID'].as_matrix())\n\n all_valid_records = get_data_target.get_ids(scores_file, delimiter, 'ID')\n print(all_valid_records)\n sequences = []\n record_ids = []\n for record in SeqIO.parse(fasta_file, \"fasta\"):\n record_id = str(record.id)\n end = record_id.find('|')\n record_id_short = record_id\n if end != -1:\n record_id_short = record_id[:end]\n print(record_id_short)\n try:\n ddbg_record_id_short = ddb_g_id[ddb_id.index(record_id_short)]\n except ValueError:\n ddbg_record_id_short = record_id_short\n if ddbg_record_id_short in all_valid_records:\n record_ids.append(ddbg_record_id_short)\n seq = str(record.seq)[-threshold:]\n sequences.append(seq)\n data_record_ids = pd.DataFrame({\"record_id\": record_ids})\n data_sequences = pd.DataFrame({\"record_sequence\": sequences})\n data_record_ids.to_csv(promoter_ids, index=False, header=False)\n data_sequences.to_csv(promoter_seq, index=False, header=False)",
"def query_runoff(df_events, watershed, flume, df_rain):\n\n # query all runoff records at selected flume\n df_runRaw = query_DAP_runoff_rates(watershed, flume)\n df_runRaw.runCode = df_runRaw.runCode.astype('int32')\n df_runRaw = reduce_df_size(df_runRaw)\n\n v(f'\\n\\nat flume{flume}:\\n{len(df_runRaw)} runoff records are available from DAP. Availability: {df_runRaw.timeStamp.min()} to {df_runRaw.timeStamp.max()}\\n')\n\n df_runoff = pd.DataFrame()\n\n for i in range(len(df_events)):\n\n preName, ad_type = df_events.preName[i],df_events.dataType[i]\n runStartTime, runEndTime = df_events.startTime[i], df_events.endTime[i]\n\n print(f'retrieve runoff rates for event {preName}')\n\n df_ = df_runRaw.loc[(df_runRaw.dataType==ad_type) &\n (df_runRaw.startTime>=runStartTime) & \\\n (df_runRaw.startTime<=runEndTime), :]\n\n if len(df_) == 0:\n v(f'no runoff rates for runoff event {preName}')\n\n else:\n df_ = df_.assign(preName=preName)\n df_runoff = df_runoff.append(df_)\n\n df_runoff = df_runoff.reset_index(drop=True)\n\n return df_runoff, df_runRaw",
"def find_sources(obs_id, data_dir):\n\n sources = []\n logs = glob.glob(data_dir+'/'+str(obs_id)+'/param_01_preflag_*.npy')\n\n for i in range(len(logs)):\n sources.append(logs[i][41:-4])\n\n return sources",
"def querybydistrictid(self, districtids, date, min_age_limit, vaccine):\n while True:\n api = '/api/v2/appointment/sessions/public/findByDistrict'\n logger.info(\"Hitting API {}\".format(api))\n for id in districtids:\n self.requestfunc(api, 'district_id={}&date={}'.format(id,date))\n if self.checkavailability(min_age_limit, vaccine) == 1:\n self.ring_alarm()\n self.sleep(20) # Wait for few seconds for next query.\n\n # Recurse the Function\n # self.querybydistrictid(districtids, date, min_age_limit, vaccine)",
"def get_all_parameters(cause_ids, hybridizer=False,\n start_date=datetime(2019, 1, 11), end_date=datetime.now()):\n cause_ids = list_check(cause_ids)\n jobs = get_jobs(start_date, end_date, hybridizer)\n dictionary = {}\n for c in tqdm(cause_ids):\n dictionary[c] = get_demographic_dictionary(c, hybridizer)\n for m in dictionary[c]:\n for a in dictionary[c][m]:\n for e in dictionary[c][m][a]:\n dictionary[c][m][a][e] = get_parameters(cause_id=c, age_start=a, age_end=e,\n model_version_type_id=m,\n hybridizer=hybridizer,\n jobs=jobs)\n return dictionary",
"def get_possible_ids(self):\n ids = []\n\n dest_data = requests.get(\"https://api.wdpro.disney.go.com/facility-service/destinations/{}\".format(self.__anc_dest_id), headers=getHeaders()).json()\n data = requests.get(dest_data['links']['entertainmentVenues']['href'], headers=getHeaders()).json()\n\n for entry in data['entries']:\n try:\n ids.append(entry['links']['self']['href'].split('/')[-1].split('?')[0])\n except:\n pass\n\n return ids",
"def get_doms(tids, params):\n pfam_lkp = {}\n tidstr = \"', '\".join(str(t) for t in tids)\n data = queryDevice.queryDevice(\"\"\"\n SELECT tid, domain_name\n FROM target_components tc\n\t JOIN component_domains cd\n\t ON cd.component_id = tc.component_id\n JOIN domains d\n\t ON d.domain_id = cd.domain_id\n WHERE tc.tid IN('%s') and domain_type = 'Pfam-A'\"\"\" %tidstr, params)\n for ent in data:\n tid = ent[0]\n dom = ent[1]\n try:\n pfam_lkp[tid].append(dom)\n except KeyError:\n pfam_lkp[tid] = [dom]\n return pfam_lkp",
"def parseCCD(ids):\n if isListLike(ids):\n n_ids = len(ids)\n else:\n ids = [ids]\n n_ids = 1\n\n ret = []\n for id in ids:\n id_url = 'http://ligand-expo.rcsb.org/reports/{0}/{1}/{1}.cif'.format(id[0],\n id)\n try:\n handle = openURL(id_url)\n except Exception as err:\n LOGGER.warn('download failed ({1}).'.format(str(err)))\n else:\n data = handle.read()\n if len(data):\n if PY3K:\n data = data.decode()\n\n parsingDict, prog = parseSTARLines(data.split('\\n'), shlex=True)\n \n star_dict = StarDict(parsingDict, prog, id)\n ret.append(star_dict[id])\n else:\n ret.append(None)\n LOGGER.warn('Could not parse CCD data for {0}'.format(id))\n\n if n_ids == 1:\n return ret[0]\n\n return ret",
"def read_lnasdr(cause_id, codcorrect_version_id, gbd_round_id):\n fname = FPATH_TEMPLATE.format(\n cause_id=cause_id, codcorrect_version_id=codcorrect_version_id\n )\n fpath = Path(ASDR_DIR.format(\n GBD_ROUND=gbd_round_from_gbd_round_id(gbd_round_id))) / Path(fname)\n\n log = logging.getLogger(__name__)\n log.info(f\"reading ASDR from {fpath}\")\n\n try:\n df = pd.read_hdf(fpath)\n except FileNotFoundError:\n log.error(\n f\"Failed to find Ln-ASDR for cause_id {cause_id} and codcorrect \"\n \"version {codcorrect_version_id}\"\n )\n raise RuntimeError\n return df",
"def resolve_cause_ids( cause, model, arguments = DEFAULT_ARGUMENTS):\n # cause can be an entity -> cause is the entity itself\n if isinstance( cause, parse_standoff.EntityTrigger):\n return [cause.id]\n # cause can be an event which is in the model\n # -> then the cause is actually the product of that event\n elif model.getReaction( cause.id):\n reaction = model.getReaction( cause.id)\n product_id = add_product( product_id = None, reaction = reaction, model = model, arguments = arguments);\n return [ product_id]\n # cause can be an event which is Regulation and not in model\n # -> cause is the cause of that event\n elif isinstance( cause, parse_standoff.Event) \\\n and cause.type in [\"positive_regulation\", \"negative_regulation\", \"regulation\", \"catalysis\"] \\\n and len( cause.get_roles(\"cause\")) > 0:\n # find the causes of the cause event\n results = []\n for c in cause.get_roles(\"cause\"):\n results.extend( resolve_cause_ids( c, model, arguments = arguments))\n return results\n elif isinstance( cause, parse_standoff.Event) \\\n and cause.type in [\"positive_regulation\", \"negative_regulation\", \"regulation\", \"catalysis\"]:\n cause_entity = add_species( None,\n model = model,\n id = cause.id + \"_Cause_0\",\n name = \"Cause\",\n arguments = arguments);\n return [ cause_entity.getId()]\n # cannot handle other causes\n else:\n return [];",
"def gdcs_reporter(metadata, analysistype, reportpath):\n # Initialise list to store all the GDCS genes, and genera in the analysis\n gdcs = list()\n genera = list()\n for sample in metadata:\n sample[analysistype].faidict = dict()\n if sample.general.bestassemblyfile != 'NA':\n if os.path.isdir(sample[analysistype].targetpath):\n # Update the fai dict with all the genes in the analysis, rather than just those with baited hits\n Reports.gdcs_fai(sample=sample,\n analysistype=analysistype)\n sample[analysistype].createreport = True\n # Determine which genera are present in the analysis\n if sample.general.closestrefseqgenus not in genera:\n genera.append(sample.general.closestrefseqgenus)\n try:\n # Add all the GDCS genes to the list\n for gene in sorted(sample[analysistype].faidict):\n if gene not in gdcs:\n gdcs.append(gene)\n except AttributeError:\n sample[analysistype].createreport = False\n else:\n sample[analysistype].createreport = False\n else:\n sample[analysistype].createreport = False\n sample.general.incomplete = True\n header = 'Strain,Genus,Matches,Pass/Fail,{},\\n'.format(','.join(sorted(gdcs)))\n data = str()\n with open(os.path.join(reportpath, '{}.csv'.format(analysistype)), 'w') as report:\n # Sort the samples in the report based on the closest refseq genus e.g. all samples with the same genus\n # will be grouped together in the report\n for genus in genera:\n for sample in metadata:\n if sample.general.closestrefseqgenus == genus:\n if sample[analysistype].createreport:\n sample[analysistype].totaldepth = list()\n # Add the sample to the report if it matches the current genus\n data += '{},{},'.format(sample.name, genus)\n # Initialise a variable to store the number of GDCS genes were matched\n count = 0\n # As I want the count to be in the report before all the gene results, this string will\n # store the specific sample information, and will be added to data once count is known\n specific = str()\n for gene in sorted(gdcs):\n # As there are different genes present in the GDCS databases for each organism of\n # interest, genes that did not match because they're absent in the specific database are\n # indicated using an X\n if gene not in [result for result in sample[analysistype].faidict]:\n specific += 'X,'\n else:\n try:\n specific += '{p_id},'.format(p_id=sample[analysistype].blastresults[gene])\n # Report the necessary information for each gene result\n count += 1\n # If the gene was missing from the results attribute, add a - to the cell\n except (KeyError, AttributeError):\n specific += '-,'\n # Determine whether the sample pass the necessary quality criteria:\n # Pass, all GDCS, mean coverage greater than 20X coverage;\n # ?: Indeterminate value;\n # -: Fail value\n # Allow one missing GDCS to still be considered a pass\n if count >= len(sample[analysistype].faidict) - 1:\n quality = '+'\n else:\n quality = '-'\n # Add the count, mean depth with standard deviation, the pass/fail determination,\n # and the total number of GDCS genes as well as the results\n data += '{hits}/{total},{fail},{gdcs}\\n'\\\n .format(hits=str(count),\n total=len(sample[analysistype].faidict),\n fail=quality,\n gdcs=specific)\n # Any samples with a best assembly of 'NA' are considered incomplete.\n else:\n data += '{},{},,,-\\n'.format(sample.name, sample.general.closestrefseqgenus)\n elif sample.general.closestrefseqgenus == 'NA':\n data += '{}\\n'.format(sample.name)\n # Write the header and data to file\n report.write(header)\n report.write(data)\n # Return the updated metadata 
object\n return metadata",
"def get_exposure(exposure_id,b_mean,b_sd,c_mean,c_sd,non_rate,dist_type,mortalities):#id in db\n\te_id \t\t= int(long(exposure_id))\n\texposure_outcomes = DBHelper.exposure_outcome\n\toutcome_ids \t= DBHelper.exposure_outcome.get(e_id)\n\n\tsamples_rr \t= DBHelper.samples_rr.get(e_id)\n\tsamples_pop \t= DBHelper.samples_pop.get(e_id)\n\trisks \t\t= DBHelper.risks.get(e_id)\n\tmeasure \t= DBHelper.measures.get(e_id)\n\tdist_type \t= get_dist_type(e_id)\n\n\t#get population distribution \n\tpopDistribution = PopDistribution(DBHelper.age_group_num,non_rate,b_mean,b_sd,c_mean,c_sd,samples_pop,dist_type)\n\n\t#get outcomes\n\toutcomes = []\n\tfor o_id in outcome_ids:\n\t\t# mortality\n\t\tm_mortality = mortalities.get(2*o_id)\n\t\tf_mortality = mortalities.get(2*o_id+1)\n\t\t# risks\n\t\tm_risks = risks.get(2*o_id)\n\t\tf_risks = risks.get(2*o_id+1)\n\t\t# outcome name\n\t\tname = DBHelper.get_outcome_name(o_id)\n\t\t# limit estimates\n\t\tlle = DBHelper.exposure_outcome.get(e_id).get(o_id)[0]\n\t\tule = DBHelper.exposure_outcome.get(e_id).get(o_id)[1]\n\t\t# outcome\n\t\toutcome = PrimeOutcome(name,o_id,m_mortality,f_mortality,samples_rr,m_risks,f_risks,lle,ule,measure,e_id) \n\t\toutcomes.append(outcome)\n\n\texposure = PrimeExposure(mortalities,outcome_ids,samples_rr,samples_pop,outcomes,popDistribution)\n\treturn exposure"
] | [
"0.56230086",
"0.53120613",
"0.5277913",
"0.5180252",
"0.51693785",
"0.507765",
"0.5014216",
"0.49858776",
"0.4975915",
"0.49524066",
"0.49363998",
"0.49002153",
"0.48908767",
"0.48766443",
"0.48478845",
"0.48225546",
"0.48033267",
"0.4801104",
"0.4785761",
"0.4774491",
"0.47706035",
"0.47638518",
"0.47355726",
"0.47251192",
"0.4722997",
"0.47166985",
"0.4715812",
"0.47015214",
"0.46844786",
"0.46775067"
] | 0.7026445 | 0 |
Following Nathaniel's Vitamin A multiplicative model writeup Calculates, in draw space, the PAF of some outcome 'o' (such as NTD incidence) given some dichotomous exposure 'r' (such as a lack of folic acid fortification) | def paf_o_r(rr_o_r, alpha):
return ((rr_o_r - 1) * (1 - alpha)) / ((rr_o_r - 1) * (1 - alpha) + 1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def prf(stats):\n if stats['pred'] == 0:\n return 0, 0, 0\n p = stats['corr']/stats['pred']\n r = stats['corr']/stats['gold']\n if p > 0 and r > 0:\n f = 2*p*r/(p+r)\n else:\n f = 0\n return p, r, f",
"def pif_o_r(paf_o_r, alpha, alpha_star):\n return paf_o_r * ((alpha_star - alpha) / (1 - alpha))",
"def afprob (dfnum, dfden, F):\r\n if type(F) == N.ndarray:\r\n return abetai(0.5*dfden, 0.5*dfnum, dfden/(1.0*dfden+dfnum*F))\r\n else:\r\n return abetai(0.5*dfden, 0.5*dfnum, dfden/float(dfden+dfnum*F))",
"def compute_prf_on_selection(arts, forms_set):\n tp=0\n fn=0\n fp=0\n for article in arts:\n for entity in article.entity_mentions:\n if entity.mention in forms_set:\n if entity.gold_link==entity.sys_link:\n tp+=1\n else:\n if entity.sys_link!='--NME--':\n fp+=1\n if entity.gold_link!='--NME--':\n fn+=1\n print(tp, fp, fn)\n p=tp/(tp+fp)\n r=tp/(tp+fn)\n f1=2*p*r/(p+r)\n print(p,r,f1)\n return f1",
"def calculate_f_p(genes, gene_abundance_file, gene_molecular_weight_file):\n gene_abundance = pd.read_csv(gene_abundance_file, index_col=0)\n gene_molecular_weight = json_load(gene_molecular_weight_file)\n enzy_abundance = 0\n pro_abundance = 0\n for gene_i in gene_abundance.index:\n if gene_i in gene_molecular_weight.keys():\n abundance = gene_abundance.loc[gene_i, 'abundance'] * \\\n gene_molecular_weight[gene_i]/1000\n pro_abundance += abundance\n if gene_i in genes.index:\n enzy_abundance += abundance\n f = enzy_abundance/pro_abundance\n return f",
"def calories_protein(og, fg):\n\n return 0.994 * fg * real_extract(og, fg)",
"def proba_fm(m_pred,f_pred, dataset):\n p = np.zeros(10)\n if dataset == 'cifar10':\n for i in range(10):\n if i <4:\n if i <2:\n p[i] = (m_pred[0])*(f_pred[i]/np.sum(f_pred[0:2]))\n else:\n p[i] = (m_pred[1])*(f_pred[i]/np.sum(f_pred[2:4]))\n else:\n if i <6:\n p[i] = (m_pred[2])*(f_pred[i]/np.sum(f_pred[4:6]))\n elif i <8:\n p[i] = (m_pred[3])*(f_pred[i]/np.sum(f_pred[6:8]))\n else:\n p[i] = (m_pred[4])*(f_pred[i]/np.sum(f_pred[8:]))\n else :\n for i in range(10):\n if i <5:\n if i <3:\n p[i] = (m_pred[0])*(f_pred[i]/np.sum(f_pred[0:3]))\n else:\n p[i] = (m_pred[1])*(f_pred[i]/np.sum(f_pred[3:5]))\n else:\n if i <8:\n p[i] = (m_pred[2])*(f_pred[i]/np.sum(f_pred[5:8]))\n else:\n p[i] = (m_pred[3])*(f_pred[i]/np.sum(f_pred[8:]))\n return(p)",
"def OOM_approach(bolo_name, exposure):\n\n\th, hfile = PyRPl.open_ROOT_object(\"../ROOT_files/Axion/Spec/\" + bolo_name + \"_spec_perkeV.root\", \"h_\" + bolo_name)\n\tfflux, file_flux = PyRPl.open_ROOT_object(\"../ROOT_files/Axion/CBRD_convolved/\" + bolo_name + \"_flux.root\", bolo_name + \"_flux_mass_0\")\n\n\n\tclass Flux:\n\t\tdef __call__( self, x, par ):\n\t\t\treturn exposure*(par[0]**4)*fflux.Eval(x[0])\n\n\tnorm_flux = TF1(\"flux\", Flux(), 0, 12, 1)\n\tnorm_flux.SetParameter(0,20E-12)\n\n\th.Draw()\n\tnorm_flux.Draw(\"same\")\n\traw_input()",
"def fObs(p):\n f = 0\n for obs in obstacles(p):\n f += fPenalty(obs[0])\n return f",
"def aeroFus(F,alpha,beta,rp,V0=50.):\r\n a = alpha * m.pi/180;\r\n S_Cyl = 0.25 * m.pi * F.cD ** 2;\r\n f_F = (F.cL + F.bL + F.nL) / F.cD;\r\n FF = 1. + 2.2/(f_F ** (1.5)) - 0.9/(f_F ** (3.));\r\n gen = F.getGen();\r\n x = np.concatenate([np.linspace(0,gen[17,0],50), np.linspace(gen[17,0],gen[18,0],100),np.linspace(gen[18,0],gen[19,0],100)]);\r\n ReX = V0*x/(1.57e-5);\r\n delta = np.concatenate([[0.],4.92*x[1:50]/(ReX[1:50]**(0.5)),4.92*x[50]/(ReX[50]**(0.5))+0.35*(x[50:]-x[50])/(ReX[50:]**(0.2))]);\r\n rayon = np.interp(x,gen[:,0],gen[:,1])+delta;\r\n S = m.pi * rayon ** 2;\r\n cp_X = (x[1:]+x[:-1])*0.5;\r\n dS = S[1:]-S[:-1];\r\n \r\n CN_lin = np.sin(2.*a) * S[-1];\r\n CX_lin = np.sin(a)**2 * S[-1];\r\n L_lin = CN_lin * np.cos(a) - CX_lin * np.sin(a);\r\n D_lin = CN_lin * np.sin(a) + CX_lin * np.cos(a);\r\n M_lin = -np.sin(2.*a) * np.sum(dS*cp_X);\r\n ReF = V0 * cp_X /(1.57e-5);\r\n CF = np.concatenate([1.328/(ReF[:49]**0.5), 0.0442/(ReF[49:]**(1./6.))]);\r\n dX = x[1:]-x[:-1];\r\n r2 = np.interp(x,gen[:,0],gen[:,1]);\r\n ds = ((r2[1:]-r2[:-1])**2 + dX ** 2) ** (0.5);\r\n rMoy = (r2[1:]+r2[:-1])*0.5;\r\n dSW = 2*m.pi*ds*rMoy;\r\n Frot = np.sum(CF*dSW)*FF;\r\n D_Culot = 0.14*0.25 * m.pi * F.bD**2; # Lecture Aerodynamics 2A : slender body\r\n if F.bL/F.cD < 0.8:\r\n D_con = 1.4*S_Cyl;\r\n else:\r\n D_con = 1.4 * m.exp(-((F.bL/F.cD)-0.8)*3./0.8) * S_Cyl;\r\n D_windscreen = S_Cyl * 2.e-3;\r\n D_par = (Frot+D_Culot + D_con + D_windscreen);\r\n# D_par = (Frot + D_con + D_windscreen);\r\n Cx = 1.2; # Allen ensures that for transversal flow ok if no compressible effect\r\n L_visc = np.sin(a) * np.abs(np.sin(a)) * np.cos(a) * Cx * np.sum(2.*dX*rMoy);\r\n D_visc = np.abs(np.sin(a) ** 3) * Cx * np.sum(2.*dX*rMoy);\r\n M_visc = - Cx * np.abs(np.sin(a)) * np.sin(a) * np.sum(2.*dX*rMoy*cp_X);\r\n \r\n L = L_lin + L_visc - D_par * np.sin(a);\r\n D = D_lin + D_visc + D_par * np.cos(a);\r\n Moment = M_lin + M_visc;\r\n d = ((rp[0] + F.hDist) ** 2 + (rp[2]+F.vDist)**2)**(0.5);\r\n TP = np.arctan2(-(rp[2]+F.vDist),(rp[0]+F.hDist));\r\n M = Moment + d * L * np.cos(TP+a) + d * D * np.sin(TP + a);\r\n \r\n beta *= m.pi/180.;\r\n CN_lin = np.sin(2.*beta) * S[-1];\r\n CX_lin = np.sin(beta)**2 * S[-1];\r\n Y_lin = - CN_lin * np.cos(beta) + CX_lin * np.sin(beta);\r\n D_lin = CN_lin * np.sin(beta) + CX_lin * np.cos(beta);\r\n N_lin = np.sin(2.*beta) * np.sum(dS*cp_X);\r\n Y_visc = - np.sin(beta) * np.abs(np.sin(beta)) * np.cos(beta) * Cx * np.sum(2.*dX*rMoy);\r\n D_visc = np.abs(np.sin(beta) ** 3) * Cx * np.sum(2.*dX*rMoy);\r\n N_visc = Cx * np.abs(np.sin(beta)) * np.sin(beta) * np.sum(2.*dX*rMoy*cp_X);\r\n N = N_lin + N_visc;\r\n Y = Y_lin + Y_visc + D_par * np.sin(beta);\r\n Dt = D_lin + D_visc;\r\n D += Dt;\r\n dv = rp[0] + F.hDist;\r\n dvt = F.bL+F.cL+F.nL - dv;\r\n TPv = np.arctan2(-rp[1],(rp[0]+F.hDist));\r\n N += dv * (Y_lin + Y_visc) * m.cos(TPv - beta) + dv * Dt * m.sin(TPv-beta) + D_par * np.sin(beta) * dvt ;\r\n Y = 0.; \r\n return L,D,M,Y,N",
"def real_attenuation(og, fg):\n\n oe = sg2plato(og)\n re = real_extract(og, fg)\n return (oe - re) / oe * 100.",
"def setup_fpa():\n # it is a silicon detector. Based on the graph, the quantum efficiency\n # at 1.06 um is ~50%.\n fpa = {}\n fpa[\"quantum_efficiency\"] = 0.5\n return fpa",
"def _wf(self, p):\n r = self.faces - p\n n = norm(r, axis=2)\n num = row_wise_dot(r[:, 0, :], np.cross(r[:, 1, :], r[:, 2, :]))\n den = n[:, 1] * n[:, 2] * n[:, 0]\n for i in range(3):\n j = (i + 1) % 3\n k = (i + 2) % 3\n den += row_wise_dot(r[:, i, :], r[:, j, :]) * n[:, k]\n return 2*np.arctan2(num, den)",
"def f1_score(confusion):\n p = precision(confusion)\n r = sensitivity(confusion)\n F1 = (2 * p * r) / (p + r)\n return F1",
"def calf_f1(annotated_Y, predicted_Y):\n\n POSITIVE = ADR_MENTION_CLASS_LABEL\n NEGATIVE = NON_ADR_MENTION_CLASS_LABEL\n\n tp = 0\n fp = 0\n fn = 0\n tn = 0\n\n total_actual_positives = 0\n total_actual_negatives = 0\n\n for index, actual in enumerate(annotated_Y):\n predicted = predicted_Y[index]\n\n if actual == POSITIVE:\n total_actual_positives += 1\n\n if predicted == POSITIVE:\n tp += 1\n elif predicted == NEGATIVE:\n fn += 1\n\n elif actual == NEGATIVE:\n total_actual_negatives += 1\n\n if predicted == POSITIVE:\n fp += 1\n elif predicted == NEGATIVE:\n tn += 1\n\n if (tp+fp) == 0:\n precision = 0\n else:\n precision = tp/(tp+fp)\n\n if (tp+fn) == 0:\n recall = 0\n else:\n recall = tp/(tp+fn)\n\n if (precision+recall) == 0:\n f1 = 0\n else:\n f1 = 2*precision*recall/(precision+recall)\n\n # print(\"Total labels: {}, total actual positives: {}, total_actual_negatives: {}\".format(len(predicted_Y), total_actual_positives, total_actual_negatives))\n # print(\"tp: {}, tn: {}, fp: {}, fn: {}\".format(tp, tn, fp, fn))\n # print(\" Accuracy: {}\".format((tp+tn)/(len(test_Y))))\n print(\" Precision: {}\".format(precision))\n print(\" Recall: {}\".format(recall))\n print(\" F1: {}\".format(f1))",
"def proba_fc(c_pred,f_pred,dataset):\n p = np.zeros(10)\n for i in range(10):\n if dataset =='cifar10':\n if i <4:\n p[i] = (c_pred[0])*(f_pred[i]/np.sum(f_pred[0:4]))\n else:\n p[i] = (c_pred[1])*(f_pred[i]/np.sum(f_pred[4:]))\n else:\n if i<5:\n p[i] = (c_pred[0])*(f_pred[i]/np.sum(f_pred[0:5]))\n else:\n p[i] = (c_pred[1])*(f_pred[i]/np.sum(f_pred[5:]))\n return(p)",
"def prf_analysis(y_true: list, y_pred: list) -> None:\n print('Precision: {:,.2f}'.format(precision_score(y_true, y_pred)))\n print('Recall : {:,.2f}'.format(recall_score(y_true, y_pred)))\n print('F1 : {:,.2f}'.format(f1_score(y_true, y_pred)))\n print('Accuracy : {:,.2f}'.format(accuracy_score(y_true, y_pred)))\n return None",
"def real_extract(og, fg):\n\n oe = sg2plato(og)\n ae = sg2plato(fg)\n q = 0.22 + 0.001 * oe\n return (q * oe + ae) / (1 + q)",
"def fAVM(RHOB,Dw,Ds,Df,Dc1,PHIc1,Ck,Dk,PHIk,RSK):\n#\n# 5.1.1 Initialise Outputs & Check for missing values in inputs:\n# --------------------------------------------------------------\n\tPHIt=MissingValue\n\tPHIe=MissingValue\n\tCBW=MissingValue\n\tBVW=MissingValue\n\tHCPV=MissingValue\n\tVf=MissingValue\n\tVs=MissingValue\n\tSwt=MissingValue\n\tSwe=MissingValue\n\tVc1=MissingValue\n\tVc2=MissingValue\n\tVc3=MissingValue\n\tVk=MissingValue\n\tToc=MissingValue\n\tQc=MissingValue\n\tGDen=MissingValue\n\tif MissingValue in (RHOB,Dw,Ds,Df,Dc1,PHIc1,Ck,Dk,PHIk,RSK):\n\t\treturn PHIt,PHIe,CBW,BVW,HCPV,Vf,Vs,Swt,Swe,Vc1,Vc2,Vc3,Vk,Toc,Qc,GDen\n#\n# 5.1.2 Initialise parameters:\n# ----------------------------\n\tNIter=0\n\tNIterMax=100\n\tErrIter=10000\n\tTolErrIter=0.0001\n\tIterEnd=0\n\tVk=0.000 # Initially assumme no kerogen\n\tDh=Df\n#\n#\t5.1.3 Start interative loop:\n#\t-----------------------------\n\twhile IterEnd==0:\n#\n# 5.5.3.1 Organic and Inorganic Component Density Values:\n# -------------------------------------------------------\n\t\tDBI=(1-PHIc1)*Dc1+(PHIc1*Dw) # Bulk Density of Inorganic Component\n\t\tDBO=(1-PHIk)*Dk+(PHIk*Dh)# Bulk Density of Organic Component\n#\n# 5.1.3.2 Compute Volume of Organic and Inorganic Component:\n# ----------------------------------------------------------\n\t\tVOR=(DBI-RHOB)/(DBI-DBO)\n\t\tVOR=ImposeLimits(VOR,0,1)\n\t\tVIN=(1-VOR)\n#\n# 5.1.3.3 Compute Volumetrics, Total & Effective Porosity and Total & Effective Water Saturation:\n# ---------------------------------------\t-------------------------------------------------------\n\t\tVc1=VIN*(1-PHIc1)\n\t\tVc2=0.000\n\t\tVc3=0.000\n\t\tVk=VOR*(1-PHIk)\n\t\tPHIt=VIN*PHIc1+VOR*PHIk\n\t\tPHIe=VOR*PHIk\n\t\tSwt=1-((VOR*PHIk)/PHIt)\n\t\tSwt=ImposeLimits(Swt,0,1)\n\t\tSwe=0.000\n\t\tSxot=Swt\n\t\tSxoe=Swe\n#\n# 5.1.3.4 Compute Bulk Volume of Water, Hydrocarbon Pore Volume and Pore Space Fluid Properties:\n# ---------------------------------------\t------------------------------------------------------\n\t\tBVW=PHIe*Swe\n\t\tHCPV=PHIe*(1-Swe)\n\t\tVs=RSK*Vk # Estimate volume of adsorbed (sorbed) hydrocarbon\n\t\tVs=ImposeLimits(Vs,0,HCPV)\n\t\tVf=(HCPV-Vs)\n\t\tVf=ImposeLimits(Vf,0,(HCPV-Vs))\n#\n# 5.1.3.5 Recompute hydrocarbon properties in the pore space:\n# -----------------------------------------------------------\n\t\tSum=Vs+Vf\n\t\tif(Sum<=0.000):\n\t\t\tDh=Df\n\t\telse:\n\t\t\tDh=(Ds*Vs+Df*Vf)/(Vs+Vf)\n#\n# 5.1.4 Test for interative computations:\n# ---------------------------------------\n\t\tNIter=NIter+1\n\t\tif(NIter>=NIterMax):\n\t\t\tIterEnd=1\n\t\telse:\t\t\t\n\t\t\tif(NIter<=2):\n\t\t\t\tResultOld=[1,1,1,1,1,1,1,1,1] # Initial Setting\n\t\t\t\tResultNew=[Vc1,Vc2,Vc3,Vk,Vs,Vf,PHIe,Swt,Swe] # Current Results\n\t\t\t\tErrIter=ComputeMatrixDifference(ResultOld,ResultNew)\n\t\t\t\tResultOld=ResultNew\n\t\t\telse:\n\t\t\t\tResultNew=[Vc1,Vc2,Vc3,Vk,Vs,Vf,PHIe,Swt,Swe] # Current Results\n\t\t\t\tErrIter=ComputeMatrixDifference(ResultOld,ResultNew)\n\t\t\t\tResultOld=ResultNew\n\t\t\t\tif(ErrIter<=TolErrIter):\n\t\t\t\t\tIterEnd=1\n#\n# 5.1.6 Preoutput computations:\n# ------------------------------\n\tQc=MissingValue\n\tDc2=0.00\n\tDc3=0.00\n\tCBW=PHIt-PHIe # The assumption is that all microporosity can be considered to be clay bound water.\n\tToc=fToc_Wtf(Vc1,Vc2,Vc3,Vk,0,Ck,Dc1,Dc2,Dc3,Dk,Dw) # TOC-wt fraction. Note: Vrw=0 in fToc_Wtf(Vc1,Vc2,Vc3,Vk,Vrw,Ck,Dc1,Dc2,Dc3,Dk,Dw)\n\tGDen=fOrmGDen(Vc1,Vc2,Vc3,Vk,0,Dc1,Dc2,Dc3,Dk,Dw) # Grain Density. 
Note: Vrw=0 in fOrmGDen(Vc1,Vc2,Vc3,Vk,Vrw,Dc1,Dc2,Dc3,Dk,Dw)\n#\n# 5.5.7 Output Results:\n# \t-------------------\n\treturn PHIt,PHIe,CBW,BVW,HCPV,Vf,Vs,Swt,Swe,Vc1,Vc2,Vc3,Vk,Toc,Qc,GDen",
"def fmeasure(B, hits, misses, falses) :\r\n x = ((1 + B**2) * hits) / ((1 + B**2) * hits + B**2 * misses + falses)\r\n return x",
"def fmeasure(B, hits, misses, falses) :\n x = ((1 + B**2) * hits) / ((1 + B**2) * hits + B**2 * misses + falses)\n return x",
"def outputFO(self, outfile):\n\n#fof(axiom_0,axiom,\n# ( ! [V2] : ? [V1]:\n# ( p(V2) | p(V1) ) & (~p(V2) | ~p(V1) ) & ( p(V2) | ~p(V1) ) )).\n# outfile.write(\"\"\"cnf(rule_true,axiom, p(1)).\n#cnf(rule_false,axiom, ~p(0)).\n#\"\"\")\n outfile.write(\"fof(quant,axiom,(\\n\\t\")\n for q in self.__quantifierList:\n\n if q.qtype == \"a\":\n outfile.write(\" ! \")\n elif q.qtype == \"e\":\n outfile.write(\" ? \")\n variables = [\"V%d\" % x for x in q.getVariableNames()]\n \n outfile.write(\"[ %s ] : \\n\\t\" % \",\".join(variables))\n clauselist = [] \n outfile.write(\" ( \\n\\t p(true) & ~p(false) & \\n \") \n for c in self.__clauseList:\n clause = []\n clause.append(\"( \")\n vlist = []\n for var in c.varlist:\n if var.inv:\n vlist.append(\"~p(V%s)\" % var.name)\n else:\n vlist.append(\" p(V%s)\" % var.name)\n clause.append(\" | \".join(vlist))\n clause.append(\") \")\n clauselist.append(\"\".join(clause))\n outfile.write(\"\\n\\t & \".join(clauselist))\n outfile.write(\" ) \");\n outfile.write(\"\\n)).\")",
"def calcAVO(velp1,velp2,vels1,vels2,rho1,rho2,model='akirichards3'):\n out=np.zeros([np.size(velp1),9])\n out[:,0]=velp2-velp1\n out[:,1]=vels2-vels1\n out[:,2]=rho2-rho1\n out[:,3]=(velp2+velp1)/2.0\n out[:,4]=(vels2+vels1)/2.0\n out[:,5]=(rho2+rho1)/2.0\n modelAVOAkiRichards3(out)\n modelFattiRpRs(out)\n return out",
"def responseProb(obs, dt, n1, n2, pc, scaling, prevInternalState, reward, costM, costS,\n pRes): \n #0 is default, 1 is cue\n respond = 2; internalState = np.nan; payofftoA = 0; payofftoD = 0\n p = np.full((len(obs)+1,2), np.nan) #array of posterior prob for default, cue\n fs = np.full((len(obs)+1,2), np.nan) #array of scaled f values for default, cue\n \n transition1 = np.array([[1, 0],[0,1]]) #transition probabilities in general\n e = np.array([[n1,1-n1],[1-n2,n2]]) #emission probabilities\n foreperiodSteps = int((6/dt)+1)\n \n \n fs[0,:] = np.array([1,0])\n p[0,:] = fs[0,:]/np.sum(fs[0,:])\n \n #inference process \n for i in range(len(obs)):\n if i < foreperiodSteps:\n r = 1/(foreperiodSteps-i)\n #print(r, i, sep= ' ')\n transition2 = np.array([[1-pc*r,pc*r],[0,1]])\n #transition probability in foreperiod, before transition\n fs[i+1, :] = scaling*e[:,int(obs[i])]*np.matmul(fs[i,:], transition2)\n #calculaitng joint probabilities\n else:\n fs[i+1, :] = scaling*e[:,int(obs[i])]*np.matmul(fs[i,:], transition1)\n #calculaitng joint probabilities\n \n p[i+1, :] = fs[i+1,:]/np.sum(fs[i+1,:]) #posterior probabilites\n \n #response process\n \n #calculating payoffs\n if prevInternalState == 'default' :\n payofftoA = p[len(obs),1]*pRes[1,1]*reward + p[len(obs),0]*pRes[0,1]*reward - costS\n payofftoD = p[len(obs),0]*pRes[0,0]*reward + p[len(obs),1]*pRes[1,0]*reward\n elif prevInternalState == 'active' :\n payofftoA = p[len(obs),1]*pRes[1,1]*reward + p[len(obs),0]*pRes[0,1]*reward - costM\n payofftoD = p[len(obs),0]*pRes[0,0]*reward + p[len(obs),1]*pRes[1,0]*reward\n \n \n #deciding internal state based on payoffs\n if payofftoA > payofftoD :\n internalState = 'active'\n k = np.random.binomial(1,pRes[1,1]) #probabilistic response in A\n if k == 1:\n respond = 1\n elif k == 0:\n respond = 0\n \n elif payofftoA < payofftoD :\n internalState = 'default'\n k = np.random.binomial(1,pRes[0,0]) #probabilistic response in D\n if k == 1:\n respond = 0\n elif k == 0:\n respond = 1\n \n \n return respond, internalState, p",
"def calc_eta_FC(Q_load_W, Q_design_W, phi_threshold, approach_call):\n phi = 0.0\n\n ## Approach A - NREL Approach\n if approach_call == \"A\":\n\n phi = float(Q_load_W) / float(Q_design_W)\n eta_max = 0.425 # from energy.gov\n\n if phi >= phi_threshold: # from NREL-Shape\n eta_el = eta_max - ((1 / 6.0 * eta_max) / (1.0 - phi_threshold)) * abs(phi - phi_threshold)\n\n if phi < phi_threshold:\n if phi <= 118 / 520.0 * phi_threshold:\n eta_el = eta_max * 2 / 3 * (phi / (phi_threshold * 118 / 520.0))\n\n if phi < 0.5 * phi_threshold and phi >= 118 / 520.0 * phi_threshold:\n eta_el = eta_max * 2 / 3.0 + \\\n eta_max * 0.25 * (phi - phi_threshold * 118 / 520.0) / (phi_threshold * (0.5 - 118 / 520.0))\n\n if phi > 0.5 * phi_threshold and phi < phi_threshold:\n eta_el = eta_max * (2 / 3.0 + 0.25) + \\\n 1 / 12.0 * eta_max * (phi - phi_threshold * 0.5) / (phi_threshold * (1 - 0.5))\n\n eta_therm_max = 0.45 # constant, after energy.gov\n\n if phi < phi_threshold:\n eta_therm = 0.5 * eta_therm_max * (phi / phi_threshold)\n\n else:\n eta_therm = 0.5 * eta_therm_max * (1 + eta_therm_max * ((phi - phi_threshold) / (1 - phi_threshold)))\n\n ## Approach B - Empiric Approach\n if approach_call == \"B\":\n\n if Q_design_W > 0:\n phi = float(Q_load_W) / float(Q_design_W)\n\n else:\n phi = 0\n\n eta_el_max = 0.39\n eta_therm_max = 0.58 # * 1.11 as this source gives eff. of HHV\n eta_el_score = -0.220 + 5.277 * phi - 9.127 * phi ** 2 + 7.172 * phi ** 3 - 2.103 * phi ** 4\n eta_therm_score = 0.9 - 0.07 * phi + 0.17 * phi ** 2\n\n eta_el = eta_el_max * eta_el_score\n eta_therm = eta_therm_max * eta_therm_score\n\n if phi < 0.2:\n eta_el = 0\n\n return eta_el, eta_therm",
"def f1_score(confusion):\n s = np.power(sensitivity(confusion), -1)\n p = np.power(precision(confusion), -1)\n return 2 / (s + p)",
"def prop(q1,abcd,mode=[0,0],p1=1):\n\n A=abcd[0][0]\n B=abcd[0][1]\n C=abcd[1][0]\n D=abcd[1][1]\n \n n=mode[0]\n m=mode[1]\n \n q = (A*q1 + B)/(C*q1 + D)\n p = p1*np.exp(1j*np.angle(1/(A+B/q1)**(1+n+m)))\n \n return q,p",
"def proba(c_pred,m_pred,f_pred, dataset):\n p = np.zeros(10)\n if dataset == 'cifar10':\n for i in range(10):\n if i <4:\n if i <2:\n p[i] = c_pred[0]*(m_pred[0]/(m_pred[0]+m_pred[1]))*(f_pred[i]/np.sum(f_pred[0:2]))\n elif i <4:\n p[i] = c_pred[0]*(m_pred[1]/(m_pred[0]+m_pred[1]))*(f_pred[i]/np.sum(f_pred[2:4]))\n if i >=4:\n if i <6:\n p[i] = c_pred[1]*(m_pred[2]/(m_pred[2]+m_pred[3]+m_pred[4]))*(f_pred[i]/np.sum(f_pred[4:6]))\n elif i <8:\n p[i] = c_pred[1]*(m_pred[3]/(m_pred[2]+m_pred[3]+m_pred[4]))*(f_pred[i]/np.sum(f_pred[6:8]))\n elif i <10:\n p[i] = c_pred[1]*(m_pred[4]/(m_pred[2]+m_pred[3]+m_pred[4]))*(f_pred[i]/np.sum(f_pred[8:10]))\n else :\n for i in range(10):\n if i <5:\n if i <3:\n p[i] = c_pred[0]*(m_pred[0]/(m_pred[0]+m_pred[1]))*(f_pred[i]/np.sum(f_pred[0:3]))\n elif i <5:\n p[i] = c_pred[0]*(m_pred[1]/(m_pred[0]+m_pred[1]))*(f_pred[i]/np.sum(f_pred[3:5]))\n if i >=5:\n if i <8:\n p[i] = c_pred[1]*(m_pred[2]/(m_pred[2]+m_pred[3]))*(f_pred[i]/np.sum(f_pred[5:8]))\n elif i <10:\n p[i] = c_pred[1]*(m_pred[3]/(m_pred[2]+m_pred[3]))*(f_pred[i]/np.sum(f_pred[8:]))\n return(p)",
"def fac2(actual: np.ndarray, predicted: np.ndarray):\n frac = predicted / actual\n return ((0.5 <= frac) & (frac <= 2.0)).sum() / len(predicted)",
"def fitfunc_AP(x, *p):\n val = p[0]\n for n in range(0, len(p) - 1, 2):\n ind = n + 1\n mode = (n / 2) + 1\n val = val + p[ind] * np.cos(2 * np.pi * mode * (x - p[ind + 1]) / 360.0)\n return val"
] | [
"0.6252921",
"0.6047141",
"0.5921848",
"0.5919183",
"0.58074903",
"0.580166",
"0.573012",
"0.57079667",
"0.5702805",
"0.56922394",
"0.5558143",
"0.5557235",
"0.5544737",
"0.5510745",
"0.5489067",
"0.54731345",
"0.54717034",
"0.5446004",
"0.5440856",
"0.54358524",
"0.54325265",
"0.5430739",
"0.54278034",
"0.54063565",
"0.5402559",
"0.5398235",
"0.5394714",
"0.5393119",
"0.5390012",
"0.53852546"
] | 0.61302215 | 1 |
a pd.DataFrame() with age_group_id age_group_name age_start float for neonatal + 1 to 4 year old age groups | def pull_u5_age_groups_formatted():
age_start_map = {
'Early Neonatal': 0,
'Late Neonatal': 7/365,
'Post Neonatal': 28/365,
'1 to 4': 1
}
age_end_map = {
'Early Neonatal': 7/365,
'Late Neonatal': 28/365,
'Post Neonatal': 365/365,
'1 to 4': 5
}
# pull age
age_groups = get_ids("age_group")
age_groups = age_groups[age_groups.age_group_id.isin([2, 3, 4, 5])]
age_groups['age_start'] = age_groups.age_group_name.map(age_start_map)
age_groups['age_end'] = age_groups.age_group_name.map(age_end_map)
return age_groups | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_df():\n df = load_df_from_files()\n df = clean_df(df)\n df = expand_df_dates(df)\n df[\"age_at_t\"] = ((df[\"date\"] - df[\"birthday\"]) / 365).dt.days # Yeah, this is weird.\n return df",
"def age_binning(df):\n\n # bins that age is sorted into\n age_bins = np.append(np.array([0,1,4]), np.arange(10, 116, 5)) # 0, 1, 4 do not follow the 5 year bin pattern.\n\n # labels for age columns are the lower and upper ages of bin\n age_start_list = np.append(np.array([0,1]), np.arange(5, 111, 5))\n age_end_list = np.append(np.array([1]), np.arange(4, 116, 5))\n\n # Create 2 new age columns\n df['age_start'] = pd.cut(df['age'], age_bins, labels = age_start_list, right=False)\n df['age_end'] = pd.cut(df['age'], age_bins, labels = age_end_list, right = False)\n\n # Drop age variable\n df.drop('age', 1, inplace=True)\n\n # return dataframe with age_start,age_end features\n return (df)",
"def create_age_buckets(df):\r\n print(df['age'].min())\r\n print(df['age'].max())\r\n old_values = df['age'].values.tolist()\r\n new_age_list = []\r\n new_age = 0\r\n for age in old_values:\r\n if age in range(18, 30):\r\n new_age = 1\r\n elif age in range(30, 40):\r\n new_age = 2\r\n elif age in range(40, 50):\r\n new_age = 3\r\n elif age in range(50, 65):\r\n new_age = 4\r\n elif age in range(65, 70):\r\n new_age = 5\r\n else:\r\n new_age = 6\r\n new_age_list.append(new_age)\r\n age_df = DataFrame(new_age_list, columns=['Age_Buckets'])\r\n df = concat([age_df, df], axis=1)\r\n return df",
"def meetup_groups_growth():\n # groups we are interested in.\n groups = list(Group.objects.filter(country='PT').order_by('created'))\n\n # take the start and end dates for the group dynamic\n start = groups[0].created\n end = datetime.datetime.utcnow().replace(tzinfo=start.tzinfo)\n\n # counter, which will have a structure like this:\n # {\"Lisbon\": {\"2012-01-01\": 1, \"2012-02-01\": 5}}\n # every edge value represents the number of groups in the specific city\n # for a specific moment in time\n group_counter = defaultdict(lambda: Counter())\n\n # date range, essentially a list with datetimes for every month\n # from start to end\n idx = pd.date_range(start=start, end=end, freq='M', normalize=True)\n\n # populate the counter\n for moment in idx:\n for group in groups:\n if group.created <= moment:\n group_counter[group.city][moment] += 1\n\n # convert this to a dataframe and replace all missing values with 0's\n df = pd.DataFrame.from_dict(group_counter).fillna(0).astype(int)\n # sort columns from \"most active\" to \"least active\" city at the moment\n df = df.sort_values(axis=1, by=df.index[-1], ascending=False)\n return df",
"def _transform_age_feature(df):\n df = df.apply(_build_age_range, axis='columns')\n dummies_age = pd.get_dummies(df['Age'], prefix='Age')\n print(\"For dataset with shape {}, the dummies for 'Age' are: {}\".format(df.shape, dummies_age.columns))\n df = pd.concat([df, dummies_age], axis=1)\n\n # Ensure that all dummies are created and that 'Training' and 'Test' datasets will have same number of columns. In\n # our case, 'Age_8' will not be created for 'Test' dataset. We could create it by hand but it is more robust to test\n # all cases\n # For 'Age', range has been splitted in 8\n for i in range(8):\n if 'Age_{}'.format(i) not in df:\n df['Age_{}'.format(i)] = 0\n\n return df",
"def _age_bins(df):\n df['age_9'] = (\n df[[f'age_{g}_{a}'\n for g in ('male', 'female')\n for a in (4, 9)]].sum(axis=1))\n df['age_19'] = (\n df[[f'age_{g}_{a}'\n for g in ('male', 'female')\n for a in (14, 17, 19)]].sum(axis=1))\n df['age_29'] = (\n df[[f'age_{g}_{a}'\n for g in ('male', 'female')\n for a in (20, 21, 24, 29)]].sum(axis=1))\n df['age_39'] = (\n df[[f'age_{g}_{a}'\n for g in ('male', 'female')\n for a in (34, 39)]].sum(axis=1))\n df['age_49'] = (\n df[[f'age_{g}_{a}'\n for g in ('male', 'female')\n for a in (44, 49)]].sum(axis=1))\n df['age_59'] = (\n df[[f'age_{g}_{a}'\n for g in ('male', 'female')\n for a in (54, 59)]].sum(axis=1))\n df['age_69'] = (\n df[[f'age_{g}_{a}'\n for g in ('male', 'female')\n for a in (61, 64, 66, 69)]].sum(axis=1))\n df['age_79'] = (\n df[[f'age_{g}_{a}'\n for g in ('male', 'female')\n for a in (74, 79)]].sum(axis=1))\n df['age_80_over'] = (\n df[[f'age_{g}_{a}'\n for g in ('male', 'female')\n for a in (84, 99)]].sum(axis=1))\n\n df = df.drop([f'age_{g}_{a}'\n for g in ('male', 'female')\n for a in (4, 9, 14, 17, 19, 20, 21, 24, 29, 34, 39, 44, 49,\n 54, 61, 64, 66, 69, 74, 79, 84, 99)\n ], axis=1)\n\n return df",
"def age_indicators(self):\n # Load table #\n age_indicators = self.parent.database[\"tblAgeIndicators\"]\n classifr_coefs = self.parent.classifiers_coefs\n # Join\n df = (age_indicators\n .left_join(classifr_coefs, on='user_defd_class_set_id')\n )\n # Place classifiers first\n df = df.set_index(self.parent.classifiers_names).reset_index()\n return df",
"def year_range(df):\n\n if not isinstance(df, pd.DataFrame):\n print(\"year_range was not passed a pandas DataFrame.\")\n return\n\n df['year_start'] = df['year'].min()\n df['year_end'] = df['year'].max()\n df.drop('year' , axis = 1, inplace = True)\n return df",
"def _build_age_range(row):\n val = 0\n if row.Age <= 10:\n val = 0\n elif 10 < row.Age <= 15:\n val = 1\n elif 15 < row.Age <= 25:\n val = 2\n elif 25 < row.Age <= 40:\n val = 3\n elif 40 < row.Age <= 60:\n val = 4\n elif 60 < row.Age <= 70:\n val = 5\n elif 70 < row.Age <= 77:\n val = 6\n elif row.Age > 77:\n val = 7\n elif pd.isnull(row.Age):\n val = 9\n\n row.Age = val\n return row",
"def _year_days(year):\n return pd.DataFrame({'time': pd.date_range(f'{year}-01-01', f'{year}-12-31')})",
"def get_age_fields():\n under_18_fields = CensusFields.get_under_18_fields()\n\n age_18_to_29_fields = [ \n 'B01001_007E', # Male:!!18 and 19 years\n 'B01001_008E', # Male:!!20 years\n 'B01001_009E', # Male:!!21 years\n 'B01001_010E', # Male:!!22 to 24 years\n 'B01001_011E', # Male:!!25 to 29 years\n 'B01001_031E', # Female:!!18 and 19 years\n 'B01001_032E', # Female:!!20 years\n 'B01001_033E', # Female:!!21 years\n 'B01001_034E', # Female:!!22 to 24 years\n 'B01001_035E', # Female:!!25 to 29 years\n ]\n age_30_to_39_fields = [\n 'B01001_012E', # Male:!!30 to 34 years\n 'B01001_013E', # Male:!!35 to 39 years\n 'B01001_036E', # Female:!!30 to 34 years\n 'B01001_037E', # Female:!!35 to 39 years\n ]\n age_40_to_49_fields = [\n 'B01001_014E', # Male:!!40 to 44 years\n 'B01001_038E', # Female:!!40 to 44 years\n 'B01001_015E', # Male:!!45 to 49 years\n 'B01001_039E', # Female:!!45 to 49 years\n\n ]\n age_50_to_59_fields = [\n 'B01001_016E', # Male:!!50 to 54 years\n 'B01001_017E', # Male:!!55 to 59 years\n 'B01001_040E', # Female:!!50 to 54 years\n 'B01001_041E', # Female:!!55 to 59 years\n\n ]\n age_60_to_69_fields = [\n 'B01001_018E', # Male:!!60 and 61 years\n 'B01001_019E', # Male:!!62 to 64 years\n 'B01001_020E', # Male:!!65 and 66 years\n 'B01001_021E', # Male:!!67 to 69 years\n 'B01001_042E', # Female:!!60 and 61 years\n 'B01001_043E', # Female:!!62 to 64 years\n 'B01001_044E', # Female:!!65 and 66 years\n 'B01001_045E', # Female:!!67 to 69 years\n ]\n age_70_to_79_fields = [\n 'B01001_022E', # Male:!!70 to 74 years\n 'B01001_023E', # Male:!!75 to 79 years\n 'B01001_046E', # Female:!!70 to 74 years\n 'B01001_047E', # Female:!!75 to 79 years\n ]\n age_81_plus_fields = [\n 'B01001_024E', # Male:!!80 to 84 years\n 'B01001_025E', # Male:!!85 years and over\n 'B01001_048E', # Female:!!80 to 84 years\n 'B01001_049E', # Female:!!85 years and over\n ]\n \n age_fields = OrderedDict()\n age_fields[ 'age_18_to_29' ] = { 'label': '18-29', 'fields': age_18_to_29_fields }\n age_fields[ 'age_30_to_39' ] = { 'label': '30s', 'fields': age_30_to_39_fields }\n age_fields[ 'age_40_to_49' ] = { 'label': '40s', 'fields': age_40_to_49_fields }\n age_fields[ 'age_50_to_59' ] = { 'label': '50s', 'fields': age_50_to_59_fields }\n age_fields[ 'age_60_to_69' ] = { 'label': '60s', 'fields': age_60_to_69_fields } \n age_fields[ 'age_70_to_79' ] = { 'label': '70s', 'fields': age_70_to_79_fields }\n age_fields[ 'age_81_plus' ] = { 'label': '80+', 'fields': age_81_plus_fields }\n\n return age_fields",
"def get_age_bounds(input_dir):\r\n ages = pd.read_csv(os.path.join(input_dir, \"age_bounds.csv\"))\r\n return ages",
"def get_age_arr(self,renew=False):\n\t\tdset = dispDBase.dispASDF(self.attrs['age_h5'])\n\t\tminlat = self.attrs['minlat']\n\t\tmaxlat = self.attrs['maxlat']\n\t\tminlon = self.attrs['minlon']\n\t\tmaxlon = self.attrs['maxlon']\n\t\tdset.set_poly(self.poly_lst,minlon,minlat,maxlon,maxlat)\n\t\tdset.read_age_mdl()\n\t\tself.create_dataset(name='age_nc_Arr', data=dset.age_data)\n\t\tself.create_dataset(name='age_lon_Vec', data=dset.age_lon)\n\t\tself.create_dataset(name='age_lat_Vec', data=dset.age_lat)\n\t\tfor period in self.attrs['prd_arr']:\n\t\t\tgroup = self['%g_sec'%( period )]\n\t\t\tlons = group['lonArr'].value\n\t\t\tlats = group['latArr'].value\n\t\t\tage_Arr = dset.get_ages(lons.reshape(lons.size),lats.reshape(lats.size))\n\t\t\tage_Arr = age_Arr.reshape(lats.shape)\n\t\t\tmask_age = age_Arr > 180.\n\t\t\tif renew:\n\t\t\t\tdel group['age_Arr']\n\t\t\t\tdel group['age_Arr_msk']\n\t\t\tgroup.create_dataset(name='age_Arr', data=age_Arr)\n\t\t\tgroup.create_dataset(name='age_Arr_msk', data=mask_age)\n\t\treturn",
"def at_birth(df,variable,npoint):\n return df.groupby('cell')[['{}'.format('{}'.format(variable)),'pred_growth_rate']].apply(lambda x: x.head(npoint).mean()).rename(columns={'pred_length_box_um':'{}_at_birth'.format(variable)})",
"def aggregate_absolute_cases_by_age(df):\n df.drop([\"Meldedatum\", \"Landkreis\", \"IdBundesland\", \"Bundesland\", \"ObjectId\"], axis=1, inplace=True)\n df = df.groupby(['IdLandkreis', 'Altersgruppe']).sum()\n df.reset_index(inplace=True)\n return df",
"def get_age_1_4_age_splits(location_ids, sexes):\n # pull population data\n location_ids = list(location_ids)\n age_split_pop_count = get_population(\n location_id=location_ids,\n year_id=2019,\n age_group_id=[49,50,51,52],\n single_year_age=True,\n sex_id=sexes,\n gbd_round_id=6,\n decomp_step='step4')\n\n # calculate proportions\n age_split_pop_count['denom'] = age_split_pop_count.groupby('location_id').transform('sum').population\n age_split_pop_count['prop_1_4'] = age_split_pop_count.population / age_split_pop_count.denom\n\n # add formatting\n age_group_names = {\n 49 : 'age1',\n 50 : 'age2',\n 51 : 'age3',\n 52 : 'age4'\n }\n age_split_pop_count['age_name'] = age_split_pop_count.age_group_id.map(age_group_names)\n \n usecols = ['location_id','age_group_id','sex_id','age_name','prop_1_4']\n return age_split_pop_count[usecols]",
"def clean_data(df, start = 1995, stop = 2018):\n country_iso3_code = pd.read_html('https://unstats.un.org/unsd/methodology/m49/')\n country_iso3_code = country_iso3_code[0]['ISO-alpha3 code']\n df = df.loc[df.country_iso3_code.isin(country_iso3_code)]\n df = df.set_index(['indicator', 'country_iso3_code', 'country', 'year']).unstack(level = 0)\n df.columns = df.columns.get_level_values(1)\n df = df.rename(columns = {'NY.GDP.PCAP.KD.ZG': 'pc_GDP_growth',\n 'NY.GDP.PCAP.PP.CD': 'pc_GDP_PPP'})\n df = df.reset_index()\n df = df.loc[(df.year >= (start - 1)) & (df.year <= stop)]\n df = df.dropna()\n return df",
"def get_start_end_years(df: pd.DataFrame) -> Tuple[int, int]:\n return df.iloc[0].year, df.iloc[-1].year",
"def new_features(df):\n print(\"Add new features ...\")\n # distinguish Spring, Fall and pregnant females (don't care about juvenilles/unknown)\n df[\"gender_plus\"] = df[\"Gender\"]\n df.loc[df.Gravid, \"gender_plus\"] = \"f_gra\"\n\n df[\"gender_seasons\"] = df[\"Gender\"]\n df.loc[df.Gravid, \"gender_seasons\"] = \"f_gra\"\n\n # add features\n df[\"Age_To_Weight\"] = df[\"Annuli\"] / df[\"Weight\"]\n\n # Calcuate Number of recaptures\n df_captures = df[[\"ID\", \"Date\"]].groupby(\"ID\").count()\n df_captures.columns = [\"recapture_count\"]\n df_captures.reset_index(inplace=True)\n df = pd.merge(df, df_captures, how=\"outer\", on=\"ID\")\n\n # recalculate annuli\n df_min = pd.pivot_table(\n df[df.Annuli > 0],\n values=[\"Date\", \"Annuli\"],\n index=[\"ID\"],\n aggfunc={\"Date\": min, \"Annuli\": min},\n )\n df_min.columns = [\"annuli_min\", \"date_min\"]\n df_min.reset_index(inplace=True)\n\n df = pd.merge(df, df_min, how=\"outer\", on=\"ID\")\n df[\"year\"] = df.Date.map(lambda x: x.year)\n df[\"year_min\"] = df.date_min.map(lambda x: x.year)\n df[\"Annuli_orig\"] = df.Annuli\n df.Annuli = df.year - df.year_min + df.annuli_min\n df.Annuli = np.nan_to_num(df.Annuli)\n df[\"Annuli\"] = pd.to_numeric(df[\"Annuli\"], downcast=\"integer\")\n\n # Annuli Buckets\n buckets = 5\n interval = int(df[\"Annuli\"].max() / buckets)\n buckets = [i for i in range(0, df[\"Annuli\"].max() + interval, interval)]\n labels = [\"'{0} - {1}'\".format(i, i + interval) for i in buckets]\n df[\"Annuli_Group\"] = pd.cut(\n df.Annuli, buckets, labels=labels[:-1], include_lowest=True\n )\n\n return df",
"def calc_base_year_data(base_year_vehicles_df):\n pass",
"def add_days_since_year_start(df):\n\n try:\n \n df['day_into_year'] = df.DAYOFSERVICE.dt.dayofyear\n\n print('— days since 1/1/18 added')\n \n return df\n\n except:\n\n print(\"Problem with add_day_since_year_start function\")",
"def select_information(df_players: pd.DataFrame) -> pd.DataFrame:\n df_players = df_players[[\"Birth Place\", 'Birthday', \"Name\"]]\n df_players['Birthday'] = pd.to_datetime(df_players['Birthday'])\n df_players[\"today\"] = date.today().strftime(\"%Y-%m-%d\")\n df_players['today'] = pd.to_datetime(df_players['today'])\n df_players[\"age\"] = (df_players[\"today\"] - df_players['Birthday']).dt.days / 365.\n df_players = df_players[df_players['age'] <= 50].reset_index(drop=True)\n df_players = df_players.dropna(subset=['Birth Place']).reset_index(drop=True)\n return df_players",
"def __init__(self, df, first_n=100, last_n=5, min_len=200):\n games = list(df.groupby('game_id'))\n self.data = []\n for g_id, g in games:\n if g.shape[0] >= min_len:\n self.data.append(g)\n\n self.length = len(self.data)\n self.first_n = first_n\n self.last_n = last_n",
"def g_date(data): \n #pull out data from the most original dataset and modified the date by year\n g=data.GRADE\n date=data['GRADE DATE'] \n date=[datetime.datetime.strptime(d, \"%m/%d/%Y\") for d in date]\n\n d_by_year=[] \n for i in range(len(date)): \n d_by_year.append(datetime.datetime(date[i].year,1,1))\n\n d_by_year=pd.Series(d_by_year,index=g.index,name=\"GRADE DATE\")\n return pd.concat([g,d_by_year],axis=1)",
"def create_data_structure(dataframe):\n\n dataframe = dataframe.groupby(\"name\").agg({\"date_new\": \"min\",\n \"url\": \"count\"})\n dataframe.columns = ['recency', 'frequency']\n dataframe[\"recency\"] = dataframe[\"recency\"].astype(\"int64\")\n\n return dataframe",
"def create_regressor_attributes(df, attribute, list_of_prev_t_instants) :\n \n list_of_prev_t_instants.sort()\n start = list_of_prev_t_instants[-1] \n end = len(df)\n df['datetime'] = df.index\n df.reset_index(drop=True)\n\n df_copy = df[start:end]\n df_copy.reset_index(inplace=True, drop=True)\n\n for attribute in attribute :\n foobar = pd.DataFrame()\n\n for prev_t in list_of_prev_t_instants :\n new_col = pd.DataFrame(df[attribute].iloc[(start - prev_t) : (end - prev_t)])\n new_col.reset_index(drop=True, inplace=True)\n new_col.rename(columns={attribute : '{}_(t-{})'.format(attribute, prev_t)}, inplace=True)\n foobar = pd.concat([foobar, new_col], sort=False, axis=1)\n\n df_copy = pd.concat([df_copy, foobar], sort=False, axis=1)\n \n df_copy.set_index(['datetime'], drop=True, inplace=True)\n return df_copy",
"def get_frac_remaining_by_age(cls, ages):\n n_ages = np.size(ages)\n arr = xr.DataArray(data=np.ones([n_ages, 1], dtype=float), coords=[('age', ages), ('theta', cls._no_theta_coord)])\n return arr",
"def get_yearly_data(name, startyr=None, endyr=None, interpolated=False):\n varinfo = get_varinfo(name)\n \n if varinfo[\"type\"] == \"yearly\":\n data = get_data(varinfo[\"id\"], startyr=startyr, endyr=endyr)\n giddict = dict()\n sorteddata = sorted(data[\"cells\"], key=lambda vd: vd[\"gid\"])\n for gid,valuedicts in itertools.groupby(sorteddata, key=lambda vd: vd[\"gid\"]):\n yrdict = dict([(valuedict[\"year\"],valuedict[\"value\"])\n for valuedict in valuedicts\n ])\n info = {\"data\": yrdict}\n giddict[gid] = info\n\n if interpolated:\n def pairwise(iterable):\n a, b = itertools.tee(iterable)\n next(b, None)\n return zip(a, b)\n \n def lerp(factor, fromval, toval):\n valrange = toval - fromval\n return fromval + valrange * factor\n \n for gid,info in giddict.items():\n yrdict = info[\"data\"]\n if len(yrdict) > 1:\n for (fromyr,fromval),(toyr,toval) in pairwise(sorted(yrdict.items(),key=lambda i: i[0])):\n curyr = fromyr + 1\n interpneeded = fromval != toval\n \n while curyr != toyr:\n if interpneeded:\n factor = (curyr - fromyr) / float(toyr - fromyr)\n yrdict[curyr] = lerp(factor, fromval, toval)\n else:\n yrdict[curyr] = fromval\n curyr += 1\n\n return giddict\n\n else:\n raise Exception(\"Could not find a yearly variable with that name\")",
"def read_precip(fname, label, start_year, end_year):\n data_in = pd.read_csv(fname)\n #leaps = [1960+n*4 for n in range(20)] # Create a list of leap years\n dailys = []\n for year in data_in:\n for n,day in enumerate(data_in[year].values):\n if n == 59: # if we are looking at the 59th element (added feb 29th)\n \n if int(year)/4 % 1 == 0.0: # year/4 modulus 1 = 0.0 (leap years)\n #if int(year) in leaps:\n dailys.append(day)\n else:\n pass\n else:\n dailys.append(day) \n dailys = np.array(dailys)\n mask = dailys > 99.\n dailys[mask] = np.nan\n start_date = '01-01-'+str(start_year)\n end_date = '31-12-'+str(end_year)\n #print(start_date, end_date)\n dates = pd.date_range(start=start_date, end=end_date, freq='D')\n return pd.DataFrame(data=dailys,columns=[label],index=dates)",
"def generate_example() -> pd.DataFrame:\n rng = np.random.RandomState(1234)\n\n df = generate_test_dataframe(n_dims=2, size=2000)\n df[\"date\"] = pd.Timestamp(\"2000-01-01\") + pd.to_timedelta(df[\"dim_0\"], unit=\"D\")\n df[\"month\"] = df[\"date\"].dt.month.astype(np.int8)\n df[\"year\"] = df[\"date\"].dt.year.astype(np.int16)\n df[\"city\"] = \"city_\" + df[\"dim_1\"].astype(\"str\")\n df[\"country\"] = \"country_\" + (df[\"dim_1\"] // 500).astype(\"str\")\n df[\"avg_temp\"] = (\n rng.normal(loc=10.0, scale=5.0, size=len(df))\n .round(decimals=1)\n .astype(np.float32)\n )\n df[\"rain\"] = rng.rand(len(df)) > 0.9\n df[\"mood\"] = \"ok\"\n df.loc[(~df[\"rain\"]) & (df[\"avg_temp\"] > 15), \"mood\"] = \"great\"\n df.loc[(df[\"rain\"]) & (df[\"avg_temp\"] < 5), \"mood\"] = \"sad\"\n return df[[\"date\", \"month\", \"year\", \"city\", \"country\", \"avg_temp\", \"rain\", \"mood\"]]"
] | [
"0.6269801",
"0.5932561",
"0.58339566",
"0.5740221",
"0.5732571",
"0.5659223",
"0.5570736",
"0.5543955",
"0.5441446",
"0.5404643",
"0.53028536",
"0.52629364",
"0.522845",
"0.52063704",
"0.5162005",
"0.51570964",
"0.5115626",
"0.5105394",
"0.509224",
"0.5091394",
"0.50578034",
"0.50307924",
"0.49955788",
"0.4993704",
"0.49936977",
"0.49912822",
"0.49744725",
"0.49275532",
"0.49067968",
"0.49007913"
] | 0.60626066 | 1 |
a pd.DataFrame() with specified location_ids age_group_ids for 1, 2, 3, and 4 year olds sex_ids "age_name" with an age group label "prop_1_4" with the proportion of age_group X out of age_group_id 5 | def get_age_1_4_age_splits(location_ids, sexes):
# pull population data
location_ids = list(location_ids)
age_split_pop_count = get_population(
location_id=location_ids,
year_id=2019,
age_group_id=[49,50,51,52],
single_year_age=True,
sex_id=sexes,
gbd_round_id=6,
decomp_step='step4')
# calculate proportions
age_split_pop_count['denom'] = age_split_pop_count.groupby('location_id').transform('sum').population
age_split_pop_count['prop_1_4'] = age_split_pop_count.population / age_split_pop_count.denom
# add formatting
age_group_names = {
49 : 'age1',
50 : 'age2',
51 : 'age3',
52 : 'age4'
}
age_split_pop_count['age_name'] = age_split_pop_count.age_group_id.map(age_group_names)
usecols = ['location_id','age_group_id','sex_id','age_name','prop_1_4']
return age_split_pop_count[usecols] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def apply_iron_hemoglobin_age_related_effective_coverage_restrictions(data,\n sex_ids,\n age_group_ids,\n effective_fractions):\n final = pd.DataFrame()\n for n in list(range(0, len(sex_ids))):\n out_data = pd.DataFrame()\n for i in list(range(0, len(age_group_ids))):\n temp = (data * effective_fractions[i]).reset_index()\n temp['age_group_id'] = age_group_ids[i]\n out_data = pd.concat([out_data, temp], ignore_index=True)\n out_data['sex_id'] = sex_ids[n]\n final = pd.concat([final, out_data], ignore_index=True)\n final = (final.set_index(\n ['location_id','vehicle', 'age_group_id', 'sex_id', 'year'] + [c for c in final.columns if c == 'coverage_level'])\n .sort_index())\n return final",
"def outcome_bygroup_df(df, outcomes, groupbyvars):\n colselect = groupbyvars + outcomes\n colnames=[]\n bygender = df.loc[:, colselect].groupby('group_gender')[outcomes].mean().T\n colnames.extend(list(bygender.columns))\n bymigrant = df.loc[:, colselect].groupby('group_migrant')[\n outcomes].mean().T\n colnames.extend(list(bymigrant.columns))\n byinformal = df.loc[:, colselect].groupby('group_informal')[\n outcomes].mean().T\n colnames.extend(list(byinformal.columns))\n bytotal = df.loc[:, colselect].groupby('Total')[outcomes].mean().T\n colnames.extend(list(bytotal.columns))\n data = pd.concat([bygender, bymigrant, byinformal,\n bytotal], axis=1, ignore_index=True)\n \n data.columns=colnames\n data['label'] = varlabel_df.loc[outcomes]\n data = data.set_index('label')\n return data",
"def outcome_bygroup_df(df, outcomes, groupbyvars):\n colselect = groupbyvars + outcomes\n colnames=[]\n bygender = df.loc[:, colselect].groupby('group_gender')[outcomes].mean().T\n colnames.extend(list(bygender.columns))\n bymigrant = df.loc[:, colselect].groupby('group_migrant')[\n outcomes].mean().T\n colnames.extend(list(bymigrant.columns))\n byinformal = df.loc[:, colselect].groupby('group_informal')[\n outcomes].mean().T\n colnames.extend(list(byinformal.columns))\n bytotal = df.loc[:, colselect].groupby('Total')[outcomes].mean().T\n colnames.extend(list(bytotal.columns))\n data = pd.concat([bygender, bymigrant, byinformal,\n bytotal], axis=1, ignore_index=True)\n \n data.columns=colnames\n data['label'] = varlabel_df.loc[outcomes]\n data = data.set_index('label')\n return data",
"def _age_bins(df):\n df['age_9'] = (\n df[[f'age_{g}_{a}'\n for g in ('male', 'female')\n for a in (4, 9)]].sum(axis=1))\n df['age_19'] = (\n df[[f'age_{g}_{a}'\n for g in ('male', 'female')\n for a in (14, 17, 19)]].sum(axis=1))\n df['age_29'] = (\n df[[f'age_{g}_{a}'\n for g in ('male', 'female')\n for a in (20, 21, 24, 29)]].sum(axis=1))\n df['age_39'] = (\n df[[f'age_{g}_{a}'\n for g in ('male', 'female')\n for a in (34, 39)]].sum(axis=1))\n df['age_49'] = (\n df[[f'age_{g}_{a}'\n for g in ('male', 'female')\n for a in (44, 49)]].sum(axis=1))\n df['age_59'] = (\n df[[f'age_{g}_{a}'\n for g in ('male', 'female')\n for a in (54, 59)]].sum(axis=1))\n df['age_69'] = (\n df[[f'age_{g}_{a}'\n for g in ('male', 'female')\n for a in (61, 64, 66, 69)]].sum(axis=1))\n df['age_79'] = (\n df[[f'age_{g}_{a}'\n for g in ('male', 'female')\n for a in (74, 79)]].sum(axis=1))\n df['age_80_over'] = (\n df[[f'age_{g}_{a}'\n for g in ('male', 'female')\n for a in (84, 99)]].sum(axis=1))\n\n df = df.drop([f'age_{g}_{a}'\n for g in ('male', 'female')\n for a in (4, 9, 14, 17, 19, 20, 21, 24, 29, 34, 39, 44, 49,\n 54, 61, 64, 66, 69, 74, 79, 84, 99)\n ], axis=1)\n\n return df",
"def balance(df):\n groups = df.groupby(['age_group'])\n for count, group in enumerate(groups.groups):\n group = groups.get_group(group)\n sex_ratio = sum((group['sex'] == 'f') * 1) / sum((group['sex'] == 'm') * 1)\n if sex_ratio < 1:\n group_m = group[group['sex'] == 'm'].sample(frac=sex_ratio, random_state=1)\n group_f = group[group['sex'] == 'f']\n else:\n group_m = group[group['sex'] == 'm']\n group_f = group[group['sex'] == 'f'].sample(frac=(1 / sex_ratio), random_state=1)\n if count == 0:\n df_balanced = pd.concat([group_m, group_f], axis=0)\n else:\n df_balanced = pd.concat([df_balanced, group_m, group_f], axis=0)\n return df_balanced.sample(frac=1).reset_index(drop=True)",
"def pull_u5_age_groups_formatted():\n age_start_map = {\n 'Early Neonatal': 0,\n 'Late Neonatal': 7/365,\n 'Post Neonatal': 28/365,\n '1 to 4': 1\n }\n\n age_end_map = {\n 'Early Neonatal': 7/365,\n 'Late Neonatal': 28/365,\n 'Post Neonatal': 365/365,\n '1 to 4': 5\n }\n\n # pull age \n age_groups = get_ids(\"age_group\")\n age_groups = age_groups[age_groups.age_group_id.isin([2, 3, 4, 5])]\n age_groups['age_start'] = age_groups.age_group_name.map(age_start_map)\n age_groups['age_end'] = age_groups.age_group_name.map(age_end_map)\n\n return age_groups",
"def meetup_groups_dynamic(growth_df):\n\n def convert_to_percent(row):\n total_groups = row.sum()\n return row.apply(lambda x: x * 100 / total_groups)\n\n return growth_df.apply(convert_to_percent, axis=1)",
"def get_ages(location, country, level, num_agebrackets=85):\n if country == location:\n level = 'country'\n\n if country == 'Europe':\n country = location\n level = 'country'\n\n if level == 'country':\n file_name = country + '_' + level + '_level_age_distribution_' + '%i' % num_agebrackets + '.csv'\n else:\n file_name = country + '_' + level + '_' + location + '_age_distribution_' + '%i' % num_agebrackets + '.csv'\n file_path = os.path.join(datadir, 'age_distributions', file_name)\n df = pd.read_csv(file_path, delimiter=',', header=None)\n df.columns = ['age', 'age_count']\n ages = dict(zip(df.age.values.astype(int), df.age_count.values))\n return ages",
"def make_age_data(api, district_data = {}, categories = {'Age': {} },\r\n state=48, district=7, leg_body='US-REP', year='2015'):\r\n category='Age'\r\n \r\n district_key='district'\r\n blockgroup_key='bg'\r\n precinct_key='precinct'\r\n tract_key='tract'\r\n\r\n total_census_field = 'B01001_001E'\r\n age_table = 'B01001'\r\n \r\n total_field = 'total'\r\n total_label = 'Total'\r\n \r\n over_18_field = 'over_18'\r\n over_18_label = '18 and over'\r\n \r\n data_path = 'static/data/'\r\n\r\n age_classes = CensusFields.get_age_fields()\r\n \r\n under_18_classes = CensusFields.get_under_18_fields()\r\n \r\n # Load the census data\r\n print( \"\\n\" )\r\n print( \"Getting Census Data for Sex by Age\" )\r\n census_fields, census_labels = get_census_fields_by_table(table=age_table, \r\n year=year)\r\n census_data = get_census_data(api=api, category=category, fields=census_fields, \r\n state=state, district=district, leg_body=leg_body, year=year)\r\n \r\n # create fields and labels for Party Identification classes\r\n # used in web-based dashboard\r\n fields = []\r\n labels = {}\r\n \r\n fields.append(total_field)\r\n labels[total_field] = total_label\r\n \r\n fields.append(over_18_field)\r\n labels[over_18_field] = over_18_label\r\n\r\n # create fields and labels for census classes\r\n # used in web-based dashboard\r\n fields = []\r\n labels = {}\r\n \r\n fields.append(over_18_field)\r\n labels[over_18_field] = over_18_label\r\n \r\n fields.append(total_field)\r\n labels[total_field] = total_label\r\n \r\n for age_field, age_row in age_classes.items():\r\n fields.append(age_field)\r\n labels[age_field] = age_row['label']\r\n \r\n categories[category]['Census'] = {'fields': fields, 'labels': labels}\r\n\r\n print( \"Building Age Data\" )\r\n \r\n # make the party identification data and data for the census classes\r\n # for the blockgroups\r\n district_data = make_class_data( \r\n census_data_in_district=census_data, \r\n census_classes=age_classes,\r\n district_data=district_data,\r\n district=district,\r\n leg_body=leg_body,\r\n year=year\r\n )\r\n # make the party identification data and data for the census classes\r\n # for the district\r\n district_data = make_class_data( \r\n census_data_in_district=census_data, \r\n census_classes=age_classes,\r\n district_data=district_data,\r\n category=categories[category],\r\n district=district,\r\n leg_body=leg_body,\r\n year=year,\r\n geo_key=district_key\r\n )\r\n\r\n # Calculate persons 18 and over in each block group and \r\n # get the total population in each block group\r\n geo_key = blockgroup_key\r\n for geoid, census_data_row in census_data[year][geo_key].items():\r\n # Persons 18 and over\r\n under_18 = 0\r\n for census_field in under_18_classes['fields']:\r\n under_18 = under_18 + int(census_data_row[census_field])\r\n # (over 18) = total - (under 18)\r\n over_18 = int(census_data_row[total_census_field]) - under_18\r\n district_data[year][geo_key][geoid][over_18_field] = over_18\r\n \r\n # Total Population\r\n district_data[year][geo_key][geoid][total_field] = census_data_row[total_census_field]\r\n \r\n if leg_body == 'US-REP':\r\n # calculate the district stats\r\n geo_key = district_key\r\n for census_field in under_18_classes['fields']:\r\n under_18 = under_18 + int(census_data[year][geo_key][census_field])\r\n # (over 18) = total - (under 18)\r\n over_18 = int(census_data[year][geo_key][total_census_field]) - under_18\r\n district_data[year][geo_key][over_18_field] = over_18\r\n \r\n # Total Population\r\n 
district_data[year][geo_key][total_field] = census_data[year][geo_key][total_census_field]\r\n\r\n return categories, district_data",
"def get_age_fields():\n under_18_fields = CensusFields.get_under_18_fields()\n\n age_18_to_29_fields = [ \n 'B01001_007E', # Male:!!18 and 19 years\n 'B01001_008E', # Male:!!20 years\n 'B01001_009E', # Male:!!21 years\n 'B01001_010E', # Male:!!22 to 24 years\n 'B01001_011E', # Male:!!25 to 29 years\n 'B01001_031E', # Female:!!18 and 19 years\n 'B01001_032E', # Female:!!20 years\n 'B01001_033E', # Female:!!21 years\n 'B01001_034E', # Female:!!22 to 24 years\n 'B01001_035E', # Female:!!25 to 29 years\n ]\n age_30_to_39_fields = [\n 'B01001_012E', # Male:!!30 to 34 years\n 'B01001_013E', # Male:!!35 to 39 years\n 'B01001_036E', # Female:!!30 to 34 years\n 'B01001_037E', # Female:!!35 to 39 years\n ]\n age_40_to_49_fields = [\n 'B01001_014E', # Male:!!40 to 44 years\n 'B01001_038E', # Female:!!40 to 44 years\n 'B01001_015E', # Male:!!45 to 49 years\n 'B01001_039E', # Female:!!45 to 49 years\n\n ]\n age_50_to_59_fields = [\n 'B01001_016E', # Male:!!50 to 54 years\n 'B01001_017E', # Male:!!55 to 59 years\n 'B01001_040E', # Female:!!50 to 54 years\n 'B01001_041E', # Female:!!55 to 59 years\n\n ]\n age_60_to_69_fields = [\n 'B01001_018E', # Male:!!60 and 61 years\n 'B01001_019E', # Male:!!62 to 64 years\n 'B01001_020E', # Male:!!65 and 66 years\n 'B01001_021E', # Male:!!67 to 69 years\n 'B01001_042E', # Female:!!60 and 61 years\n 'B01001_043E', # Female:!!62 to 64 years\n 'B01001_044E', # Female:!!65 and 66 years\n 'B01001_045E', # Female:!!67 to 69 years\n ]\n age_70_to_79_fields = [\n 'B01001_022E', # Male:!!70 to 74 years\n 'B01001_023E', # Male:!!75 to 79 years\n 'B01001_046E', # Female:!!70 to 74 years\n 'B01001_047E', # Female:!!75 to 79 years\n ]\n age_81_plus_fields = [\n 'B01001_024E', # Male:!!80 to 84 years\n 'B01001_025E', # Male:!!85 years and over\n 'B01001_048E', # Female:!!80 to 84 years\n 'B01001_049E', # Female:!!85 years and over\n ]\n \n age_fields = OrderedDict()\n age_fields[ 'age_18_to_29' ] = { 'label': '18-29', 'fields': age_18_to_29_fields }\n age_fields[ 'age_30_to_39' ] = { 'label': '30s', 'fields': age_30_to_39_fields }\n age_fields[ 'age_40_to_49' ] = { 'label': '40s', 'fields': age_40_to_49_fields }\n age_fields[ 'age_50_to_59' ] = { 'label': '50s', 'fields': age_50_to_59_fields }\n age_fields[ 'age_60_to_69' ] = { 'label': '60s', 'fields': age_60_to_69_fields } \n age_fields[ 'age_70_to_79' ] = { 'label': '70s', 'fields': age_70_to_79_fields }\n age_fields[ 'age_81_plus' ] = { 'label': '80+', 'fields': age_81_plus_fields }\n\n return age_fields",
"def setup_percentiles_adults(percentiles):\n # expand decade rows into one row per year\n pct = percentiles[\n percentiles[\"Age (All race and Hispanic-origin groups)\"] != \"20 and over\"\n ].copy()\n pct.loc[pct[\"Age_low\"] == 20, \"Age_low\"] = 18\n range_col = pct.apply(lambda row: row.Age_high - row.Age_low + 1, axis=1)\n pct = pct.assign(range=range_col.values)\n dta = pd.DataFrame(\n (np.repeat(pct.values, pct[\"range\"], axis=0)), columns=pct.columns\n )\n dta[\"count\"] = dta.groupby([\"Sex\", \"Measure\", \"Age_low\", \"Age_high\"]).cumcount()\n dta[\"age\"] = dta[\"Age_low\"] + dta[\"count\"]\n # add standard deviation and other values\n dta[\"sqrt\"] = np.sqrt(pd.to_numeric(dta[\"Number of examined persons\"]))\n dta[\"sd\"] = dta[\"Standard error of the mean\"] * dta[\"sqrt\"]\n dta[\"Sex\"] = dta.Sex.replace(\"Male\", 0).replace(\"Female\", 1)\n dta.rename(columns={\"Measure\": \"param\"}, inplace=True)\n dta.drop(\n columns=[\n \"Age (All race and Hispanic-origin groups)\",\n \"Age_low\",\n \"sqrt\",\n \"Standard error of the mean\",\n \"Age_high\",\n \"range\",\n \"count\",\n \"Number of examined persons\",\n ],\n inplace=True,\n )\n # smooth percentiles between X9-(X+1)1 (i.e., 29-31)\n dta[\"decade\"] = np.where(dta[\"age\"] == (round(dta[\"age\"].astype(float), -1)), 1, 0)\n mcol_list = [\n \"Mean\",\n \"sd\",\n \"P5\",\n \"P10\",\n \"P15\",\n \"P25\",\n \"P50\",\n \"P75\",\n \"P85\",\n \"P90\",\n \"P95\",\n ]\n for col in mcol_list:\n dta[col] = np.where(\n (dta[\"decade\"] == 1) & (dta[\"age\"] < 110),\n (dta[col] + dta[col].shift(1)) / 2,\n dta[col],\n )\n dta.drop(columns={\"decade\"}, inplace=True)\n col_list = [\"param\", \"Sex\", \"age\"] + mcol_list\n dta = dta.reindex(columns=col_list)\n return dta",
"def index():\n import numpy as np\n import random\n\n total_gender = {}\n total_gender['Male'] = db(db.patient.sex == 'Male').count()\n total_gender['Female'] = db(db.patient.sex == 'Female').count()\n total_gender['Undeclared'] = db(db.patient.sex == 'Undeclared').count()\n\n groups = db(db.groups).select()\n freq_groups = {}\n grp_gender = {}\n for g in groups:\n freq_groups[g.code] = db(db.patient.groups.contains(g.id)).count()\n grp_gender[g.code] = {}\n grp_gender[g.code]['Male'] = db(db.patient.groups.contains(g.id) & (db.patient.sex == 'Male')).count()\n grp_gender[g.code]['Female'] = db(db.patient.groups.contains(g.id) & (db.patient.sex == 'Female')).count()\n grp_gender[g.code]['Undeclared'] = db(db.patient.groups.contains(g.id) & (db.patient.sex == 'Undeclared')).count()\n\n experiments = db(db.experiments).select()\n freq_experiments = {}\n exp_gender = {}\n for e in experiments:\n freq_experiments[e.code] = db(db.patient.experiments.contains(e.id)).count()\n exp_gender[e.code] = {}\n exp_gender[e.code]['Male'] = db(db.patient.experiments.contains(e.id) & (db.patient.sex == 'Male')).count()\n exp_gender[e.code]['Female'] = db(db.patient.experiments.contains(e.id) & (db.patient.sex == 'Female')).count()\n exp_gender[e.code]['Undeclared'] = db(db.patient.experiments.contains(e.id) & (db.patient.sex == 'Undeclared')).count()\n\n grp_exp = {}\n for e in experiments:\n grp_exp[e.code] = {}\n for g in groups:\n grp_exp[e.code][g.code] = db(db.patient.experiments.contains(e.id) & db.patient.groups.contains(g.id)).count()\n\n return dict(message=T('Pain Network: A web-based tool for diagnosis of the Chronic Pain.'),\n freq_gender=total_gender,freq_groups=freq_groups,freq_experiments=freq_experiments,\n exp_gender=exp_gender,grp_gender=grp_gender,grp_exp=grp_exp)",
"def get_effective_iron_hemoglobin_coverage(df, sex_ids, age_group_ids, effective_fractions, years):\n effective_coverage_by_age = apply_iron_hemoglobin_age_related_effective_coverage_restrictions(df,\n sex_ids,\n age_group_ids,\n effective_fractions)\n #effective_fraction_by_time_lag = calculate_iron_hemoglobin_time_lag_effective_fraction(df, years)\n effective_coverage = effective_coverage_by_age #* effective_fraction_by_time_lag\n print('NOTE: not currently applying time lag effect.')\n effective_coverage = (effective_coverage.reset_index()\n .set_index(['location_id', 'sex_id', 'age_group_id', 'vehicle', 'year'] +\n [c for c in effective_coverage.reset_index().columns if c == 'coverage_level'])\n .sort_index())\n\n return effective_coverage",
"def generateExtrapolationTable(sex, region):\n pop1 = dataStore.data[dataStore.data.Location == region]\n pop1 = pop1[['Time', 'Age', SEXES[sex]]]\n # pop1 = data[['Time', 'Age', SEX]].query('Location' == CNTRY)\n #print pop1\n\n july1from1950to2100 = [inPosixDays(date(y, 7, 1)) for y in xrange(1950, 2100+1)]\n\n dateRange1950to2100inPosixDays = range(inPosixDays(date(1950,1,1)), inPosixDays(date(2100,12,31))+1)\n\n ''' --- Date interpolation function --- '''\n def dateInterp(iage):\n popi = np.asarray(pop1.loc[dataStore.data.Age == iage.name, SEXES[sex]])\n\n # spline interpolation function from Scipy Package\n iuspl = InterpolatedUnivariateSpline(july1from1950to2100, popi, k=4)\n return iuspl(dateRange1950to2100inPosixDays)\n\n # --- store the results of the date interpolation --- #\n result1 = pd.DataFrame(index = range(0,len(dateRange1950to2100inPosixDays)), columns = range(0,100))\n table = result1.apply(dateInterp, axis=0)\n\n # Change column names by appending \"age_\"\n oldHeaders = table.columns\n newHeaders = []\n for i in oldHeaders:\n newHeaders.append(\"age\" + \"_\" + str(i))\n table.columns = newHeaders\n #print result1.head # results: \"age_0, age_1, ...\"\n\n # Convert the numerical days to date string\n def toDate(d):\n return (date(1970, 1, 1) + timedelta(days=d)).strftime('%Y-%m-%d')\n toDate = np.vectorize(toDate) # vectorize the function to iterate over numpy ndarray\n #fullDateRange = toDate(dateRange1970to2100inPosixDays) # 1st result: 1950-01-01\n fullDateRange = len(dateRange1950to2100inPosixDays)*[None]\n for i in range(0,len(dateRange1950to2100inPosixDays)):\n fullDateRange[i] = toDate(dateRange1950to2100inPosixDays[i])\n\n # Add the fullDateRange to the result1\n table['date1'] = fullDateRange\n\n return table",
"def setup_individual_obs_df(obs_df):\n df = obs_df.copy()\n df.rename(columns={\"clean_res\": \"clean_value\"}, inplace=True)\n df[\"ageyears\"] = df[\"agedays\"] / 365.25\n df[\"clean_cat\"] = df[\"clean_value\"].astype(\"category\")\n df[\"include\"] = df.clean_value.eq(\"Include\")\n col_list = [\n \"id\",\n \"subjid\",\n \"agedays\",\n \"ageyears\",\n \"sex\",\n \"param\",\n \"measurement\",\n \"clean_value\",\n \"clean_cat\",\n \"include\",\n ]\n return df[col_list]",
"def state_age_count(state) :\n\n import numpy as np\n import pandas as pd\n import matplotlib.pyplot as plt\n import itertools\n \n assert isinstance(state, str)\n\n train = pd.read_csv('./data/train.csv')\n state_labels = pd.read_csv('./data/state_labels.csv')\n\n \n # Dictionary for state labels\n state_dict = dict(zip(state_labels[\"StateName\"], state_labels[\"StateID\"]))\n\n assert state in state_dict, \"state not in state dictionary\"\n \n # Split data to only contain given state data\n this_state = train.loc[train['State'] == state_dict[state],\n ['State','Type', 'Age', 'AdoptionSpeed']]\n\n this_state.loc[this_state['Age'] > -1, 'Age'] = (this_state['Age']//12)\n \n # Divide by dog (Type = 1) and cat (Type = 2)\n dog_df = this_state.loc[this_state['Type'] == 1, :]\n cat_df = this_state.loc[this_state['Type'] == 2, :]\n \n dog_max_age = max(dog_df.loc[:, 'Age'])\n dog_min_age = min(dog_df.loc[:, 'Age'])\n \n cat_max_age = max(cat_df.loc[:, 'Age'])\n cat_min_age = min(cat_df.loc[:, 'Age'])\n \n dog_age_labels = []\n dog_count = []\n \n cat_age_labels = []\n cat_count = []\n \n # Find dog count for each age\n for i in range(dog_min_age, dog_max_age + 1) :\n count = (dog_df.Age == i).sum()\n if(count > 0) :\n dog_count.append(count)\n dog_age_labels.append(i)\n\n # Find cat count for each age\n for i in range(cat_min_age, cat_max_age + 1) :\n count = (cat_df.Age == i).sum()\n if(count > 0) :\n cat_count.append(count)\n cat_age_labels.append(i)\n \n # Plot pie charts\n plt.figure()\n plt.pie(dog_count, labels = dog_age_labels, startangle=90, autopct='%.1f%%')\n plt.title('Age Count of Dogs in ' + state)\n plt.show()\n \n plt.figure()\n plt.pie(cat_count, labels = cat_age_labels, startangle=90, autopct='%.1f%%')\n plt.title('Age Count of Cats in ' + state)\n plt.show()\n \n # Plot bar graphs\n plt.figure()\n index = np.arange(len(dog_age_labels))\n plt.bar(index, dog_count)\n plt.xlabel('Age in Years', fontsize = 5)\n plt.xticks(index, dog_age_labels, fontsize = 5)\n plt.ylabel('Count', fontsize = 5)\n plt.title('Age Count of Dogs in ' + state)\n \n \n plt.figure()\n index = np.arange(len(cat_age_labels))\n plt.bar(index, cat_count)\n plt.xlabel('Age in Years', fontsize = 5)\n plt.xticks(index, cat_age_labels, fontsize = 5)\n plt.ylabel('Count', fontsize = 5)\n plt.title('Age Count of Cats in ' + state)",
"def ex_eight_animals_data_table():\n data_dict = {'Calf': [4, 5, 6, 7, 8],\n 'Sire': [1, 3, 1, 4, 3],\n 'Dam': ['Unknown', 2, 2, 5, 6],\n 'Sex': ['Male', 'Female', 'Female', 'Male', 'Male'],\n 'WWG': [4.5, 2.9, 3.9, 3.5, 5.0]}\n\n df = pd.DataFrame(data_dict)\n\n return(df)",
"def calc_year_based_saving_capacities(values, group, group_people_ratio):\n column = 'income_{}{}'.format(group, group_people_ratio)\n for index, obj in enumerate(values):\n if index < len(values) - 1:\n next_obj = values[index + 1]\n # between below years calculation\n years = (obj['year'], next_obj['year'])\n\n # find year specific income distribution for the one person who\n # belong the regarding group\n current_per_people_ratio = obj[column] / group_people_ratio\n next_per_people_ratio = next_obj[column] / group_people_ratio\n\n diff = next_per_people_ratio - current_per_people_ratio\n saving_capacity = diff / current_per_people_ratio\n yield {'year': years, 'savingcapacity': saving_capacity}",
"def aggregate_absolute_cases_by_age(df):\n df.drop([\"Meldedatum\", \"Landkreis\", \"IdBundesland\", \"Bundesland\", \"ObjectId\"], axis=1, inplace=True)\n df = df.groupby(['IdLandkreis', 'Altersgruppe']).sum()\n df.reset_index(inplace=True)\n return df",
"def create_df():\n df = load_df_from_files()\n df = clean_df(df)\n df = expand_df_dates(df)\n df[\"age_at_t\"] = ((df[\"date\"] - df[\"birthday\"]) / 365).dt.days # Yeah, this is weird.\n return df",
"def grouping_cols(df, cat_percentage = 0.05, checking_itr = 10):",
"def generate_group_summary_table(self, groups, group_names=None):\n output = {\n 'patient_patches': {},\n 'slide_patches': {},\n 'patient_slides': {},\n }\n groups['chunks'].sort(key=lambda chunk: chunk['id'])\n category_names = sorted([c.name for c in self.CategoryEnum])\n cum_header = 'Overall' if self.is_binary else 'Total'\n headers = category_names + [cum_header]\n num_headers = len(headers)\n group_patches = pd.DataFrame(columns=headers)\n group_slides = pd.DataFrame(columns=headers)\n group_patients = pd.DataFrame(columns=headers)\n for chunk in groups['chunks']:\n try:\n group_name = group_names[chunk['id']]\n except (TypeError, KeyError):\n group_name = f\"Group {chunk['id'] + 1}\"\n patch_paths = chunk['imgs']\n patches = {name: set() for name in category_names}\n slides = {name: set() for name in category_names}\n patients = {name: set() for name in category_names}\n all_patches = set()\n all_slides = set()\n all_patients = set()\n patient_patches = pd.DataFrame(columns=headers)\n slide_patches = pd.DataFrame(columns=headers)\n patient_slides = pd.DataFrame(columns=headers)\n for patch_path in patch_paths:\n patch_id = utils.create_patch_id(patch_path, self.patch_pattern)\n label = utils.get_label_by_patch_id(patch_id, self.patch_pattern,\n self.CategoryEnum, is_binary=self.is_binary).name\n slide_name = utils.get_slide_by_patch_id(patch_id, self.patch_pattern)\n patient_id = utils.get_patient_by_slide_id(slide_name,\n dataset_origin=self.dataset_origin)\n\n patches[label].add(patch_id)\n\n if slide_name not in slides[label]:\n if patient_id not in patient_slides.index:\n patient_slides.loc[patient_id] = [0] * num_headers\n patient_slides.at[patient_id, label] += 1\n if slide_name not in all_slides:\n patient_slides.at[patient_id, cum_header] += 1\n \n slides[label].add(slide_name)\n patients[label].add(patient_id)\n\n if patient_id not in patient_patches.index:\n patient_patches.loc[patient_id] = [0] * num_headers\n patient_patches.at[patient_id, label] += 1\n patient_patches.at[patient_id, cum_header] += 1\n\n if slide_name not in slide_patches.index:\n slide_patches.loc[slide_name] = [0] * num_headers\n slide_patches.at[slide_name, label] += 1\n slide_patches.at[slide_name, cum_header] += 1\n\n all_patches.add(patch_id)\n all_slides.add(slide_name)\n all_patients.add(patient_id)\n\n for label, s in patches.items():\n group_patches.at[group_name, label] = len(s)\n group_patches.at[group_name, cum_header] = len(all_patches)\n for label, s in slides.items():\n group_slides.at[group_name, label] = len(s)\n group_slides.at[group_name, cum_header] = len(all_slides)\n for label, s in patients.items():\n group_patients.at[group_name, label] = len(s)\n group_patients.at[group_name, cum_header] = len(all_patients)\n\n patient_patches.loc[\"Total\"] = patient_patches.sum().astype(int)\n slide_patches.loc[\"Total\"] = slide_patches.sum().astype(int)\n patient_slides.loc[\"Total\"] = patient_slides.sum().astype(int)\n output['patient_patches'][group_name] = patient_patches\n output['slide_patches'][group_name] = slide_patches\n output['patient_slides'][group_name] = patient_slides\n \n group_patches.loc['Total'] = group_patches.sum().astype(int)\n group_slides.loc['Total'] = group_slides.sum().astype(int)\n group_patients.loc['Total'] = group_patients.sum().astype(int)\n output['group_patches'] = group_patches\n output['group_slides'] = group_slides\n output['group_patients'] = group_patients\n return output",
"def _transform_age_feature(df):\n df = df.apply(_build_age_range, axis='columns')\n dummies_age = pd.get_dummies(df['Age'], prefix='Age')\n print(\"For dataset with shape {}, the dummies for 'Age' are: {}\".format(df.shape, dummies_age.columns))\n df = pd.concat([df, dummies_age], axis=1)\n\n # Ensure that all dummies are created and that 'Training' and 'Test' datasets will have same number of columns. In\n # our case, 'Age_8' will not be created for 'Test' dataset. We could create it by hand but it is more robust to test\n # all cases\n # For 'Age', range has been splitted in 8\n for i in range(8):\n if 'Age_{}'.format(i) not in df:\n df['Age_{}'.format(i)] = 0\n\n return df",
"def process(cls, df):\n\n # Calculate totals for both genders together\n for g in cls.GROUPS[1:]:\n\n # the columns to sum\n cols_to_sum = [f\"{tag}_{g}\" for tag in [\"male\", \"female\"]]\n\n # approximate the sum\n new_cols = [f\"total_{g}\", f\"total_{g}_moe\"]\n df[new_cols] = df.apply(approximate_sum, cols=cols_to_sum, axis=1)\n\n # Calculate custom group sets\n groupsets = collections.OrderedDict(\n {\n \"16_to_21_employed\": [\"16_to_19_employed\", \"20_to_21_employed\"],\n \"22_to_29_employed\": [\"22_to_24_employed\", \"25_to_29_employed\"],\n \"30_to_44_employed\": [\"30_to_34_employed\", \"35_to_44_employed\"],\n \"45_to_64_employed\": [\n \"45_to_54_employed\",\n \"55_to_59_employed\",\n \"60_to_61_employed\",\n \"62_to_64_employed\",\n ],\n \"65_and_over_employed\": [\n \"65_to_69_employed\",\n \"70_to_74_employed\",\n \"75_and_over_employed\",\n ],\n \"16_to_64_employed\": [\n \"16_to_19_employed\",\n \"20_to_21_employed\",\n \"22_to_24_employed\",\n \"25_to_29_employed\",\n \"30_to_34_employed\",\n \"35_to_44_employed\",\n \"45_to_54_employed\",\n \"55_to_59_employed\",\n \"60_to_61_employed\",\n \"62_to_64_employed\",\n ],\n }\n )\n\n # Sum over the custom groups\n for groupset, group_list in groupsets.items():\n for tag in [\"total\", \"male\", \"female\"]:\n\n # cols to sum over\n cols_to_sum = [f\"{tag}_{f}\" for f in group_list]\n\n # do the aggregation\n newcols = [f\"{tag}_{groupset}\", f\"{tag}_{groupset}_moe\"]\n df[newcols] = df.apply(approximate_sum, cols=cols_to_sum, axis=1)\n\n return df",
"def top100_by_age(df_info, year, col):\n global ages, df_list\n #Remove inactive players\n data = df_info[df_info.Flag != \"i\"]\n data = data[data.Flag != \"wi\"]\n\n for age in ages:\n data_by_age = data[data[\"age\"] < age]#Select the desired players\n #Create a new dataframe with data from the top 100\n data_top100 = data_by_age.sort_values(col, ascending=False)\n data_top100 = data_top100.head(100)\n #The if clause differentiates between the 2 different formats\n if year > 2012:\n #Calculate the percentage in top100 and in total\n percentage_top100 = data_top100[data_top100[\"Sex\"] == \"F\"].shape[0]\n percentage_total = data_by_age[data_by_age[\"Sex\"] == \"F\"].shape[0] / data_by_age.shape[0]\n else:\n percentage_top100 = data_top100[data_top100[\"Flag\"] == \"w\"].shape[0]\n percentage_total = data_by_age[data_by_age[\"Flag\"] == \"w\"].shape[0] / data_by_age.shape[0]\n #Append the data to a list of dictionaries\n df_list_top100.append(\n {\"age\": \"under \" + str(age), \"percentage\": percentage_top100, \"year\": year})\n df_list_total.append(\n {\"age\": \"under \" + str(age), \"percentage\": percentage_total, \"year\": year})",
"def create_sigma_df(df_grouped, class_=0):\n sigma_all_list = []\n sigma_peak_list = []\n snid_list = []\n pred_class_list = []\n for SNID, SNID_df in df_grouped:\n arr_proba = SNID_df[f\"all_class{class_}\"]\n perc_16 = np.percentile(arr_proba, 16)\n perc_84 = np.percentile(arr_proba, 84)\n sigma_all_list.append(perc_84 - perc_16)\n\n arr_proba = SNID_df[f\"PEAKMJD_class{class_}\"]\n perc_16 = np.percentile(arr_proba, 16)\n perc_84 = np.percentile(arr_proba, 84)\n sigma_peak_list.append(perc_84 - perc_16)\n snid_list.append(SNID)\n\n # get predicition for this SNID\n k_all_probas = [k for k in SNID_df.keys() if \"all_class\" in k]\n median_prob_forSNID = SNID_df[k_all_probas].median()\n pred_class = median_prob_forSNID.idxmax()\n arr_proba = SNID_df[pred_class]\n # get sigma for this class\n perc_16 = np.percentile(arr_proba, 16)\n perc_84 = np.percentile(arr_proba, 84)\n pred_class_list.append(perc_84 - perc_16)\n\n df = pd.DataFrame()\n df[\"SNID\"] = np.array(snid_list)\n df[\"sigma_all\"] = np.array(sigma_all_list)\n df[\"sigma_peak\"] = np.array(sigma_peak_list)\n df[\"pred_sigma_all\"] = np.array(pred_class_list)\n return df",
"def aggregate_count_data(df, groupby, id_vars=[]):\n # Make sure we have the column we are grouping by\n if groupby not in df.columns:\n raise ValueError(\n f\"the specified column to group by '{by}' is not in the input data\"\n )\n\n # data columns\n data_columns = [\n col\n for col in df.columns\n if not col.startswith(\"geo\") and not col.endswith(\"moe\")\n ]\n\n def _aggregate(group_df):\n \"\"\"\n The function that aggregates each group\n \"\"\"\n out = {}\n for col in data_columns:\n # The name of the error column (if it exists)\n error_col = f\"{col}_moe\"\n\n # remove any NaN rows\n subset = group_df.dropna(subset=[col], how=\"any\")\n\n # aggregat if we had any rows left\n if len(subset):\n\n # column values, margin of error (if it exists)\n args = np.column_stack(\n [subset[col], subset.get(error_col, np.zeros(len(subset)))]\n )\n\n # do the aggregation\n aggval, moe = cda.approximate_sum(*args)\n else:\n aggval = moe = np.nan\n\n # store\n out[col] = aggval\n if error_col in subset.columns:\n out[f\"{col}_moe\"] = moe\n\n out[\"geometry\"] = group_df.geometry.unary_union\n return pd.Series(out)\n\n # this is the aggregated data, with index of \"by\", e.g., group label\n agg_df = df.groupby(groupby).apply(_aggregate)\n\n # Return a GeoDataFrame\n out = gpd.GeoDataFrame(agg_df, geometry=\"geometry\", crs=df.crs).reset_index()\n\n # Add in any id variables from\n if len(id_vars):\n if groupby not in id_vars:\n id_vars.append(groupby)\n out = out.merge(df[id_vars], on=groupby).drop_duplicates(subset=[groupby])\n\n return out",
"def _tranform_idjj(df, age1720=False, exit=False):\n global _SIMPLECOUNT_COLUMNS\n\n try:\n df.columns = ['age', 'year', 'fk_simplecount_county'] + df.columns.tolist()[3:]\n\n if not age1720:\n indicator_list = [701, 702, 703, 710, 711, 720, 721, 722, 730, 731, 732, 733, 734, 740, 741]\n c_age = df['age'].isin(range(13, 16+1))\n else:\n indicator_list = [704, 705, 706, 712, 713, 723, 724, 725, 735, 736, 737, 738, 739, 742, 743]\n c_age = df['age'].isin(range(17, 20+1))\n\n if exit:\n indicator_list = [i + 50 for i in indicator_list]\n \n c_new = df['admtypo'].isin(['CE', 'CER', 'DR', 'IC', 'MVN', 'PVN', 'RAM'])\n c_ce = df['admtypo'] == 'CE'\n c_tv = df['admtypo'].isin(['TMV', 'TPV'])\n c_male = df['sex'] == 'M'\n c_female = ~c_male\n c_whi = df['race'] == 'WHI'\n c_blk = df['race'] == 'BLK'\n c_hsp = df['race'] == 'HSP'\n c_pers = df['offtype9'] == 1\n c_prop = df['offtype9'] == 2\n c_drug = df['offtype9'] == 3\n c_weap = df['offtype9'] == 4\n c_sex = df['offtype9'] == 5\n c_felo = df['hclass'].isin(['M','X',1,2,3,4])\n c_misd = ~c_felo\n\n c_first3 = [c_new, c_ce, c_tv]\n c_others = [c_male, c_female, c_whi, c_blk, c_hsp, c_pers, c_prop, c_drug, c_weap, c_sex, c_felo, c_misd]\n \n def helper(c, indicator_id, first3):\n df['fk_simplecount_indicator'] = indicator_id\n g = ['fk_simplecount_indicator', 'year', 'fk_simplecount_county']\n if first3:\n return df[c_age & c].groupby(g).size().reset_index(name='value')\n else:\n return df[c_age & c_new & c].groupby(g).size().reset_index(name='value')\n\n out = pd.DataFrame()\n for i in range(3):\n out = out.append(helper(c_first3[i], indicator_list[i], first3=True))\n \n for i in range(len(c_others)):\n out = out.append(helper(c_others[i], indicator_list[i+3], first3=False))\n \n out = out[out['fk_simplecount_county'].isin(range(1,102+1))]\n return out[_SIMPLECOUNT_COLUMNS]\n except:\n raise",
"def at_birth(df,variable,npoint):\n return df.groupby('cell')[['{}'.format('{}'.format(variable)),'pred_growth_rate']].apply(lambda x: x.head(npoint).mean()).rename(columns={'pred_length_box_um':'{}_at_birth'.format(variable)})",
"def leitner_proportions(df):\n denom = df.shape[0]\n prop_dict = {}\n\n for i in range(1,6):\n df_i = df[df['comfort_level'] == i]\n numer = df_i.shape[0]\n prop_dict[i] = numer / denom\n\n prop_df = pd.DataFrame.from_dict([prop_dict], orient='columns') \n\n prop_df = prop_df.T.rename(columns={0:'proportion'}) \n \n return prop_df"
] | [
"0.63705397",
"0.6071489",
"0.6071489",
"0.5912261",
"0.5603249",
"0.5497298",
"0.54729533",
"0.54188925",
"0.5374897",
"0.53565705",
"0.52926534",
"0.52469796",
"0.5227463",
"0.52265364",
"0.52131486",
"0.5205063",
"0.52038306",
"0.5188397",
"0.51701516",
"0.5131363",
"0.5124102",
"0.511309",
"0.5104998",
"0.51017016",
"0.5096565",
"0.50961524",
"0.5041277",
"0.4999612",
"0.49941427",
"0.49914396"
] | 0.74131227 | 0 |
adds lines to the swim object, with the arguments forwarded to __util__.scope_lines | def _add_scope(self, *args, **kwargs):
lines = scope_lines(*args, **kwargs)
self.lines.extend(lines)
return len(lines) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_raw(self, lines):\n return self._add_scope(lines, None, None, indent=None, inline=False)",
"def add_lines(self, *lines, **kwargs):\n assert len(lines) > 0\n lines = [_endline(line) for line in lines]\n all_stages = kwargs.pop('all_stages', False)\n at_start = kwargs.pop('at_start', False)\n skip_scratch = kwargs.pop('skip_scratch', False)\n assert not kwargs, \"Unknown keyword argument(s): {0}\".format(list(kwargs))\n\n froms = [\n instr for instr in self.structure\n if instr['instruction'] == 'FROM'\n ] or [{'endline': -1}] # no FROM? fake one before the beginning\n if not all_stages: # only modify the last\n froms = [froms[-1]]\n\n df_lines = self.lines\n # make sure last line has a newline if lines are to be appended\n if df_lines and not at_start:\n df_lines[-1] = _endline(df_lines[-1])\n\n # iterate through the stages in reverse order\n # so adding lines doesn't invalidate line numbers from structure dicts.\n # first add a bogus instruction to represent EOF in our iteration.\n froms.append({'startline': len(df_lines) + 1})\n for stage in range(len(froms)-2, -1, -1): # e.g. 0 for single or 2, 1, 0 for 3 stages\n start, finish = froms[stage], froms[stage+1]\n linenum = start['endline'] + 1 if at_start else finish['startline']\n image, _ = image_from(froms[stage].get('value') or '')\n if skip_scratch and image == 'scratch':\n continue\n df_lines[linenum:linenum] = lines\n\n self.lines = df_lines",
"def add_lines(self, lines):\n # Make sure this is a line or a list of lines.\n if isinstance(lines, str):\n self.add_lines(lines)\n else:\n for line in lines:\n self.add_line(line)",
"def scope_line(self, text):\n self.__write_scope()\n self.write_line(text)",
"def add_extend(self, lines, name=''):\n return self._add_scope(lines, '%extend ' + name + '{', '}', indent=None, inline=False)",
"def add_inline(self, lines):\n return self._add_scope(lines, '%inline %{', '%}')",
"def add_insert(self, lines):\n return self._add_scope(lines, '%{', '%}')",
"def add_begin(self, lines):\n return self._add_scope(lines, '%begin %{', '%}')",
"def add(self, line):\n self.cull()\n self.lines.append(line)",
"def create_line(self):\n if self.hosts and self.line:\n self.msg(\"There is a line here already.\")\n self.display_line()\n return\n self.line = []\n other_hosts = [self.caller.search(arg) for arg in self.lhslist]\n other_hosts = [ob for ob in other_hosts if ob and ob.player]\n other_hosts.append(self.caller)\n self.hosts = other_hosts\n if \"loop\" in self.switches:\n self.toggle_loop()\n self.display_line()",
"def add_line(self, line):\n self.code.extend([\" \" * self.indent_level, line, \"\\n\"])",
"def add_line(self, line):\n self.code.extend([\" \" * self.indent_level, line, \"\\n\"])",
"def insert_lines(self, lines, color=defcolor):\n for i in range(len(lines)):\n self.insert_line(lines[i], 0, i*(self.font['height']+1), color)",
"def add_line(project, jobs, line):\n if project not in jobs:\n jobs[project] = []\n jobs[project].append(line)",
"def add_comment(self, lines, verbosity_level=Verbosity.info):\n if self.verbosity < verbosity_level:\n return 0\n return self._add_scope(lines, None, None, '// ', inline=False)",
"def __window_scrollByLines(self, lines):\n pass",
"def add_line(self, line):\n self._set_instance_data('body', self.indent + ' ' * 4 + line)",
"def add_init(self, lines):\n return self._add_scope(lines, '%init %{', '%}')",
"def runlines(self,lines):\n\n # We must start with a clean buffer, in case this is run from an\n # interactive IPython session (via a magic, for example).\n self.resetbuffer()\n lines = lines.split('\\n')\n more = 0\n for line in lines:\n # skip blank lines so we don't mess up the prompt counter, but do\n # NOT skip even a blank line if we are in a code block (more is\n # true)\n if line or more:\n more = self.push((self.prefilter(line,more)))\n # IPython's runsource returns None if there was an error\n # compiling the code. This allows us to stop processing right\n # away, so the user gets the error message at the right place.\n if more is None:\n break\n # final newline in case the input didn't have it, so that the code\n # actually does get executed\n if more:\n self.push('\\n')",
"def _update_lines(self, lines, new_line):\n code_matches = [x for x in _ansi_codes.finditer(new_line)]\n color_codes = [\n code.string[code.span()[0] : code.span()[1]] for code in code_matches\n ]\n\n # Add color codes from earlier in the unwrapped line, and then track any new ones we add.\n new_line = \"\".join(self._active_codes) + new_line\n\n for code in color_codes:\n if code != _ansi_color_reset_code:\n self._active_codes.append(code)\n else: # A single reset code resets everything\n self._active_codes = []\n\n # Always ensure each line is color terminted if any colors are\n # still active, otherwise colors will bleed into other cells on the console\n if len(self._active_codes) > 0:\n new_line = new_line + _ansi_color_reset_code\n\n lines.append(new_line)",
"def add_to_cmd(run, batch, source, add_line, basename='xrb'):\n filepath = grid_strings.cmd_filepath(run, batch, source=source, basename=basename)\n print(f'Writing: {filepath}')\n with open(filepath) as f:\n lines = f.readlines()\n\n lines = [f'{add_line}\\n'] + lines\n with open(filepath, 'w') as f:\n f.writelines(lines)",
"def add_code(self, code_lines: List[str]) -> None:\n self.__code_block__ += code_lines",
"def add_comment_ml(self, lines, verbosity_level=Verbosity.info):\n if self.verbosity < verbosity_level:\n return 0\n return self._add_scope(lines, '/*', '*/', '* ', inline=False)",
"def setup_lines(self):\n self.center_lines()\n self.space_lines()",
"def add_lines(request, hash, lines):\n try:\n session = TailSession.objects.get(hash=hash)\n for line in lines:\n session.add(line.replace('>', '>').replace('<', '<') + u\"\\n\")\n return True\n except TailSession.DoesNotExist:\n return False",
"def add_line(self, lineno, text, important=True):\n self.append((lineno, important, text))",
"def add_python(self, lines):\n return self._add_scope(lines, '%pythoncode %{', '%}', inline=False)",
"def fold_lines(self, lines, name='', line_end=None, force=None):\n if not self.is_fold_enabled(force):\n return lines\n line_end = detect_nl(lines, line_end)\n start_mark, end_mark = new_section_marks(name, line_end)\n ret = [start_mark, end_mark]\n ret[1:1] = lines\n return ret",
"def _data_lines(self, new_lines):\n self._load()\n\n # Check _load() comments to see why this can happen\n if not hasattr(self, \"_data\"):\n return\n\n self._data[\"lines\"] = new_lines\n self.dirty = True\n self.uncache()",
"def run(self, lines):\r\n pass"
] | [
"0.67846817",
"0.650356",
"0.6448292",
"0.63999695",
"0.63545126",
"0.6267685",
"0.6185877",
"0.61215115",
"0.610109",
"0.59473467",
"0.5920031",
"0.5920031",
"0.5853437",
"0.57996607",
"0.5771026",
"0.5769285",
"0.57038033",
"0.5693841",
"0.56882554",
"0.56811553",
"0.56298524",
"0.56066614",
"0.55960643",
"0.5580512",
"0.55765444",
"0.5572608",
"0.55610996",
"0.5554828",
"0.5522834",
"0.5495317"
] | 0.75363743 | 0 |
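The __util__.scope_lines helper that _add_scope forwards to does not appear anywhere in this dump. The sketch below is only a plausible reconstruction, inferred from the call sites visible in the surrounding records (positional begin/end markers, a per-line indent prefix, and an inline flag); it is an assumption, not the project's actual implementation.

# Hypothetical reconstruction of __util__.scope_lines (not shown in this dump).
# The signature is inferred from calls such as scope_lines(lines, '%init %{', '%}')
# and scope_lines(lines, None, None, indent=None, inline=False).
def scope_lines(lines, begin=None, end=None, indent='    ', inline=True):
    # 'inline' is accepted only for call-site compatibility; its real
    # semantics are not recoverable from this dump, so the sketch always
    # emits the block form.
    if isinstance(lines, str):
        lines = lines.splitlines()
    scoped = [] if begin is None else [begin]
    scoped.extend((indent or '') + line for line in lines)
    if end is not None:
        scoped.append(end)
    return scoped

Under that assumption, _add_scope simply extends self.lines with the wrapped block and returns how many lines were appended.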
add lines to the swim without an enclosing scope | def add_raw(self, lines):
return self._add_scope(lines, None, None, indent=None, inline=False) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add(self, line):\n self.cull()\n self.lines.append(line)",
"def add_line(self, line):\n self.code.extend([\" \" * self.indent_level, line, \"\\n\"])",
"def add_line(self, line):\n self.code.extend([\" \" * self.indent_level, line, \"\\n\"])",
"def new_line():\n print()",
"def add_line(self, line):\n self._set_instance_data('body', self.indent + ' ' * 4 + line)",
"def create_line(self):\n if self.hosts and self.line:\n self.msg(\"There is a line here already.\")\n self.display_line()\n return\n self.line = []\n other_hosts = [self.caller.search(arg) for arg in self.lhslist]\n other_hosts = [ob for ob in other_hosts if ob and ob.player]\n other_hosts.append(self.caller)\n self.hosts = other_hosts\n if \"loop\" in self.switches:\n self.toggle_loop()\n self.display_line()",
"def add_to_cmd(run, batch, source, add_line, basename='xrb'):\n filepath = grid_strings.cmd_filepath(run, batch, source=source, basename=basename)\n print(f'Writing: {filepath}')\n with open(filepath) as f:\n lines = f.readlines()\n\n lines = [f'{add_line}\\n'] + lines\n with open(filepath, 'w') as f:\n f.writelines(lines)",
"def add_insert(self, lines):\n return self._add_scope(lines, '%{', '%}')",
"def add_begin(self, lines):\n return self._add_scope(lines, '%begin %{', '%}')",
"def _add_scope(self, *args, **kwargs):\n lines = scope_lines(*args, **kwargs)\n self.lines.extend(lines)\n return len(lines)",
"def three_lines():\n new_line()\n new_line()\n new_line()",
"def add_line(project, jobs, line):\n if project not in jobs:\n jobs[project] = []\n jobs[project].append(line)",
"def add_lines(self, *lines, **kwargs):\n assert len(lines) > 0\n lines = [_endline(line) for line in lines]\n all_stages = kwargs.pop('all_stages', False)\n at_start = kwargs.pop('at_start', False)\n skip_scratch = kwargs.pop('skip_scratch', False)\n assert not kwargs, \"Unknown keyword argument(s): {0}\".format(list(kwargs))\n\n froms = [\n instr for instr in self.structure\n if instr['instruction'] == 'FROM'\n ] or [{'endline': -1}] # no FROM? fake one before the beginning\n if not all_stages: # only modify the last\n froms = [froms[-1]]\n\n df_lines = self.lines\n # make sure last line has a newline if lines are to be appended\n if df_lines and not at_start:\n df_lines[-1] = _endline(df_lines[-1])\n\n # iterate through the stages in reverse order\n # so adding lines doesn't invalidate line numbers from structure dicts.\n # first add a bogus instruction to represent EOF in our iteration.\n froms.append({'startline': len(df_lines) + 1})\n for stage in range(len(froms)-2, -1, -1): # e.g. 0 for single or 2, 1, 0 for 3 stages\n start, finish = froms[stage], froms[stage+1]\n linenum = start['endline'] + 1 if at_start else finish['startline']\n image, _ = image_from(froms[stage].get('value') or '')\n if skip_scratch and image == 'scratch':\n continue\n df_lines[linenum:linenum] = lines\n\n self.lines = df_lines",
"def push(self, line):\n self.lines_pushed += line + \"\\n\"\n return code.InteractiveConsole.push(self, line)",
"def scope_line(self, text):\n self.__write_scope()\n self.write_line(text)",
"def add(self, line):\n self.body.append(line)",
"def add_line(self, lineno, text, important=True):\n self.append((lineno, important, text))",
"def add_extend(self, lines, name=''):\n return self._add_scope(lines, '%extend ' + name + '{', '}', indent=None, inline=False)",
"def add_inline(self, lines):\n return self._add_scope(lines, '%inline %{', '%}')",
"def _trunc_lines_append(self):\n\t\tp = self._edit.get_buffer()\n\t\tnLines = p.get_line_count()\n\t\twhile nLines > 0:\n\t\t\tif nLines <= self._maxLines +1:\n\t\t\t\tbreak\n\t\t\tstart = p.get_start_iter()\n\t\t\tend = p.get_start_iter()\n\t\t\tend.forward_line()\n\t\t\tp.delete(start, end)\n\t\t\tnLines = p.get_line_count()",
"def add_lines(self, lines):\n # Make sure this is a line or a list of lines.\n if isinstance(lines, str):\n self.add_lines(lines)\n else:\n for line in lines:\n self.add_line(line)",
"def _(event):\n if line.is_multiline:\n line.newline()\n else:\n if line.validate():\n cli_ref().line.add_to_history()\n cli_ref().set_return_value(line.document)",
"def runlines(self,lines):\n\n # We must start with a clean buffer, in case this is run from an\n # interactive IPython session (via a magic, for example).\n self.resetbuffer()\n lines = lines.split('\\n')\n more = 0\n for line in lines:\n # skip blank lines so we don't mess up the prompt counter, but do\n # NOT skip even a blank line if we are in a code block (more is\n # true)\n if line or more:\n more = self.push((self.prefilter(line,more)))\n # IPython's runsource returns None if there was an error\n # compiling the code. This allows us to stop processing right\n # away, so the user gets the error message at the right place.\n if more is None:\n break\n # final newline in case the input didn't have it, so that the code\n # actually does get executed\n if more:\n self.push('\\n')",
"def insert_lines(self, lines, color=defcolor):\n for i in range(len(lines)):\n self.insert_line(lines[i], 0, i*(self.font['height']+1), color)",
"def update(self, line):",
"def add_code(self, code_lines: List[str]) -> None:\n self.__code_block__ += code_lines",
"def add_line(self, text):\n\t\twidth, height = self.font.size(text)\n\t\tpos = (self.rect.left + 10, self.rect.bottom - height- 5)\n\t\trend = self.font.render(text, True, BLACK)\n\t\t# Move all already existing lines up\n\t\tfor i in range(len(self.all_lines)):\n\t\t\toldsurf, oldpos = self.all_lines[i]\n\t\t\tself.all_lines[i] = self.lift_line(oldsurf, height, oldpos)\n\t\t\tcopy = oldsurf.copy()\n\t\t\tcopy.fill(BG_COLOR)\n\t\t\tself.image.blit(copy, oldpos)\n\t\tself.all_lines.append([rend, pos])\n\t\tself.image.blit(rend, pos)",
"def double_line():\n print (\"=============================================================\")",
"def _update_lines(self, lines, new_line):\n code_matches = [x for x in _ansi_codes.finditer(new_line)]\n color_codes = [\n code.string[code.span()[0] : code.span()[1]] for code in code_matches\n ]\n\n # Add color codes from earlier in the unwrapped line, and then track any new ones we add.\n new_line = \"\".join(self._active_codes) + new_line\n\n for code in color_codes:\n if code != _ansi_color_reset_code:\n self._active_codes.append(code)\n else: # A single reset code resets everything\n self._active_codes = []\n\n # Always ensure each line is color terminted if any colors are\n # still active, otherwise colors will bleed into other cells on the console\n if len(self._active_codes) > 0:\n new_line = new_line + _ansi_color_reset_code\n\n lines.append(new_line)",
"def run(self, lines):\r\n pass"
] | [
"0.65126455",
"0.6127987",
"0.6127987",
"0.60583615",
"0.603168",
"0.59646225",
"0.59486276",
"0.59404665",
"0.5914367",
"0.58971936",
"0.58896637",
"0.58696604",
"0.5858786",
"0.58379865",
"0.5834611",
"0.578282",
"0.5762538",
"0.57595",
"0.5711435",
"0.5685841",
"0.5636901",
"0.56050605",
"0.55771995",
"0.5576901",
"0.5563431",
"0.5523492",
"0.5511948",
"0.5497733",
"0.5495263",
"0.54809356"
] | 0.6349906 | 1 |
add lines to the swim with an insert scope %{ %} | def add_insert(self, lines):
return self._add_scope(lines, '%{', '%}') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def insert(self):\n #vim.command(\"autocmd! CursorMovedI *\")\n try:\n placeholder = self.placeholders.pop()\n pos = self.findPlaceholder(placeholder)\n except IndexError:\n #TODO here I could do a findAllPlaceHolders on the complete file, for\n #reducing errors!\n pos = (0,0,0)\n if pos !=(0,0,0):\n line = self.buffer[pos[0]]\n new_line = line[:pos[1]] + \"\" + line[pos[1]+pos[2]:]\n cursor = (pos[0]+1, pos[1])\n vim.current.window.cursor = cursor\n vim.command(\"startinsert\")\n vim.command(\"redraw\")\n self.buffer[pos[0]] = new_line\n yield\n self.templateMode = False\n return",
"def add_begin(self, lines):\n return self._add_scope(lines, '%begin %{', '%}')",
"def add_python_begin(self, lines):\n return self._add_scope(lines, '%pythonbegin %{', '%}', inline=False)",
"def insert(self, line, where=0):\n self.buffer.insert(where, line)",
"def insertTemplate(self, word, line, pos):\n try:\n template = self.templates[word][1]\n before = line[0:pos[0]]\n after = line[pos[0]+pos[1]:]\n template = before + template + after\n template_list = template.split(\"\\n\")\n self.template_list = template_list\n #TODO replace with correct tabs\n (row, col) = vim.current.window.cursor\n self.buffer = vim.current.buffer\n new_list = []\n new_list.append(template_list[0])\n for template in template_list[1:]:\n new_list.append(self.helper.addTabs(template, col-len(word)))\n template_list = new_list \n self.buffer[row-1:row] = template_list\n self.placeholders = self.getAllPlaceholders(template_list)\n #template inserted, now go to template mode with and cycle with tabs\n self.row = row\n self.templateMode = True\n #set autocommand for insert mode\n #vim.command('norm \"\\<C-\\\\>\\<C-N>\"') \n #vim.command(\"startinsert\")\n #vim.command(\"autocmd CursorMovedI * python template.trigger()\")\n except KeyError:\n self.insertTab()",
"def add_raw(self, lines):\n return self._add_scope(lines, None, None, indent=None, inline=False)",
"def insert_lines(self, lines, color=defcolor):\n for i in range(len(lines)):\n self.insert_line(lines[i], 0, i*(self.font['height']+1), color)",
"def add_line(self, line):\n self._set_instance_data('body', self.indent + ' ' * 4 + line)",
"def add_line(self, line):\n self.code.extend([\" \" * self.indent_level, line, \"\\n\"])",
"def add_line(self, line):\n self.code.extend([\" \" * self.indent_level, line, \"\\n\"])",
"def scope_line(self, text):\n self.__write_scope()\n self.write_line(text)",
"def add_extend(self, lines, name=''):\n return self._add_scope(lines, '%extend ' + name + '{', '}', indent=None, inline=False)",
"def add_python(self, lines):\n return self._add_scope(lines, '%pythoncode %{', '%}', inline=False)",
"def insert_indent(event):\n env = XSH.env\n event.cli.current_buffer.insert_text(env.get(\"INDENT\"))",
"def _add_scope(self, *args, **kwargs):\n lines = scope_lines(*args, **kwargs)\n self.lines.extend(lines)\n return len(lines)",
"def _(event):\n line.insert_text(event.data * event.arg)",
"def insert_literal_tab(event):\n b = event.current_buffer\n if b.complete_state:\n b.complete_previous()\n else:\n env = XSH.env\n event.cli.current_buffer.insert_text(env.get(\"INDENT\"))",
"def insertBlock(self, utext):\n self.saveText()\n\n self.selection.clearSelection()\n word, cx = self.edCursor.getPos()\n # Get cursor offset from end of word (for new cursor placement)\n cxn = len(word.string) - cx\n # Insert the new text at the cursor position ...\n string = word.string[:cx] + utext + word.string[cx:]\n textLines = self.rsubject.textToLines(string)\n tline = word.tline\n nline0 = textLines[0]\n word1 = nline0.twords[0] # position to start rendering\n wx = tline.twords.index(word) # insertion index\n # delete the word which was under the cursor\n word.delete()\n # and set its string to non-empty so that the cursor\n # repositioning works (!)\n word.string = u\"DUMMY\"\n # and add the rest of the original line to the end of the insertion\n oline2 = tline.twords[wx+1:]\n del(tline.twords[wx:])\n nlineL = textLines[-1]\n word = nlineL.twords[-1] # get new word under cursor\n for w in oline2:\n nlineL.insert(w)\n # Append the first inserted line to tline\n for w in nline0.twords:\n tline.insert(w)\n\n # Insert the remaining lines into the subjects line list\n tlx = self.rsubject.tlines.index(tline) + 1\n self.rsubject.tlines[tlx:tlx] = textLines[1:]\n\n self.rsubject.renderShortened(word1)\n\n cx = len(word.string) - cxn\n self.edCursor.setPos(word, cx)",
"def vim_insert_mode(cmd: str):\n v = VimMode()\n v.set_insert_mode()\n actions.insert(cmd)",
"def write(self, line, *, preprocessor):\n preprocessor.insert_lines((line + \"\\n\",))",
"def add_init(self, lines):\n return self._add_scope(lines, '%init %{', '%}')",
"def append_cursor_enter_callback(self):",
"def add_inline(self, lines):\n return self._add_scope(lines, '%inline %{', '%}')",
"def pre_readline(self):\n \n self.readline.insert_text(' '* self.readline_indent)",
"def new_line():\n print()",
"def InsertMode(self):\n self.stc.SetLineCaret()\n self.stc.SetOvertype(False)\n self.BlockMode = False\n self._SetMode(ViKeyHandler.INSERT, u\"INSERT\")",
"def do_insert(self,args):\n if len(args) != 0:\n for w in args.split():\n sl.insertList(int(w.rstrip()))",
"def help_insert(self):\n print(INSERT)",
"def insert_import_mode_free(note):\n txt_import = open(PATH_UTILS + 'import_mode_free.md', 'r', encoding='utf-8').read()\n note['cells'] += [nb.v4.new_code_cell(txt_import)]\n note.cells[-1].metadata = {\"jupyter\": {\"source_hidden\": True}, \"init_cell\": True, \"editable\": False, \"deletable\": False, \"tags\": ['run_start']}",
"def onInsert(self):\n self.mainWindow.insert()"
] | [
"0.67922753",
"0.6663215",
"0.6172989",
"0.61030936",
"0.5942064",
"0.59274566",
"0.5889628",
"0.5855325",
"0.5761353",
"0.5761353",
"0.5756298",
"0.57560426",
"0.5698956",
"0.568308",
"0.5626498",
"0.55507535",
"0.55334395",
"0.55170107",
"0.551231",
"0.55110294",
"0.54957455",
"0.5462858",
"0.5432408",
"0.5419528",
"0.5415508",
"0.5401827",
"0.5377024",
"0.534736",
"0.53124535",
"0.53104794"
] | 0.7928264 | 0 |
add lines to the swim with an %init scope | def add_init(self, lines):
return self._add_scope(lines, '%init %{', '%}') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def init(self):\n sys_init_lines = CodeWriter.write_init()\n self.init_lines.extend(sys_init_lines)",
"def add_begin(self, lines):\n return self._add_scope(lines, '%begin %{', '%}')",
"def beginScope():",
"def add_python_begin(self, lines):\n return self._add_scope(lines, '%pythonbegin %{', '%}', inline=False)",
"def setup(self):\n self.ca_lines = []\n self.ca_lines.append(self.build_initial_line())\n self.set_display_from_lines()",
"def init():",
"def initialize():\n dislin.disini()",
"def init():\n pass",
"def add_insert(self, lines):\n return self._add_scope(lines, '%{', '%}')",
"def _add_scope(self, *args, **kwargs):\n lines = scope_lines(*args, **kwargs)\n self.lines.extend(lines)\n return len(lines)",
"def setup_lines(self):\n self.center_lines()\n self.space_lines()",
"def scope_line(self, text):\n self.__write_scope()\n self.write_line(text)",
"def _init():\n line.set_data([], [])\n return line,",
"def activate_shell_scope(self):\n self.variables = {}\n self.prompt = 'cm> '\n self.active_scope = \"\"\n self.scopes = []\n self.scopeless = ['load', 'info', 'var', 'use', 'quit', 'q', 'help']\n # self.scopeless = ['use', 'quit', 'q', 'EOF', 'eof', 'help']",
"def entry_message(self):\n print _(\"APPEND VIM MODELINE TO SOURCE CODE FILES\")\n print\n print _(\"DEFAULT CONFIGURATION:\")\n print _(\" - MODE:\"), self.default_mode\n print _(\" - LANGUAGE:\"), self.default_lang",
"def enterScope(self, name):",
"def add_raw(self, lines):\n return self._add_scope(lines, None, None, indent=None, inline=False)",
"def __init__(self):\n self.statiFile = \"\"\n self.printOrder = []\n self.instCount = 0\n self.initializedVars = {\"GF\":[],\"TF\":[],\"LF\":[]}",
"def writeInit(self):\r\n #We set the name to sys\r\n self.setFileName(\"Sys\")\r\n #Create the list of commands\r\n assemblerTraduction = []\r\n #SP = 256\r\n assemblerTraduction.append(\"@256\")\r\n assemblerTraduction.append(\"D=A\")\r\n assemblerTraduction.append(\"@SP\")\r\n assemblerTraduction.append(\"M=D\")\r\n #We write in the file the result traduction\r\n for traducedLines in assemblerTraduction:\r\n self.filename.write(traducedLines+\"\\n\")\r\n #Then we call Self.init = invoke sys.init\r\n self.writeCall(\"Sys.init\",0)",
"def add_extend(self, lines, name=''):\n return self._add_scope(lines, '%extend ' + name + '{', '}', indent=None, inline=False)",
"def init(stdscr):\n # Ensures a clean visual space.\n stdscr.clear()\n curses.curs_set(False)\n\n # Set the background of the app to the secondary color.\n stdscr.bkgd(' ', curses.color_pair(1))\n stdscr.refresh()",
"def _init_outliners(self):\n\n for outliner in self._outliners.values():\n outliner.refresh()",
"def exec_init_cmd(self):\n\n sys.argv = ['-c']\n self.push(self.rc.c)",
"def pre_readline(self):\n \n self.readline.insert_text(' '* self.readline_indent)",
"def Init(ss):\n rand.Seed(ss.RndSeed)\n ss.UpdateEnv()\n ss.StopNow = False\n ss.SetParams(\"\", False)\n ss.NewRun()\n ss.UpdateView(True)",
"def initialize_scope_settings(scope) -> None:\n scope.set_hi_res_mode()\n scope.set_single_acquisition_mode()\n scope.set_waveform_data_source_single_channel(1)\n scope.set_waveform_encoding_ascii()\n scope.set_waveform_start_point(1)\n scope.set_waveform_stop_point(scope.get_waveform_length())\n scope.set_waveform_start_point(1)\n scope.set_waveform_stop_point(10_000_000)\n scope.turn_off_all_measurements()\n scope.add_displayed_mean_measurement(1, 1)\n scope.add_displayed_mean_measurement(2, 2)\n scope.add_displayed_mean_measurement(3, 3)\n scope.zero_all_vertical_positions()\n scope.set_trigger_holdoff()\n return",
"def init():\n global DICO\n script_dir = os.path.dirname(__file__)\n dic = open(script_dir+\"/dico/dico-fr.txt\", \"r\")\n for line in dic:\n DICO.append(line.strip())",
"def __init__(self, environment):\n super(SlimishExtension, self).__init__(environment)\n environment.extend(\n slim_debug=True,\n slim_print=False,\n file_extensions=('.slim',),\n )",
"def start(text=\"\"):\n global _current_line\n _current_line = StatusLine(text, WIDTH)",
"def init_vars():\n\tda_vinci.base.usepackage(\"pgfkeys\")\n\tda_vinci.base.add_preamble(setup_script)"
] | [
"0.67998314",
"0.64131415",
"0.60576457",
"0.58872247",
"0.57417905",
"0.57333225",
"0.56565",
"0.56419224",
"0.55961514",
"0.55780023",
"0.5516226",
"0.5493941",
"0.5476834",
"0.5469563",
"0.54207057",
"0.54154503",
"0.54121745",
"0.5371379",
"0.5362003",
"0.53599983",
"0.5359573",
"0.53569525",
"0.5343369",
"0.5328084",
"0.5316965",
"0.5316129",
"0.5312243",
"0.52938384",
"0.5287486",
"0.5240082"
] | 0.7650657 | 0 |
add lines to the swim with an %inline scope | def add_inline(self, lines):
return self._add_scope(lines, '%inline %{', '%}') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_raw(self, lines):\n return self._add_scope(lines, None, None, indent=None, inline=False)",
"def add_begin(self, lines):\n return self._add_scope(lines, '%begin %{', '%}')",
"def add_python(self, lines):\n return self._add_scope(lines, '%pythoncode %{', '%}', inline=False)",
"def scope_line(self, text):\n self.__write_scope()\n self.write_line(text)",
"def add_python_begin(self, lines):\n return self._add_scope(lines, '%pythonbegin %{', '%}', inline=False)",
"def _add_scope(self, *args, **kwargs):\n lines = scope_lines(*args, **kwargs)\n self.lines.extend(lines)\n return len(lines)",
"def _inline_example( name: str, lines: str = None, image = True ):\n \n result = \"\"\n\n if image:\n result += f\"\"\"\n\n.. only:: html\n\n .. image:: ../examples/images/{name}_html.png\n\n.. only:: pdf\n\n .. image:: ../examples/images/{name}_pdf.png\n\n\"\"\"\n\n if lines is not None:\n result += f\"\"\"\n\n.. literalinclude:: ../examples/{name}.py\n :lines: {lines}\n\n\"\"\"\n return result",
"def add_extend(self, lines, name=''):\n return self._add_scope(lines, '%extend ' + name + '{', '}', indent=None, inline=False)",
"def add_line(self, line):\n self.code.extend([\" \" * self.indent_level, line, \"\\n\"])",
"def add_line(self, line):\n self.code.extend([\" \" * self.indent_level, line, \"\\n\"])",
"def add_insert(self, lines):\n return self._add_scope(lines, '%{', '%}')",
"def inline(self, *args, **kwargs):\n pass",
"def add_line(self, line):\n self._set_instance_data('body', self.indent + ' ' * 4 + line)",
"def lineparse(inline, options=None, **keywargs):\n p = LineParser(options, **keywargs)\n return p.feed(inline)",
"def write(self, line, *, preprocessor):\n preprocessor.insert_lines((line + \"\\n\",))",
"async def toggle_inline(self,ctx):\n self.inline = not self.inline",
"def ins(self, line=None):\n self.inspect(line=line)",
"def add_code(self, code_lines: List[str]) -> None:\n self.__code_block__ += code_lines",
"def _HandleInlineCode(self, input_line, match, output_stream):\n self._formatting_handler.HandleInlineCode(input_line, output_stream, match)",
"def add_init(self, lines):\n return self._add_scope(lines, '%init %{', '%}')",
"def add_to_cmd(run, batch, source, add_line, basename='xrb'):\n filepath = grid_strings.cmd_filepath(run, batch, source=source, basename=basename)\n print(f'Writing: {filepath}')\n with open(filepath) as f:\n lines = f.readlines()\n\n lines = [f'{add_line}\\n'] + lines\n with open(filepath, 'w') as f:\n f.writelines(lines)",
"def saveline(filename, fi, p1_CG, p2_CG, append=True):\n\n fid = open(filename, 'a' if append else 'w')\n snippet = ' '.join(map(str, [fi, p1_CG, p2_CG]))\n fid.write(snippet + '\\n')\n fid.close()",
"def single_line():\n print (\"-------------------------------------------------------------\")",
"def three_lines():\n new_line()\n new_line()\n new_line()",
"def asm(self, text):\n self.text.append(text)",
"def double_line():\n print (\"=============================================================\")",
"def add_comment(self, lines, verbosity_level=Verbosity.info):\n if self.verbosity < verbosity_level:\n return 0\n return self._add_scope(lines, None, None, '// ', inline=False)",
"def generateInlineCSS():",
"def extendMarkdown(self, md, md_globals):\r\n md.inlinePatterns['autolink'] = UrlizePattern(URLIZE_RE, md)",
"def edit_line(self, line):\r\n for code, code_obj in self.code_objs.items():\r\n line = self.__edit_line(line, code, code_obj)\r\n return line"
] | [
"0.6796899",
"0.64170915",
"0.64074826",
"0.62847924",
"0.6172507",
"0.6146074",
"0.60593814",
"0.60490286",
"0.5909625",
"0.5909625",
"0.5829333",
"0.5821996",
"0.5814859",
"0.58125925",
"0.5644606",
"0.56434417",
"0.5574224",
"0.5539332",
"0.5471069",
"0.5402047",
"0.5378864",
"0.5344954",
"0.53439504",
"0.5298306",
"0.52803725",
"0.5276192",
"0.52512753",
"0.5251012",
"0.5247021",
"0.5243099"
] | 0.83485454 | 0 |
add lines to the swim with a %pythoncode scope | def add_python(self, lines):
return self._add_scope(lines, '%pythoncode %{', '%}', inline=False) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_python_begin(self, lines):\n return self._add_scope(lines, '%pythonbegin %{', '%}', inline=False)",
"def add_code(self, code_lines: List[str]) -> None:\n self.__code_block__ += code_lines",
"def write_code(self, code):\n self.buffer.scope_line(code.lstrip(' \\t'))",
"def maybe_append_new_line(code):\n lines = code.split(\"\\n\")\n\n if lines[0] in [\"py\", \"python\"]:\n # add new line before last line being ```\n last_line = lines[-1]\n lines.pop()\n lines.append(\"\\n\" + last_line)\n\n return \"\\n\".join(lines)",
"def code():",
"def add_code(self, code):\n self.code += code",
"def visit_Python(self, node):\n py_code = compile(node.py_ast, self.filename, mode='exec')\n bp_code = Code.from_code(py_code)\n # Skip the SetLineo and ReturnValue codes\n self.code_ops.extend(bp_code.code[1:-2])",
"def add_code(self, s):\n self.code += ' ' * self.indent + s + '\\n'",
"def add_begin(self, lines):\n return self._add_scope(lines, '%begin %{', '%}')",
"def maybe_remove_new_line(code):\n lines = code.split(\"\\n\")\n\n if lines[0] in [\"py\", \"python\"]:\n # add new line before last line being ```\n lines = lines[:-2] + lines[-1:]\n\n return \"\\n\".join(lines)",
"def add_raw(self, lines):\n return self._add_scope(lines, None, None, indent=None, inline=False)",
"def add_code_block(self, code_block: 'CodeElement') -> None:\n self.add_code(code_block.get_code())",
"def add_line(self, line):\n self.code.extend([\" \" * self.indent_level, line, \"\\n\"])",
"def add_line(self, line):\n self.code.extend([\" \" * self.indent_level, line, \"\\n\"])",
"def appendCode(dest, indent, text):\n\n dest.append(\"%s// START custom code\" % indent)\n for s in text.rstrip().split(\"\\n\"):\n dest.append(\"%s%s\" % (indent, s))\n dest.append(\"%s// END custom code\" % indent)",
"def main_code():\n pass",
"def add_python_prepend(self, method: Function, lines):\n return self.add_feature(lines, method.pattern, 'pythonprepend')",
"def list_code(self, ofile=sys.stdout):\r\n for i, line in enumerate(self.code().split('\\n')):\r\n print >> ofile, ('%4i' % (i + 1)), line\r\n ofile.flush()",
"def lines_of_code(project: Project) -> int:\n ret = sh.cloc(\"--quiet\", \"--include-lang=Python\", \"--yaml\", str(project.root))\n ret_obj = list(yaml.safe_load_all(str(ret)))\n return ret_obj[0][\"Python\"][\"code\"]",
"def add_inline(self, lines):\n return self._add_scope(lines, '%inline %{', '%}')",
"def code_line(self, var_name: str, unnamed_input_vars: Sequence[str], named_input_vars: Dict[str, str]) -> str:\n raise NotImplementedError",
"def scope_line(self, text):\n self.__write_scope()\n self.write_line(text)",
"def block_code(self, code, lang=None):\n code = code.rstrip('\\n')\n return [\"<code>\"] + code",
"def add_block(self, cxnode, code, **magic_vars):\n ast = cparse(code)\n # ast.show()\n generator = MagicCGenerator(cxnode, magic_vars)\n generator.indent_level = self.indent_level\n hdr = '\\n%s// %s\\n' % (' ' * self.indent_level,\n cxnode.__class__.__name__)\n self.code += hdr + generator.visit(ast)",
"def add_python_append(self, method: Function, lines):\n return self.add_feature(lines, method.pattern, 'pythonappend')",
"def _add_scope(self, *args, **kwargs):\n lines = scope_lines(*args, **kwargs)\n self.lines.extend(lines)\n return len(lines)",
"def add_extend(self, lines, name=''):\n return self._add_scope(lines, '%extend ' + name + '{', '}', indent=None, inline=False)",
"def visit_Python(self, node):\n # This compiles the given Python ast into a Python code object\n # then disassembles it into a byteplay code object. This allows\n # us to interleave the instructions with those generated for\n # the rest of the module and then compile a single unified \n # code object.\n py_code = compile(node.py_ast, self.filename, mode='exec')\n bpc = Code.from_code(py_code)\n # Skip the SetLineo and ReturnValue codes\n self.code_ops.extend(bpc.code[1:-2])",
"def parse_python_cmd(self, line):\n # Find the code to run\n end_line = self.get_end_brace()\n\n py_script = self.file_ltxt[self.line_num+2:end_line]\n # Replace tabs with spaces so things run!\n py_script = list(map(lambda x: x.replace(\"\\t\", \" \"), py_script))\n\n\n # Now shift everything back to the minimum indentation\n # Find the minimum indentation that is on every line\n min_indent = 100000\n for line in py_script:\n\n if line.strip()[0] != '#':\n indent = re.findall(\"^ *\", line)[0]\n else:\n continue\n min_indent = min([min_indent, len(indent)])\n\n # Remove the indent\n for line_num in range(len(py_script)):\n ind_search = \"^\" + \" \"*min_indent\n line = re.sub(ind_search, \"\", py_script[line_num])\n py_script[line_num] = line\n py_script = '\\n'.join(py_script)\n\n self.exec_python_script(script_txt=py_script)\n\n self.line_num = end_line",
"def edit_line(self, line):\r\n for code, code_obj in self.code_objs.items():\r\n line = self.__edit_line(line, code, code_obj)\r\n return line"
] | [
"0.72855204",
"0.67737067",
"0.64277625",
"0.6366127",
"0.6319772",
"0.61823004",
"0.6182049",
"0.6001075",
"0.5914849",
"0.59011936",
"0.5879456",
"0.5847941",
"0.5823648",
"0.5823648",
"0.576693",
"0.57615745",
"0.5663095",
"0.5660934",
"0.5612203",
"0.5569046",
"0.5562889",
"0.5530553",
"0.55204093",
"0.5493292",
"0.54789716",
"0.54777724",
"0.5437564",
"0.54208845",
"0.54165524",
"0.54025686"
] | 0.82194614 | 0 |
add lines to the swim with a %pythonbegin scope | def add_python_begin(self, lines):
return self._add_scope(lines, '%pythonbegin %{', '%}', inline=False) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_begin(self, lines):\n return self._add_scope(lines, '%begin %{', '%}')",
"def add_python(self, lines):\n return self._add_scope(lines, '%pythoncode %{', '%}', inline=False)",
"def beginScope():",
"def add_python_prepend(self, method: Function, lines):\n return self.add_feature(lines, method.pattern, 'pythonprepend')",
"def add_init(self, lines):\n return self._add_scope(lines, '%init %{', '%}')",
"def add_insert(self, lines):\n return self._add_scope(lines, '%{', '%}')",
"def begin():\n return BeginBlock()",
"def add_extend(self, lines, name=''):\n return self._add_scope(lines, '%extend ' + name + '{', '}', indent=None, inline=False)",
"def add_raw(self, lines):\n return self._add_scope(lines, None, None, indent=None, inline=False)",
"def bp_ins(filename, start, end):\n with open(filename, 'r') as f:\n lines = f.readlines()\n lines.insert(start-1, \"\")\n lines.insert(end+1, \"\")\n lines.insert(0, \"\")\n lines[start-1] = 'ipdb.set_trace()\\n'\n lines[end+1] = 'ipdb.set_trace()\\n'\n lines[0] = \"import ipdb\\n\"\n with open(f\"break_{filename}\", 'w+') as f:\n f.writelines(lines)",
"def insert_import_mode_free(note):\n txt_import = open(PATH_UTILS + 'import_mode_free.md', 'r', encoding='utf-8').read()\n note['cells'] += [nb.v4.new_code_cell(txt_import)]\n note.cells[-1].metadata = {\"jupyter\": {\"source_hidden\": True}, \"init_cell\": True, \"editable\": False, \"deletable\": False, \"tags\": ['run_start']}",
"def scope_line(self, text):\n self.__write_scope()\n self.write_line(text)",
"def add_inline(self, lines):\n return self._add_scope(lines, '%inline %{', '%}')",
"def add_code(self, code_lines: List[str]) -> None:\n self.__code_block__ += code_lines",
"def new_line():\n print()",
"def maybe_append_new_line(code):\n lines = code.split(\"\\n\")\n\n if lines[0] in [\"py\", \"python\"]:\n # add new line before last line being ```\n last_line = lines[-1]\n lines.pop()\n lines.append(\"\\n\" + last_line)\n\n return \"\\n\".join(lines)",
"def _add_scope(self, *args, **kwargs):\n lines = scope_lines(*args, **kwargs)\n self.lines.extend(lines)\n return len(lines)",
"def setup(self, ds: PetscDocStringImpl) -> None:\n items = {}\n\n class Inspector:\n __slots__ = 'codeblocks', 'startline'\n\n codeblocks: int\n startline: int\n\n def __init__(self, startline: int) -> None:\n self.codeblocks = 0\n self.startline = startline\n return\n\n def __call__(self, ds: PetscDocStringImpl, loc: SourceRange, line: str, verdict: Verdict) -> None:\n sub = self.codeblocks\n lstrp = line.lstrip()\n if lstrp.startswith('.vb'):\n items[sub] = [loc.start.line - self.startline]\n elif lstrp.startswith('.ve'):\n assert len(items[sub]) == 1\n items[sub].append(loc.start.line - self.startline + 1)\n self.codeblocks += 1\n return\n\n super()._do_setup(ds, Inspector(self.extent.start.line if self else 0))\n self.items = items\n return",
"def add_block(self, cxnode, code, **magic_vars):\n ast = cparse(code)\n # ast.show()\n generator = MagicCGenerator(cxnode, magic_vars)\n generator.indent_level = self.indent_level\n hdr = '\\n%s// %s\\n' % (' ' * self.indent_level,\n cxnode.__class__.__name__)\n self.code += hdr + generator.visit(ast)",
"def preamble(gcode, preamble):\r\n gcode.append('; <Start Preamble> ')\r\n gcode+=preamble\r\n risePen(gcode)\r\n gcode.append('; <End Preamble> ')\r\n gcode.append(' ')",
"def add_new_globals_after_comment(text, jsglobals):\n\n output = []\n\n in_start_comment = True\n\n for line in text.split(\"\\n\"):\n\n if in_start_comment and not is_comment_line(line):\n # Inject globals\n globals_line = generate_globals_line(None, jsglobals)\n output.append(globals_line)\n in_start_comment = False\n\n if not is_comment_line(line):\n in_start_comment = False\n\n output.append(line)\n\n return \"\\n\".join(output)",
"def _generate_headlines(self):\n includes = set()\n for decl in self._ast.decls:\n includes.add(decl.cpp_file)\n for include in includes:\n yield f'#include \"{include}\"'\n yield '#include \"third_party/pybind11/include/pybind11/pybind11.h\"'\n yield ''\n yield 'namespace py = pybind11;'\n yield ''",
"def shel(line):\n get_ipython().run_line_magic('autocall', '1')\n get_ipython().run_line_magic('rehashx', '')",
"def current_line_preserved():\n\n current_line = get_current_line_number() + 1\n yield\n vim.command('{0}'.format(current_line))",
"def default(self, line):\n self.history.append(line)\n if line[:1] == '!':\n line = line[1:]\n locals = self.curframe_locals\n ns = self.curframe.f_globals.copy()\n ns.update(locals)\n try:\n code = compile(line + '\\n', '<stdin>', 'single')\n save_stdout = sys.stdout\n save_stdin = sys.stdin\n save_displayhook = sys.displayhook\n try:\n sys.stdin = self.stdin\n sys.stdout = self.stdout\n sys.displayhook = self.displayhook\n exec(code, ns, locals)\n finally:\n sys.stdout = save_stdout\n sys.stdin = save_stdin\n sys.displayhook = save_displayhook\n except:\n exc_info = sys.exc_info()[:2]\n self.error(traceback.format_exception_only(*exc_info)[-1].strip())",
"def pre_readline(self):\n \n self.readline.insert_text(' '* self.readline_indent)",
"def example():\n print \"\"\"\n \"\"\"",
"def example():\n print \"\"\"\n \"\"\"",
"def enterScope(self, name):",
"def runlines(self,lines):\n\n # We must start with a clean buffer, in case this is run from an\n # interactive IPython session (via a magic, for example).\n self.resetbuffer()\n lines = lines.split('\\n')\n more = 0\n for line in lines:\n # skip blank lines so we don't mess up the prompt counter, but do\n # NOT skip even a blank line if we are in a code block (more is\n # true)\n if line or more:\n more = self.push((self.prefilter(line,more)))\n # IPython's runsource returns None if there was an error\n # compiling the code. This allows us to stop processing right\n # away, so the user gets the error message at the right place.\n if more is None:\n break\n # final newline in case the input didn't have it, so that the code\n # actually does get executed\n if more:\n self.push('\\n')"
] | [
"0.7444203",
"0.7173692",
"0.6397793",
"0.5903751",
"0.5727093",
"0.56622964",
"0.5648465",
"0.55984676",
"0.55900687",
"0.55078626",
"0.545898",
"0.54386294",
"0.54139555",
"0.5387291",
"0.53734475",
"0.53591806",
"0.5343911",
"0.53297573",
"0.5301706",
"0.52969384",
"0.5292095",
"0.5286336",
"0.5261381",
"0.5255376",
"0.5228548",
"0.5218523",
"0.52134204",
"0.52134204",
"0.5201661",
"0.517993"
] | 0.8458866 | 0 |
add lines to the swim with a %pythonappend scope. requires the method to which to bind. | def add_python_append(self, method: Function, lines):
return self.add_feature(lines, method.pattern, 'pythonappend') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_python(self, lines):\n return self._add_scope(lines, '%pythoncode %{', '%}', inline=False)",
"def add_python_prepend(self, method: Function, lines):\n return self.add_feature(lines, method.pattern, 'pythonprepend')",
"def add_extend(self, lines, name=''):\n return self._add_scope(lines, '%extend ' + name + '{', '}', indent=None, inline=False)",
"def AppendExtra(self, extra):\n self.script.append(extra)",
"def append(self, *args, **kwargs): # real signature unknown\n pass",
"def add_python_begin(self, lines):\n return self._add_scope(lines, '%pythonbegin %{', '%}', inline=False)",
"def append(self, *args):\n self.add(*args)",
"def _add_scope(self, *args, **kwargs):\n lines = scope_lines(*args, **kwargs)\n self.lines.extend(lines)\n return len(lines)",
"def append(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads\n pass",
"def append(self, line):\n self.ag.append(line)",
"def append(self, line):\n self.buffer.append(line)",
"def append(self, text, afterline=None):\n if afterline:\n self._vim.current.buffer.append(text, afterline)\n else:\n self._vim.current.buffer.append(text)",
"def eggs(some_parameter):\n some_parameter.append('Hello')\n print(some_parameter)",
"def append(command):\n with open(GlobalVariables.get_instance().get('history_filename'), 'ab') as fin:\n fin.write('{0}\\n'.format(command))\n fin.close()",
"def add_python_files(self):",
"def append(self, mod=None, **xargs):\n if mod is None:\n raise ValueError('stack.append: module not specified')\n else:\n m = mod(self, **xargs)\n self.append_instance(m)",
"def maybe_append_new_line(code):\n lines = code.split(\"\\n\")\n\n if lines[0] in [\"py\", \"python\"]:\n # add new line before last line being ```\n last_line = lines[-1]\n lines.pop()\n lines.append(\"\\n\" + last_line)\n\n return \"\\n\".join(lines)",
"def add_hook(self, method, args=None, kwargs=None):\n self.hook.append((method, args, kwargs))",
"def do_append(self, level, msg, *args, **kwargs):\n record = self.log.makeRecord(\"poller\", level, \"(fn)\", 0, msg, args, None, \"()\", None)\n s = self.buf_formatter.format(record)\n self.buf.append(s)",
"def _log_append(self, msg):\n\t\tp = self._edit.get_buffer()\n\t\tstart,end = p.get_bounds()\n\t\tp.insert(end, msg)\n\t\tself._trunc_lines()\n\t\tself._edit.scroll_to_iter(p.get_end_iter(), 0.0)",
"def append (self, item):\n pass",
"def append(self, x) -> None:\n pass",
"def add_to_cmd(run, batch, source, add_line, basename='xrb'):\n filepath = grid_strings.cmd_filepath(run, batch, source=source, basename=basename)\n print(f'Writing: {filepath}')\n with open(filepath) as f:\n lines = f.readlines()\n\n lines = [f'{add_line}\\n'] + lines\n with open(filepath, 'w') as f:\n f.writelines(lines)",
"def append(self, val):\n self.new_dll.append(val)",
"def add_line(self, line):\n self.code.extend([\" \" * self.indent_level, line, \"\\n\"])",
"def add_line(self, line):\n self.code.extend([\" \" * self.indent_level, line, \"\\n\"])",
"def append_function(self, extra_function):\n assert extra_function.name != self.name, \\\n 'Name of the remote function should be different'\n for func in self.appended_functions:\n assert extra_function.name != func.name, \\\n 'Cannot append functions with the same name'\n self.appended_functions.append(extra_function)",
"def extend(source, add_attribute):\n\n ExtendCommandExecutor().extend(source, add_attribute)",
"def extend(self, *args):\n for arg in args:\n self.add(arg)",
"def append( self, command ):\n self.commands.append( command )\n command.chunk= self"
] | [
"0.6781854",
"0.64491373",
"0.6193824",
"0.61880773",
"0.6120018",
"0.6037418",
"0.58205456",
"0.5801156",
"0.575452",
"0.5685965",
"0.5684664",
"0.5610529",
"0.5569516",
"0.5468532",
"0.5458067",
"0.5443156",
"0.54287255",
"0.5416714",
"0.5333754",
"0.53315073",
"0.5325522",
"0.5291511",
"0.5269338",
"0.52334166",
"0.5209042",
"0.5209042",
"0.518707",
"0.51826626",
"0.51689833",
"0.51548177"
] | 0.79051167 | 0 |
add lines to the swim with a %pythonprepend scope. requires the method to which to bind. | def add_python_prepend(self, method: Function, lines):
return self.add_feature(lines, method.pattern, 'pythonprepend') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_python_begin(self, lines):\n return self._add_scope(lines, '%pythonbegin %{', '%}', inline=False)",
"def add_python(self, lines):\n return self._add_scope(lines, '%pythoncode %{', '%}', inline=False)",
"def add_python_append(self, method: Function, lines):\n return self.add_feature(lines, method.pattern, 'pythonappend')",
"def add_begin(self, lines):\n return self._add_scope(lines, '%begin %{', '%}')",
"def add_extend(self, lines, name=''):\n return self._add_scope(lines, '%extend ' + name + '{', '}', indent=None, inline=False)",
"def _log_prepend(self, msg):\n\t\tp = self._edit.get_buffer()\n\t\tstart = p.get_start_iter()\n\t\tp.insert(start, msg)\n\t\tself._trunc_lines()\n\t\tself._edit.scroll_to_iter(p.get_start_iter(), 0.0)",
"def add_insert(self, lines):\n return self._add_scope(lines, '%{', '%}')",
"def add_import_line(self, line: str) -> None:\n if line not in self._import_lines:\n self._import_lines.append(line)",
"def _add_scope(self, *args, **kwargs):\n lines = scope_lines(*args, **kwargs)\n self.lines.extend(lines)\n return len(lines)",
"def add_line(self, line):\n self.code.extend([\" \" * self.indent_level, line, \"\\n\"])",
"def add_line(self, line):\n self.code.extend([\" \" * self.indent_level, line, \"\\n\"])",
"def maybe_append_new_line(code):\n lines = code.split(\"\\n\")\n\n if lines[0] in [\"py\", \"python\"]:\n # add new line before last line being ```\n last_line = lines[-1]\n lines.pop()\n lines.append(\"\\n\" + last_line)\n\n return \"\\n\".join(lines)",
"def gip(line):\n import os\n import shlex\n import textwrap\n args = shlex.split(line)\n if len(args) == 0:\n path = '~/.ipyscratch'\n else:\n try:\n path = eval(line)\n except:\n path = args[0]\n with open(os.path.expanduser(path)) as f:\n cmd = textwrap.dedent(f.read())\n get_ipython().run_line_magic('pycat', path)\n # update history\n In[-1] = cmd\n get_ipython().run_code(cmd)",
"def prepend(self, value):\n pass",
"def prepend(self, *args):\n return _libsbml.ListWrapperSBase_prepend(self, *args)",
"def AppendExtra(self, extra):\n self.script.append(extra)",
"def ipython_monkeypatch(self, IP):\n\n # new input hook for Python source\n # also, trap stdout, stderr\n IP._runsource = IP.runsource\n def runsource(source, filename=\"<input>\", symbol=\"single\"):\n code = IP._runsource(source, filename=filename, symbol=symbol)\n if code == False:\n # it's complete\n number = IP.outputcache.prompt_count\n self.add_input(source, number)\n if (self.checkpoint is not None and \n not IP.outputcache.prompt_count % self.checkpoint):\n self.write()\n return code\n IP.runsource = runsource\n\n # new input hook for aliases\n IP._handle_alias = IP.handle_alias\n def handle_alias(line,continue_prompt=None,\n pre=None,iFun=None,theRest=None):\n line_out = IP._handle_alias(line, continue_prompt, pre, iFun,\n theRest)\n number = IP.outputcache.prompt_count\n self.add_special_input(line, number)\n return line_out\n IP.handle_alias = handle_alias\n\n # new input hook for shell escapes\n IP._handle_shell_escape = IP.handle_shell_escape\n def handle_shell_escape(line, continue_prompt=None,\n pre=None,iFun=None,theRest=None):\n line_out = IP._handle_shell_escape(line, continue_prompt, pre, \n iFun, theRest)\n number = IP.outputcache.prompt_count\n self.add_special_input(line, number)\n return line_out\n IP.handle_shell_escape = handle_shell_escape\n\n # new input hook for magics\n IP._handle_magic = IP.handle_magic\n def handle_magic(line, continue_prompt=None,\n pre=None,iFun=None,theRest=None):\n line_out = IP._handle_magic(line, continue_prompt, pre, \n iFun, theRest)\n number = IP.outputcache.prompt_count\n self.add_special_input(line, number)\n return line_out\n IP.handle_magic = handle_magic\n\n # new input hook for autocall lines\n IP._handle_auto = IP.handle_auto\n def handle_auto(line, continue_prompt=None,\n pre=None,iFun=None,theRest=None):\n line_out = IP._handle_auto(line, continue_prompt, pre, \n iFun, theRest)\n number = IP.outputcache.prompt_count\n self.add_special_input(line, number)\n return line_out\n IP.handle_auto = handle_auto\n\n # new input hook for helps\n IP._handle_help = IP.handle_help\n def handle_help(line, continue_prompt=None,\n pre=None,iFun=None,theRest=None):\n line_out = IP._handle_help(line, continue_prompt, pre, \n iFun, theRest)\n number = IP.outputcache.prompt_count\n self.add_special_input(line, number)\n return line_out\n IP.handle_help = handle_help\n\n # new output hook\n IP.outputcache._update = IP.outputcache.update\n def update(arg):\n IP.outputcache._update(arg)\n self.add_output(self.get_str(arg), IP.outputcache.prompt_count)\n IP.outputcache.update = update\n\n IP.esc_handlers = {IP.ESC_PAREN:handle_auto,\n IP.ESC_QUOTE:handle_auto,\n IP.ESC_QUOTE2:handle_auto,\n IP.ESC_MAGIC:handle_magic,\n IP.ESC_HELP:handle_help,\n IP.ESC_SHELL:handle_shell_escape,\n }\n\n self.IP = IP\n\n # I'm *so* going to Hell for this.",
"def add_breakpoint():\n raise NotImplementedError()",
"def add_raw(self, lines):\n return self._add_scope(lines, None, None, indent=None, inline=False)",
"def prepend(self, p: edu.uci.ics.jung.visualization.control.GraphMousePlugin) -> None:\n ...",
"def bp_ins(filename, start, end):\n with open(filename, 'r') as f:\n lines = f.readlines()\n lines.insert(start-1, \"\")\n lines.insert(end+1, \"\")\n lines.insert(0, \"\")\n lines[start-1] = 'ipdb.set_trace()\\n'\n lines[end+1] = 'ipdb.set_trace()\\n'\n lines[0] = \"import ipdb\\n\"\n with open(f\"break_{filename}\", 'w+') as f:\n f.writelines(lines)",
"def extend(source, add_attribute):\n\n ExtendCommandExecutor().extend(source, add_attribute)",
"def leet(line_leet: str):\n for old_printer, new_printer in leet_replacements:\n line_leet = line_leet.replace(old_printer, new_printer)\n print(line_leet)\n if args.append is not None:\n print(line_leet + args.append)\n if args.prepend is not None:\n print(args.prepend + line_leet)",
"def insert(self, line, where=0):\n self.buffer.insert(where, line)",
"def add_code(self, code_lines: List[str]) -> None:\n self.__code_block__ += code_lines",
"def add_to_cmd(run, batch, source, add_line, basename='xrb'):\n filepath = grid_strings.cmd_filepath(run, batch, source=source, basename=basename)\n print(f'Writing: {filepath}')\n with open(filepath) as f:\n lines = f.readlines()\n\n lines = [f'{add_line}\\n'] + lines\n with open(filepath, 'w') as f:\n f.writelines(lines)",
"def _generate_headlines(self):\n includes = set()\n for decl in self._ast.decls:\n includes.add(decl.cpp_file)\n for include in includes:\n yield f'#include \"{include}\"'\n yield '#include \"third_party/pybind11/include/pybind11/pybind11.h\"'\n yield ''\n yield 'namespace py = pybind11;'\n yield ''",
"def prepend(self, sequence):\n self._add_sequence(0, sequence)",
"def prepend(self, *args):\n return _libsbml.ListWrapperModelCreator_prepend(self, *args)",
"def add_python_files(self):"
] | [
"0.73254114",
"0.6698062",
"0.65658134",
"0.61451954",
"0.6006999",
"0.56337845",
"0.5618978",
"0.56152195",
"0.5612275",
"0.55361205",
"0.55361205",
"0.55045193",
"0.55004585",
"0.54829097",
"0.54454756",
"0.5405922",
"0.5385084",
"0.5319596",
"0.5301912",
"0.5252837",
"0.5244087",
"0.5186686",
"0.5161014",
"0.51551306",
"0.5144426",
"0.5127558",
"0.5073861",
"0.5068775",
"0.50558764",
"0.5049595"
] | 0.81695557 | 0 |
add lines to the swim with a %exception scope. requires the method to which to bind. | def add_exception_check(self, lines):
return self.add_feature(lines, '', 'exception') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def addExceptionMessage(self, q, inst, traceback):\n self.fail('FAIL: Exception raised: %s' % inst)\n self.addMessage('')\n for line in traceback.format_exc().split('\\n'):\n self.addMessage(line)",
"def append_exception(self, message=u''): \n if isinstance(sys.exc_info()[1], sqlalchemy.exceptions.SQLAlchemyError):\n self.db_session.rollback()\n Webwidgets.Widget.append_exception(self, message)",
"def exception(self, *args, **kwargs):",
"def on_exception(self):\n pass",
"def add_try_clause(code, excpt):\n code = code.replace('\\t', ' ')\n return (\"try:\\n ...\\n\" + '\\n'.join([\" \" + line for line in code.split('\\n')])\n + \"\\nexcept \" + excpt.__name__ + \":\\n pass\")",
"def add_exception(self, exception):\n self._set_instance_data('exceptions',\n self.add_dependency(exception))",
"def _on_exception(self, exception):\n pass",
"def raise_(err):\n raise err",
"def add_breakpoint():\n raise NotImplementedError()",
"def exception(self, e):\n pass",
"def _exception_handler(self, exception_class, message):\n self.messages.append((exception_class, message))\n raise exception_class(message)",
"def _add_exception (self, e) :\n\n self._exceptions.append (e)\n self._messages.append (e.message)\n\n if e._rank > self._top_exception._rank :\n self._top_exception = e",
"def _error_handling(self,e,func):\n print(self.type, \" sufferred exception in \" , func , \":\" , e)",
"def user_exception(self, frame, exc_tuple):\r\n frame.f_locals['__exc_tuple__'] = exc_tuple\r\n\r\n if not self._wait_for_mainpyfile:\r\n self.interaction(frame, exc_tuple)",
"def traceback(self):",
"def exception(self, *args, **kwargs):\n return super(Blueprint, self).exception(*args, **kwargs)",
"def exception_handler(self, exception):\n pass",
"def on_error(self, exception):\n traceback.print_exc()",
"def main():\n cause_a_bunch_of_exceptions_to_happen()",
"def ExceptionAppend(e, msg):\n if not e.args:\n e.args = (msg,)\n elif len(e.args) == 1:\n e.args = (str(e.args[0]) + ' ' + msg,)\n else:\n e.args = (str(e.args[0]) + ' ' + msg,) + e.args[1:]",
"def unexpectedException(self):",
"def unexpected_error(self, exception):",
"def user_exception(self, frame, exc_info):\n pass",
"def whenException(self, channel, call):",
"def idb_excepthook(type, value, tb):\n if hasattr(sys, \"ps1\") or not sys.stderr.isatty():\n sys.__excepthook__(type, value, tb)\n else:\n traceback.print_exception(type, value, tb)\n print\n pdb.pm()",
"def ERR(self):",
"def hook_server_inspect_exception(self, request_event, reply_event, exc_infos):\r\n task_context = self.hook_get_task_context()\r\n for functor in self._hooks['server_inspect_exception']:\r\n functor(request_event, reply_event, task_context, exc_infos)",
"def on_exception(self):\n\n def decorator(coro):\n self._hooks.append((\"exception\", coro))\n return coro\n\n return decorator",
"def error(self, *lines):\n if self.__debug_level >= DEBUG_LEVELS['error']:\n self.print_lines(self.colored(('red', 'bold'), lines))",
"def user_line(self, frame):\r\n if \"__exc_tuple__\" in frame.f_locals:\r\n del frame.f_locals['__exc_tuple__']\r\n\r\n if self._wait_for_mainpyfile:\r\n if (self.mainpyfile != self.canonic(frame.f_code.co_filename)\r\n or frame.f_lineno <= 0):\r\n return\r\n self._wait_for_mainpyfile = False\r\n self.bottom_frame = frame\r\n\r\n if self.get_break(self.canonic(frame.f_code.co_filename), frame.f_lineno):\r\n self.current_bp = (\r\n self.canonic(frame.f_code.co_filename), frame.f_lineno)\r\n else:\r\n self.current_bp = None\r\n self.ui.update_breakpoints()\r\n\r\n self.interaction(frame)"
] | [
"0.5950198",
"0.58148843",
"0.57832515",
"0.5621935",
"0.5551961",
"0.5530148",
"0.5487709",
"0.5476564",
"0.5469961",
"0.54511577",
"0.5426439",
"0.5423702",
"0.5421121",
"0.5376286",
"0.53517544",
"0.5350704",
"0.5337269",
"0.53213924",
"0.5309759",
"0.53026044",
"0.5300409",
"0.5281702",
"0.5250168",
"0.5245143",
"0.5227078",
"0.5221895",
"0.5211951",
"0.5210624",
"0.5206161",
"0.51745373"
] | 0.66227597 | 0 |
add lines to the swim with a %contract scope. requires the method to which to bind. | def add_contract(self, method: Function, lines):
return self.add_feature(lines, method.pattern, 'contract') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_contract(self, contract):\n self.contracts[contract.name] = contract\n self.alphabet = list(set(self.alphabet) | set(contract.variables))",
"def _add_scope(self, *args, **kwargs):\n lines = scope_lines(*args, **kwargs)\n self.lines.extend(lines)\n return len(lines)",
"def scope_line(self, text):\n self.__write_scope()\n self.write_line(text)",
"def enroll(self, contract_name, contract_address, contract_abi):\n contract_data = [contract_name, contract_address, contract_abi]\n registry_data = self.read()\n registry_data.append(contract_data)\n self.__write(registry_data)",
"def add_begin(self, lines):\n return self._add_scope(lines, '%begin %{', '%}')",
"def enterScope(self, name):",
"def augment_with_ast_info(contract):\n contract.functions = []\n contract_file = get_containing_file(contract)\n ast = contract_file.ast\n\n\n\n contract.ast = get_contract_ast(ast, contract.name) # Attention, here we write over a already filled variable\n\n\n f_asts = get_all_functions_for_contract(contract)\n\n\n # f_asts = get_function_asts(contract.ast)\n contract.calldata_name_map = get_calldata_name_map(abi_json_to_abi(contract.abi))\n for f_ast in f_asts:\n file = get_containing_file(contract)\n contract.functions.append(SolidityFunction(f_ast, file.data, contract.calldata_name_map))\n if f_ast in contract.implemented_functions:\n contract.implemented_functions[contract.implemented_functions.index(f_ast)] = contract.functions[-1]",
"def add_raw(self, lines):\n return self._add_scope(lines, None, None, indent=None, inline=False)",
"def addScope(self, scope):\n assert isinstance(scope, ScopeInterface);",
"def contract(self, contract):\n\n self._contract = contract",
"def command_create(state, bindings, pargs) :\n parser = argparse.ArgumentParser(prog='create')\n parser.add_argument('-c', '--contract-class', help='Name of the contract class', required = True, type=str)\n parser.add_argument('-i', '--interpreter', help='Name of the interpreter used to evaluate the contract', default=state.get(['Contract', 'Interpreter']))\n parser.add_argument('-s', '--contract-source', help='File that contains contract source code', required=True, type=str)\n parser.add_argument('-r', '--compilation-report', help='File that contains contract compilation report', type=str)\n parser.add_argument('-p', '--pservice-group', help='Name of the provisioning service group to use', default=\"default\")\n parser.add_argument('-e', '--eservice-group', help='Name of the enclave service group to use', default=\"default\")\n parser.add_argument('-f', '--save-file', help='File where contract data is stored', type=str)\n parser.add_argument('--symbol', help='binding symbol for result', type=str)\n options = parser.parse_args(pargs)\n\n contract_class = options.contract_class\n contract_source = options.contract_source\n\n # ---------- load the invoker's keys ----------\n try :\n keyfile = state.private_key_file\n keypath = state.get(['Key', 'SearchPath'])\n client_keys = ServiceKeys.read_from_file(keyfile, keypath)\n except Exception as e :\n raise Exception('unable to load client keys; {0}'.format(str(e)))\n\n # ---------- read the contract source code ----------\n try :\n source_path = state.get(['Contract', 'SourceSearchPath'])\n report = None\n if options.compilation_report:\n report_file = options.compilation_report\n report = ContractCompilationReport.create_from_file(report_file, source_path)\n contract_code = ContractCode.create_from_file(contract_class, contract_source, source_path, interpreter=options.interpreter, compilation_report=report)\n except Exception as e :\n raise Exception('unable to load contract source; {0}'.format(str(e)))\n\n logger.debug('Loaded contract code for %s', contract_class)\n\n # ---------- set up the enclave clients ----------\n eservice_clients = get_eservice_list(state, options.eservice_group)\n if len(eservice_clients) == 0 :\n raise Exception('unable to locate enclave services in the group %s', options.eservice_group)\n\n preferred_eservice_client = get_eservice(state, eservice_group=options.eservice_group)\n if preferred_eservice_client.interpreter != options.interpreter :\n raise Exception('enclave interpreter does not match requested contract interpreter %s', options.interpreter)\n\n # ---------- set up the provisioning service clients ----------\n pservice_clients = get_pservice_list(state, options.pservice_group)\n if len(pservice_clients) == 0 :\n raise Exception('unable to locate provisioning services in the group %s', options.pservice_group)\n\n # ---------- register contract ----------\n data_directory = state.get(['Contract', 'DataDirectory'])\n ledger_config = state.get(['Ledger'])\n\n try :\n provisioning_service_keys = [pc.identity for pc in pservice_clients]\n contract_id = register_contract(\n ledger_config, client_keys, contract_code, provisioning_service_keys)\n\n logger.debug('Registered contract with class %s and id %s', contract_class, contract_id)\n contract_state = ContractState.create_new_state(contract_id)\n contract = Contract(contract_code, contract_state, contract_id, client_keys.identity)\n\n # must fix this later\n contract.extra_data['preferred-enclave'] = preferred_eservice_client.enclave_id\n\n contract_file = 
\"{0}_{1}.pdo\".format(contract_class, contract.short_id)\n if options.save_file :\n contract_file = options.save_file\n\n contract.save_to_file(contract_file, data_dir=data_directory)\n\n except Exception as e :\n raise Exception('failed to register the contract; {0}'.format(str(e)))\n\n # provision the encryption keys to all of the enclaves\n try :\n encrypted_state_encryption_keys = __add_enclave_secrets(\n ledger_config, contract.contract_id, client_keys, eservice_clients, pservice_clients)\n\n for enclave_id in encrypted_state_encryption_keys :\n encrypted_key = encrypted_state_encryption_keys[enclave_id]\n contract.set_state_encryption_key(enclave_id, encrypted_key)\n\n contract.save_to_file(contract_file, data_dir=data_directory)\n except Exception as e :\n raise Exception('failed to provisioning the enclaves; {0}'.format(str(e)))\n\n # create the initial contract state\n try :\n __create_contract(ledger_config, client_keys, preferred_eservice_client, eservice_clients, contract)\n\n contract.contract_state.save_to_cache(data_dir = data_directory)\n contract.save_to_file(contract_file, data_dir=data_directory)\n except Exception as e :\n raise Exception('failed to create the initial contract state; {0}'.format(str(e)))\n\n if contract_id and options.symbol :\n bindings.bind(options.symbol, contract_id)",
"def add_insert(self, lines):\n return self._add_scope(lines, '%{', '%}')",
"def add_line(self, line):\n self._set_instance_data('body', self.indent + ' ' * 4 + line)",
"def add_extend(self, lines, name=''):\n return self._add_scope(lines, '%extend ' + name + '{', '}', indent=None, inline=False)",
"def add_line(self, line):\n self.code.extend([\" \" * self.indent_level, line, \"\\n\"])",
"def add_line(self, line):\n self.code.extend([\" \" * self.indent_level, line, \"\\n\"])",
"def do_add_route(self, line):\n items = line.split(' ')\n if len(items) < 3:\n log.error('route only takes at least 3 arguments: '\n 'network via_address metric')\n else:\n points = []\n i = 2\n while i < len(items):\n points.append((items[i-1], items[i]))\n i += 2\n log.critical('Add route request at %s',\n datetime.datetime.now().strftime('%H.%M.%S.%f'))\n self.fibbing.install_route(items[0], points, True)",
"def beginScope():",
"def proc_body(self, statements):\n for stmt in statements:\n if (isinstance(stmt, FunctionDef) and stmt.name not in\n {KW_RECV_EVENT, KW_SENT_EVENT}):\n self.debug(\"Adding function %s to process scope.\" % stmt.name,\n stmt)\n self.current_scope.add_name(stmt.name)\n elif isinstance(stmt, ClassDef):\n self.debug(\"Adding class %s to process scope.\" % stmt.name,\n stmt)\n self.current_scope.add_name(stmt.name)\n elif isinstance(stmt, Assign):\n for expr in stmt.targets:\n if isinstance(expr, Name):\n self.debug(\n \"Adding variable %s to process scope.\" % expr.id,\n stmt)\n self.current_scope.add_name(expr.id)\n elif isinstance(stmt, AugAssign):\n if isinstance(target, Name):\n self.current_scope.add_name(target.id)\n for stmt in statements:\n self.visit(stmt)\n if self.current_label is not None:\n # Create a noop statement to hold the last label:\n self.create_stmt(dast.NoopStmt, statements[-1], nopush=True)",
"def add(self, line):\n self.body.append(line)",
"def append_circuit(self, lines: Tuple[int, int], circuit: ACircuit, content: str) -> None:",
"def add_scope(self, scope_name):\r\n scp = '{}/{}'.format(self._model, scope_name)\r\n self._scopes.append(scp)",
"def add_python_append(self, method: Function, lines):\n return self.add_feature(lines, method.pattern, 'pythonappend')",
"def add_python(self, lines):\n return self._add_scope(lines, '%pythoncode %{', '%}', inline=False)",
"def add_contract(self, contract: 'cn.Contract_HTLC') -> bool:\n if self.is_owner1(contract.payer):\n if self.amount_owner1_can_transfer_to_owner2 < contract.amount_in_msat:\n contract.invalidate()\n return False\n self._owner1_htlc_locked_setter(self._owner1_htlc_locked + contract.amount_in_msat)\n else:\n if self.amount_owner2_can_transfer_to_owner1 < contract.amount_in_msat:\n contract.invalidate()\n return False\n self._owner2_htlc_locked_setter(self._owner2_htlc_locked + contract.amount_in_msat)\n self._state.htlc_contracts.append(contract)\n return True",
"def do_add_node(self, line=''):\n self.fibbing.add_node()",
"def addConstrs(self, constrs, name=''):\n ...",
"def add(self, name, command):",
"def add(self, line):\n self.cull()\n self.lines.append(line)",
"def add_python_begin(self, lines):\n return self._add_scope(lines, '%pythonbegin %{', '%}', inline=False)"
] | [
"0.59992",
"0.5861132",
"0.58554286",
"0.55261743",
"0.5306385",
"0.52570355",
"0.5239001",
"0.52368",
"0.51944435",
"0.5152344",
"0.5056834",
"0.50365",
"0.49871847",
"0.49718606",
"0.49624184",
"0.49624184",
"0.49115306",
"0.4886142",
"0.48651546",
"0.48415607",
"0.4821491",
"0.48176786",
"0.4750986",
"0.47466275",
"0.47322285",
"0.47240257",
"0.47200447",
"0.47089088",
"0.47018525",
"0.46829143"
] | 0.730265 | 0 |
Parse a Post Office Box PMB digits POB digits P.O.B digits P.O. Box digits P.O. digits | def parse_pob(self):
index = self.index
start = self.index
if self.words[index]['word'] == 'pob' or self.words[index]['word'] == 'pmb':
index += 1
if index == self.length:
return None, 0
if self.words[index]['word'] == '.':
return self.words[index+1]['word'], 3
else:
return self.words[index]['word'], 2
elif self.words[index]['word'] == 'p':
index += 1
if index == self.length:
return None, 0
if self.words[index]['word'] == '.':
index += 1
if index == self.length:
return None, 0
if self.words[index]['word'] not in ['o', 'm']:
return None, 0
index += 1
if index == self.length:
return None, 0
if self.words[index]['word'] == '.':
index += 1
if index == self.length:
return None, 0
if self.words[index]['word'] in ['b', 'box']:
index += 1
if index == self.length:
return None, 0
elif not self.words[index]['word'].isdigit():
return None,0
if self.words[index]['word'] == '.':
index += 1
if index == self.length:
return None, 0
return self.words[index]['word'], index - start + 1
if self.words[index]['word'] == 'po':
index += 1
if index == self.length:
return None, 0
if self.words[index]['word'] == 'box':
index += 1
if index == self.length:
return None, 0
return self.words[index]['word'], index - start + 1
return None, 0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def extract_postcode(s):\n pc_regex = r'([Gg][Ii][Rr] 0[Aa]{2})|((([A-Za-z][0-9]{1,2})|(([A-Za-z][A-Ha-hJ-Yj-y]'\n pc_regex += r'[0-9]{1,2})|(([A-Za-z][0-9][A-Za-z])|([A-Za-z][A-Ha-hJ-Yj-y][0-9]?[A-Za-z]'\n pc_regex += r'))))\\s?[0-9][A-Za-z]{2})'\n\n re_search = re.search(pc_regex, s)\n if re_search:\n p = re_search.group(0)\n else:\n p = ''\n return p",
"def _decode_string(box_string):\r\n if box_string == \"no_box\":\r\n return np.zeros((0,4))\r\n else:\r\n try:\r\n boxes = np.array([np.array([int(eval(i)) for i in box.split(\" \")])\r\n for box in box_string.split(\";\")])\r\n return boxes\r\n except:\r\n print(box_string)\r\n print(\"Submission is not well formatted. empty boxes will be returned\")\r\n return np.zeros((0,4))",
"def _read_pnm_header(self, data):\r\n bpm = data[1:2] in b\"14\"\r\n regroups = re.search(b\"\".join((\r\n b\"(^(P[123456]|P7 332)\\s+(?:#.*[\\r\\n])*\",\r\n b\"\\s*(\\d+)\\s+(?:#.*[\\r\\n])*\",\r\n b\"\\s*(\\d+)\\s+(?:#.*[\\r\\n])*\" * (not bpm),\r\n b\"\\s*(\\d+)\\s(?:\\s*#.*[\\r\\n]\\s)*)\")), data).groups() + (1, ) * bpm\r\n self.header = regroups[0]\r\n self.magicnum = regroups[1]\r\n self.width = int(regroups[2])\r\n self.height = int(regroups[3])\r\n self.maxval = int(regroups[4])\r\n self.depth = 3 if self.magicnum in b\"P3P6P7 332\" else 1\r\n self.tupltypes = [self._types[self.magicnum]]",
"def _parse_info(form) :\n w = 0\n n = 0\n nh = 0\n for part in re.findall(\"[A-Z]+[0-9]+\",form):\n m = re.match(\"([A-Z]+)([0-9]+)\",part)\n element = m.group(1)\n number = int(m.group(2))\n w += mass[element.capitalize()]*number\n n += number\n if element != \"H\" : nh += number\n return w,n,nh",
"def _parse_info(form) :\n w = 0\n n = 0\n nh = 0\n for part in re.findall(\"[A-Z]+[0-9]+\",form):\n m = re.match(\"([A-Z]+)([0-9]+)\",part)\n element = m.group(1)\n number = int(m.group(2))\n w += mass[element.capitalize()]*number\n n += number\n if element != \"H\" : nh += number\n return w,n,nh",
"def test_parse_post_code_field(self):\n fields = {'Post code': {'offset': 171,\n 'length': 4}}\n p = top.Parser(fields=fields)\n received = p.parse_line(self._line)\n expected = {'Post code': '2048'}\n msg = 'Post code field parse incorrect'\n self.assertEqual(received, expected, msg)",
"def parse_postalUS(self):\n \n index = self.index\n \n # US Postal Code\n if len(self.words[index]['word']) != 5 or not self.words[index]['word'].isdigit():\n return None, 0\n postal = self.words[index]['word']\n \n if index + 1 < self.length:\n if self.words[index+1]['word'] == '-':\n index += 2\n if index == self.length:\n return None, 0\n if len(self.words[index]['word']) == 4 and self.words[index]['word'].isdigit():\n postal += '-' + self.words[index]['word']\n return postal, 3\n else:\n return postal, 1\n \n return postal, 1",
"def parse_pint_string(self, pint_string):\n val = pint_string.split(' ')[0]\n units = pint_string.split(val+' ')[-1]\n return val, units",
"def parse_postalCA(self):\n \n index = self.index\n \n if len(self.words[index]['word']) != 3:\n return None, 0\n postal = self.words[index]['word']\n index += 1\n if index == self.length:\n return None, 0\n \n if len(self.words[index]['word']) != 3:\n return None, 0\n postal += self.words[index]['word']\n \n return postal, 2",
"def parse_precision(p):\n min = max = 0\n for c in p:\n if c in '@0':\n min += 1\n max += 1\n elif c == '#':\n max += 1\n elif c == ',':\n continue\n else:\n break\n return min, max",
"def parse_phone(s):\n pattern = '''\n ^\\s* # Leading spaces\n (?P<areacode>\n \\d{3}-? # \"xxx\" or \"xxx-\"\n | \\(\\d{3}\\)\\s* # OR \"(xxx) \"\n )\n (?P<prefix>\\d{3}) # xxx\n -? # Dash (optional)\n (?P<suffix>\\d{4}) # xxxx\n \\s*$ # Trailing spaces\n '''\n matcher = re.compile(pattern, re.VERBOSE)\n matches = matcher.match(s)\n if matches is None:\n print(s)\n return s\n else:\n areacode = re.search('\\d{3}', matches.group ('areacode')).group()\n prefix = matches.group ('prefix')\n suffix = matches.group ('suffix')\n return areacode+'-'+prefix+'-'+suffix",
"def digibokurn_from_text(T):\n return re.findall(\"(?<=digibok_)[0-9]{13}\", T)",
"def _decode_35701(data):\n start_byte = 0\n n_bytes = 2\n var_id = struct.unpack('<H', data[start_byte:start_byte + n_bytes])[0]\n if var_id == 29995:\n start_byte += n_bytes\n n_bytes = 4\n n_points = struct.unpack(\n '<I', data[start_byte:start_byte + n_bytes])[0]\n return {'n_points': n_points}",
"def parse_puzzle(puzzle):\n puzzle = re.sub(\"\\sGrid \\d{2}\",\"\", sample)\n puzzle = puzzle.strip().split(\"\\n\") \n return puzzle",
"def parse_post_age(text):\n if 'hours' in text:\n return '1'\n return ''.join(list(filter(lambda c: c.isdigit(), text)))",
"def clean_number_plate(self, vrn):\n cleaned = re.sub(r'[^\\dA-Z]', '', vrn)\n\n if re.match(r'^[A-Z]{2}', cleaned) and len(cleaned) == 7:\n if cleaned[2] == 'O':\n cleaned = cleaned[:2] + '0' + cleaned[3:]\n if cleaned[2] == 'I':\n cleaned = cleaned[:2] + '1' + cleaned[3:]\n if cleaned[3] == 'O':\n cleaned = cleaned[:3] + '0' + cleaned[4:]\n if cleaned[3] == 'I':\n cleaned = cleaned[:3] + '1' + cleaned[4:]\n\n if re.match(r'^B', cleaned) and len(cleaned) == 7:\n if cleaned[1] == 'O':\n cleaned = cleaned[:1] + '0' + cleaned[2:]\n if cleaned[1] == 'I':\n cleaned = cleaned[:1] + '1' + cleaned[2:]\n if cleaned[2] == 'O':\n cleaned = cleaned[:2] + '0' + cleaned[3:]\n if cleaned[2] == 'I':\n cleaned = cleaned[:2] + '1' + cleaned[3:]\n if cleaned[3] == 'O':\n cleaned = cleaned[:3] + '0' + cleaned[4:]\n if cleaned[3] == 'I':\n cleaned = cleaned[:3] + '1' + cleaned[4:]\n\n if re.match(r'^[A-Z]{2}', cleaned) and len(cleaned) == 8:\n if cleaned[0] == 'Y':\n cleaned = 'V' + cleaned[1:]\n if cleaned[1] == 'Y':\n cleaned = cleaned[0] + 'V' + cleaned[2:]\n\n return cleaned",
"def parse_proasis(input_string):\n return (\n input_string[:3].strip(),\n int(input_string[5:].strip()),\n input_string[3:5].strip(),\n )",
"def parse_pdb(self, line):\n super().parse_pdb(line)\n self.num_remark = int(line[10:15].strip())\n self.num_het = int(line[20:25].strip())\n self.num_helix = int(line[25:30].strip())\n self.num_sheet = int(line[30:35].strip())\n self.num_turn = int(line[35:40].strip())\n self.num_site = int(line[40:45].strip())\n self.num_xform = int(line[45:50].strip())\n self.num_coord = int(line[50:55].strip())\n self.num_ter = int(line[55:60].strip())\n self.num_conect = int(line[60:65].strip())\n self.num_seq = int(line[65:70].strip())",
"def test_parse_pieces_field(self):\n fields = {'Pieces': {'offset': 588,\n 'length': 5}}\n p = top.Parser(fields=fields)\n received = p.parse_line(self._line)\n expected = {'Pieces': '00001'}\n msg = 'Pieces field parse incorrect'\n self.assertEqual(received, expected, msg)",
"def parse_paragraphs(self):\n paragraphs = self.paragraphs\n for paragraph in paragraphs:\n try:\n if paragraph == \"Oznaczenie sądu\" and not self.locked_cells[\"Oznaczenie sądu\"]:\n self.search_index(4, \"Oznaczenie sądu\", paragraph)\n\n if paragraph.startswith(\"3.Firma,\") and not self.locked_cells[\"Firma, pod którą spółka działa\"]:\n self.search_index(2, \"Firma, pod którą spółka działa\", paragraph)\n\n if paragraph.startswith(\"3.Nazwa\") and not self.locked_cells[\"Firma, pod którą spółka działa\"]:\n self.search_index(2, \"Firma, pod którą spółka działa\", paragraph)\n\n if paragraph.startswith(\"1.Siedziba\") and not self.locked_cells[\"Siedziba\"]:\n self.search_index(4, \"Siedziba\", paragraph)\n\n if paragraph.startswith(\"2.Adres\") and not self.locked_cells[\"Adres\"]:\n self.search_index(4, \"Adres\", paragraph)\n\n if paragraph.startswith(\"Numer KRS\") and not self.locked_cells[\"KRS\"]:\n self.datafields[\"KRS\"] = paragraph.split()[-1]\n self.locked_cells[\"KRS\"] = True\n\n if paragraph.startswith(\"2.Numer REGON/NIP\") and not self.locked_cells[\"REGON/NIP\"]:\n self.search_index(2, \"REGON/NIP\", paragraph)\n\n if paragraph.startswith(\"1.Oznaczenie formy prawnej\") and not self.locked_cells[\"Forma Prawna\"]:\n self.search_index(2, \"Forma Prawna\", paragraph)\n\n if paragraph.startswith(\"1.Wysokość kapitału zakładowego\"):\n self.search_index(2, \"Kapitał Zakładowy\", paragraph)\n\n if paragraph.startswith(\"5.Kwotowe określenie części kapitału wpłaconego\"):\n self.search_index(2, \"Kapitał Wpłacony\", paragraph)\n\n if paragraph.startswith(\"Rubryka 7 - Dane wspólników\"): # Open \"Wspólnicy\" parsing block.\n self.locked_cells[\"Wspólnicy\"] = True\n\n if paragraph.startswith(\"Rubryka 7 - Komitet założycielski\"): # STOWARZYSZENIE\n break\n\n if paragraph.startswith(\"1.Nazwisko / Nazwa lub firma\") and self.locked_cells[\"Wspólnicy\"]:\n self.active += 1\n self.datafields[f\"Wspólnik {self.active}\"] = {}\n\n pattern = rf\"^[A-Z{self.unicode}]+\"\n self.search_loop(pattern, \"Wspólnik\", \"Nazwisko/Nazwa\", paragraph)\n\n if paragraph.startswith(\"2.Imiona\") and self.locked_cells[\"Wspólnicy\"]:\n pattern = rf\"[A-Z{self.unicode}]+\\s[A-Z{self.unicode}]+$|^[A-Z{self.unicode}]+$|^[*]+$\"\n self.search_loop(pattern, \"Wspólnik\", \"Imiona\", paragraph)\n\n if paragraph.startswith(\"3.Numer PESEL/REGON\") and self.locked_cells[\"Wspólnicy\"]:\n pattern = r\"[-]+|[0-9]{9,11}\"\n self.search_loop(pattern, \"Wspólnik\", \"PESEL/REGON\", paragraph)\n\n if paragraph.startswith(\"4.Numer KRS\") and self.locked_cells[\"Wspólnicy\"]:\n pattern = r\"[-]+|[*]+|[0-9]{10}$\"\n self.search_loop(pattern, \"Wspólnik\", \"KRS\", paragraph)\n\n if paragraph.startswith(\"5.Posiadane przez wspólnika udziały\"):\n index = paragraphs.index(paragraph)\n line_1 = paragraphs[index + 2].strip(\" \")\n line_2 = paragraphs[index + 3].strip(\" \")\n if line_2:\n self.datafields[f\"Wspólnik {self.active}\"][\"Udziały\"] = f\"{line_1} {line_2}\"\n else:\n self.datafields[f\"Wspólnik {self.active}\"][\"Udziały\"] = f\"{line_1}\"\n\n if paragraph == \"ZARZĄD\":\n self.locked_cells[\"Wspólnicy\"] = False # Close \"Wspólnicy\" parsing block.\n self.locked_cells[\"Zarząd\"] = True # Open \"Zarząd\" parsing block.\n self.active = 0\n\n if paragraph.startswith(\"1.Nazwisko\") and self.locked_cells[\"Zarząd\"]:\n self.active += 1\n self.datafields[f\"Zarząd {self.active}\"] = {}\n pattern = rf\"^[A-Z{self.unicode}]+\"\n self.search_loop(pattern, \"Zarząd\", \"Nazwisko/Nazwa\", paragraph)\n\n 
if paragraph.startswith(\"2.Imiona\") and self.locked_cells[\"Zarząd\"]:\n pattern = rf\"^[A-Z{self.unicode}]+\\s[A-Z{self.unicode}]+$|^[A-Z{self.unicode}]+$|^[*]+$\"\n self.search_loop(pattern, \"Zarząd\", \"Imiona\", paragraph)\n\n if paragraph.startswith(\"5.Funkcja w organie \") and self.locked_cells[\"Zarząd\"]:\n paragraph = paragraph.strip(\"5.Funkcja w organie reprezentującym \")\n self.datafields[f\"Zarząd {self.active}\"][\"Funkcja\"] = paragraph\n\n if paragraph.startswith(\"Rubryka 2 - Organ nadzoru\"):\n self.locked_cells[\"Zarząd\"] = False # Close \"Zarząd\" parsing block.\n except KeyError:\n pass\n return self.datafields",
"def parse_nbest_line(line):\n fields=[ x.strip() for x in line.strip().split('|||') ]\n fields[0]=int(fields[0])\n fields[3]=float(fields[3])\n return fields",
"def phoneNumberExtractor(self,data):\n\t\tdata = data.replace(\"\\r\", \" \")\n\t\tdata = data.replace(\"\\r\\n\", \" \")\n\n\t\t#first is identifying 10 digits code\n\t\tdata = data.split()\n\t\tresult = []\n\t\tfor word in data:\n\t\t\tres = None\n\t\t\tres = word if word.isdecimal() and len(word) == 10 and not res else res\n\t\t\tres = word[2:] if word.isdecimal() and len(word) == 12 and not res else res\n\t\t\tres = word[3:] if word[3:].isdecimal() and len(word) == 10 and not res else res\n\t\t\tif (\"(\" and \")\") in word or \"-\" in word:\n\t\t\t\tword = word.replace(\"(\",\"\")\n\t\t\t\tword = word.replace(\")\",\"\")\n\t\t\t\tword = word.replace (\"-\",\"\")\n\t\t\t\tres = word if(len(word) == 10) else None\n\t\t\tif res:\n\t\t\t\tresult.append(res)\n\t\t\t\tdel(res)\n\t\treturn set(result)",
"def test_parses_map_3(self):\n p = GPBEC()\n p.parse(\"GPBEC,220516,5130.02,N,00046.34,W,213.8,T,218.0,M,0004.6,N,EGLM,X*11\")\n\n self.assertEquals(\"GPBEC\", p.sen_type)\n self.assertEquals(\"220516\", p.timestamp)\n self.assertEquals(\"5130.02\", p.waypoint_lat)\n self.assertEquals(\"N\", p.waypoint_lat_dir)\n self.assertEquals(\"00046.34\", p.waypoint_lon)\n self.assertEquals(\"W\", p.waypoint_lon_dir)\n self.assertEquals(\"213.8\", p.bearing_true)\n self.assertEquals(\"T\", p.bearing_true_sym)\n self.assertEquals(\"218.0\", p.bearing_mag)\n self.assertEquals(\"M\", p.bearing_mag_sym)\n self.assertEquals(\"0004.6\", p.nautical_miles)\n self.assertEquals(\"N\", p.nautical_miles_sym)\n self.assertEquals(\"EGLM\", p.waypoint_id)\n self.assertEquals(\"X\", p.faa_mode)\n self.assertEquals(\"11\", p.checksum)",
"def test_parses_map_2(self):\n p = GPBEC()\n p.parse(\"GPBEC,220516,5130.02,N,00046.34,W,213.8,T,218.0,M,0004.6,N,EGLM*11\")\n\n self.assertEquals(\"GPBEC\", p.sen_type)\n self.assertEquals(\"220516\", p.timestamp)\n self.assertEquals(\"5130.02\", p.waypoint_lat)\n self.assertEquals(\"N\", p.waypoint_lat_dir)\n self.assertEquals(\"00046.34\", p.waypoint_lon)\n self.assertEquals(\"W\", p.waypoint_lon_dir)\n self.assertEquals(\"213.8\", p.bearing_true)\n self.assertEquals(\"T\", p.bearing_true_sym)\n self.assertEquals(\"218.0\", p.bearing_mag)\n self.assertEquals(\"M\", p.bearing_mag_sym)\n self.assertEquals(\"0004.6\", p.nautical_miles)\n self.assertEquals(\"N\", p.nautical_miles_sym)\n self.assertEquals(\"EGLM\", p.waypoint_id)\n self.assertEquals(\"11\", p.checksum)",
"def test_parses_map_1(self):\n p = GPBEC()\n p.parse(\"$GPBEC,081837,,,,,,T,,M,,N,*13\")\n\n self.assertEquals(\"GPBEC\", p.sen_type)\n self.assertEquals(\"081837\", p.timestamp)\n self.assertEquals(\"\", p.waypoint_lat)\n self.assertEquals(\"\", p.waypoint_lat_dir)\n self.assertEquals(\"\", p.waypoint_lon)\n self.assertEquals(\"\", p.waypoint_lon_dir)\n self.assertEquals(\"\", p.bearing_true)\n self.assertEquals(\"T\", p.bearing_true_sym)\n self.assertEquals(\"\", p.bearing_mag)\n self.assertEquals(\"M\", p.bearing_mag_sym)\n self.assertEquals(\"\", p.nautical_miles)\n self.assertEquals(\"N\", p.nautical_miles_sym)\n self.assertEquals(\"\", p.waypoint_id)\n self.assertEquals(\"13\", p.checksum)",
"def phone_parser(phone, mode='PL'):\n\n if not phone:\n raise WrongInput(\"Input cannot be blank\")\n if not isinstance(phone, str):\n raise WrongInput(\"Invalid phone format\")\n\n if mode == 'PL':\n gsm_prefixes = ['50', '51', '53', '57', '60', '66', '69', '72', '73', '78', '79', '88']\n if phone[:2] in gsm_prefixes:\n phone_pattern = re.compile(r'''\n # don't match beginning of string\n (\\d{0,2}) # area code of 2 digits (e.g. '42')\n \\D* # optional separator\n (\\d{3}\\D*\\d{3}\\D*\\d{3}) # rest of number - divide into 3 3-digit sequences with optional separators\n # (e.g. '605-789-567')\n $ # end of string\n ''', re.VERBOSE)\n else:\n phone_pattern = re.compile(r'''\n # don't match beginning of string\n (\\d{0,2}) # area code of 2 digits (e.g. '42')\n \\D* # optional separator\n (\\d{3}\\D*\\d{2}\\D*\\d{2}) # rest of number - divide into 3 2-digit sequences with optional separators\n # (e.g. '605-78-56')\n $ # end of string\n ''', re.VERBOSE)\n else:\n phone_pattern = re.compile(r'''\n # don't match the beginning of the string\n (\\d{3}) # area code of 3 digits (e.g. '800')\n \\D* # optional separator\n (\\d{3}\\D*\\d{4}\\D*\\d+) # rest of number - divide into 3 sequences with optional separators: two obligatory\n # with 3 and 4 digits, one optional with any number of digits\n $ # end of string\n ''', re.VERBOSE)\n if not re.search(phone_pattern, phone):\n raise WrongInput(\"Invalid phone format.\")\n\n phone_obj = phone_pattern.search(phone)\n phone_area, phone_num = phone_obj.groups()\n phone = re.sub(r'\\D', '', phone_num)\n return phone, phone_area, phone_num",
"def readAMBERCrd(self, phys, filename):\r\n\r\n\tf = open(filename, 'r')\r\n\tdata = f.read()\r\n\t# Keep going with this!!!\r\n numbers = data.split(' ')\r\n while (numbers.count('') != 0):\r\n numbers.remove('')\r\n \r\n phys.posvec.resize(int(numbers[0].replace('\\n', '')))\r\n for i in range(1, len(numbers), 3):\r\n if (numbers[i].find('\\n') != -1):\r\n numbers[i].replace('\\n', '')\r\n phys.positions[i-1] = numbers[i]\r\n phys.positions[i] = numbers[i+1]\r\n phys.positions[i+1] = numbers[i+2]",
"def parse_bagexpr(text: str) -> Tuple[int, str]:\n match = re.fullmatch(r\"(\\d+) (.*) bags?\", text)\n assert match\n num, kind = match.groups()\n return int(num), kind",
"def parse_binary_field(b):\n\n\n codec, length, params = struct.unpack(\">iii\", b[:12])\n len4 = lambda b: int(len(b[12:]) / 4)\n if codec == 1: return struct.unpack(\"f\" * length, b[12:])\n elif codec == 2: return struct.unpack(\"b\" * length, b[12:])\n elif codec == 3: return struct.unpack(\">\" + \"h\" * length, b[12:])\n elif codec == 4: return struct.unpack(\">\" + \"i\" * length, b[12:])\n elif codec == 5:\n chars = struct.unpack(\"c\" * (length * 4), b[12:])\n return [b\"\".join([\n c for c in chars[i * 4: (i + 1) * 4] if c != b\"\\x00\"\n ]).decode() for i in range(length)]\n elif codec == 6:\n integers = struct.unpack(\">\" + (\"i\" * len4(b)), b[12:])\n return [chr(c) if c != 0 else \"\" for c in run_length_decode(integers)]\n elif codec == 7:\n integers = struct.unpack(\">\" + (\"i\" * len4(b)), b[12:])\n return run_length_decode(integers)\n elif codec == 8:\n integers = struct.unpack(\">\" + (\"i\" * len4(b)), b[12:])\n return delta_decode(run_length_decode(integers))\n elif codec == 9:\n integers = struct.unpack(\">\" + (\"i\" * len4(b)), b[12:])\n return [n / params for n in run_length_decode(integers)]\n elif codec == 10:\n integers = struct.unpack(\">\" + (\"h\" * int(len(b[12:]) / 2)), b[12:])\n return [n / params for n in delta_decode(recursive_decode(integers))]\n else: raise ValueError(\".mmtf error: {} is invalid codec\".format(codec))",
"def parse_number(txt):\n return int(txt)"
] | [
"0.5509301",
"0.5439012",
"0.5419785",
"0.5337647",
"0.5337647",
"0.5328429",
"0.53065366",
"0.51627904",
"0.5156804",
"0.5141506",
"0.5076204",
"0.50632983",
"0.5041245",
"0.50348336",
"0.49797523",
"0.49784377",
"0.4953256",
"0.4939872",
"0.4927507",
"0.49075076",
"0.489521",
"0.48917124",
"0.48855507",
"0.4869515",
"0.4867919",
"0.48655865",
"0.48623946",
"0.4851821",
"0.4848811",
"0.4828232"
] | 0.6392054 | 0 |
Parse a Postal Station (STN|RPO|Station) word | def parse_stn(self):
index = self.index
start = self.index
if index == self.length:
return None, 0
if self.words[index]['word'] == 'stn' or self.words[index]['word'] == 'station' or self.words[index]['word'] == 'rpo':
index += 1
if index == self.length:
return None, 0
if self.words[index]['word'] == '.':
return self.words[index+1]['word'], 3
else:
return self.words[index]['word'], 2
return None, 0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parse_station_name (station_name):\n try:\n _,chinese_name,code,full_pinyin,short_pinyin = station_name.split('|')\n except ValueError:\n # print(station_name)\n _,chinese_name,code,full_pinyin,short_pinyin,_ = station_name.split('|')\n return {chinese_name:code,full_pinyin:code,short_pinyin:code}",
"def parse_streetdir(self):\n \n first = self.words[self.index]['word']\n if self.index + 1 < self.length:\n second = self.words[self.index+1]['word']\n else:\n second = None\n \n if first in ['northwest', 'northeast', 'southwest', 'southeast']:\n return first, 1 \n elif first == 'nw':\n return \"northwest\", 1\n elif first == 'ne':\n return \"northeast\", 1\n elif first == 'sw':\n return \"southwest\", 1\n elif first == 'se':\n return \"southeast\", 1\n \n if first in ['n', 'north']:\n if second in ['w', 'west']:\n return \"northwest\", 2\n elif second in ['e', 'east']:\n return \"northeast\", 2\n else:\n return \"north\", 1\n elif first in ['s', 'south']:\n if second in ['w', 'west']:\n return \"southwest\", 2\n elif second in ['e', 'east']:\n return \"southeast\", 2\n else:\n return \"south\", 1\n elif first in ['e', 'east']:\n return \"east\", 1\n elif first in ['w', 'west']:\n return \"west\", 1\n \n return None,0",
"def canon_station_name(s, line):\n s = s.strip()\n s = re.sub('^Heathrow$', 'Heathrow Terminals 1, 2, 3', s)\n s = re.sub('^Olympia$', 'Kensington (Olympia)', s)\n s = re.sub('^Warwick Ave$', 'Warwick Avenue', s)\n s = re.sub('^Camden$', 'Camden Town', s)\n s = re.sub('^Central$', 'Finchley Central', s) # They say \"Between Central and East Finchley\"\n s = re.sub('\\s*Platform \\d$', '', s)\n s = s + ' Station'\n s = s.replace('(Bakerloo)', 'Bakerloo').replace('Earls', 'Earl\\'s') \\\n .replace(' fast ', ' ') \\\n .replace('\\xe2\\x80\\x99', \"'\") \\\n .replace('St ', 'St. ') \\\n .replace('Elephant and Castle', 'Elephant & Castle') \\\n .replace('Lambeth Station', 'Lambeth North Station') \\\n .replace('Chalfont Station', 'Chalfont & Latimer Station') \\\n .replace('West Brompon', 'West Brompton') \\\n .replace('Picadilly Circus', 'Piccadilly Circus') \\\n .replace('High Barent', 'High Barnet') \\\n .replace('Bartnet', 'Barnet') \\\n .replace('Faringdon', 'Farringdon') \\\n .replace('Turnham Greens', 'Turnham Green') \\\n .replace('Ruilsip', 'Ruislip') \\\n .replace('Dagemham', 'Dagenham') \\\n .replace('Edgware Road (H & C)', 'Edgware Road Circle') \\\n .replace('Hammersmith (Circle and H&C)', 'Hammersmith') \\\n .replace('Shepherds Bush (Central Line)', \"Shepherd's Bush\") \\\n .replace('Terminals 123', 'Terminals 1, 2, 3').replace('Terminal 1,2,3', 'Terminals 1, 2, 3') \\\n .replace('Woodford Junction', 'Woodford') \\\n .replace(\"King's Cross Station\", \"King's Cross St. Pancras Station\") \\\n .replace(\"Kings Cross Station\", \"King's Cross St. Pancras Station\") \\\n .replace('Central Finchley', 'Finchley Central').replace('District and Picc', 'D & P') \\\n .replace('South Fields', 'Southfields') \\\n .replace('Regents Park', \"Regent's Park\") \\\n .replace('Bromley-by-Bow', \"Bromley-By-Bow\") \\\n .replace('Brent Oak', 'Burnt Oak') \\\n .replace('St. Johns Wood', \"St. John's Wood\") \\\n .replace('Totteridge and Whetstone', 'Totteridge & Whetstone') \\\n .replace('Newbury Park Loop', 'Newbury Park') \\\n .replace('Harrow-on-the-Hill', 'Harrow on the Hill')\n if s == 'Edgware Road Station' and line == 'B':\n s = 'Edgware Road Bakerloo Station'\n if s == 'Edgware Road Station' and line != 'B':\n s = 'Edgware Road Circle Station'\n return s",
"def parse_streetname(self):\n index = self.index\n \n name = \"\"\n for i in range(4):\n if index + i == self.length:\n break\n if self.words[index+i]['word'] == ',':\n break\n # Hack\n if self.words[index+i]['word'] == 'doctor':\n self.words[index+i]['word'] = 'drive'\n break\n try:\n word = sttype[self.words[index+i]['word']]\n break\n except:\n try:\n word = vocab[self.words[index+i]['word']]\n if Vocabulary.STREET_TYPE in word['tag']:\n break\n if name != '':\n name += ' ' + word['lemma'][0]\n else:\n name = word['lemma'][0]\n except: \n if self.words[index+i]['word'][-2:] in [ 'th', 'st', 'nd', 'rd' ]:\n name = self.words[index+i]['word'][:-2]\n else:\n self.index += i\n _dir, _n = self.parse_streetdir()\n self.index -= i\n if _dir:\n break\n if name != '':\n name += ' ' + self.words[index+i]['word']\n else:\n name = self.words[index+i]['word']\n \n if i == 0 or i == 4:\n return None, 0\n else:\n return name, i",
"def stationabbreviation(station):\n stations = {'Utrecht': 'Ut',\n 'Amsterdam Centraal': 'asd'}\n if station in stations:\n return stations[station]",
"def street_parser(*street_data):\n\n # parsing tuples\n if len(street_data) == 2:\n if not isinstance(street_data[0], str) and not isinstance(street_data[1], str):\n raise WrongInput(\"Invalid format\")\n # street name as the tuple's first item\n strname, strnumber = street_data\n # street number as the tuple's first item\n if street_data[0][0] in digits:\n strname, strnumber = strnumber, strname\n\n # parsing strings\n else:\n if not isinstance(street_data[0], str):\n raise WrongInput(\"Invalid format\")\n if not street_data[0]:\n raise WrongInput(\"Input cannot be blank\")\n\n # string starting with street number\n if street_data[0][0] in digits:\n street_pattern = re.compile(r'''\n ^ # beginning of string\n (\\d+) # street number is any number of digits\n \\W+ # separator\n (\\w+\\W*\\w*\\W*) # street name is one or more words with optional separators\n $ # end of string\n ''', re.VERBOSE)\n street_obj = street_pattern.search(street_data[0])\n strnumber, strname = street_obj.groups()\n\n # string starting with street name\n else:\n street_pattern = re.compile(r'''\n ^ # beginning of string\n (\\w+\\W*\\w*\\s*) # street name is one or more words with optional separators\n \\W+ # separator\n (\\d+) # street number is any number of digits\n $ # end of string\n ''', re.VERBOSE)\n street_obj = street_pattern.search(street_data[0])\n (strname, strnumber) = street_obj.groups()\n\n # replace specific words in street name with their abbreviates\n strname = strname.lower()\n special = {r'\\baleje\\b': 'Al.', r'\\bavenue\\b': 'Av.', r'\\broad\\b': 'Rd.', r'\\bsquare\\b': 'Sq.',\n r'\\bstreet\\b': 'St.', r'\\bdrive\\b': 'Dr.'}\n for key in special:\n strname = re.sub(key, special[key], strname)\n return strname.title(), strnumber",
"def nlp_parse(self, input):\n resp = {}\n resp['type'] = 'nomatch'\n VDB_set = {}\n WP_set = {}\n tagset = self.build_tagset(input)\n resp['words'] = self.build_keywords(tagset)\n w = resp['words']\n\n if not w:\n if constants.DEBUG:\n log.debug(\"No words: \" + str(resp))\n return resp\n\n # store nouns\n NN_set = set(w.get('NN', []))\n\n # matches a request for a list\n if 'list' in NN_set \\\n or 'List' in w.get('NNP', []):\n resp['count'] = w.get('CD', [constants.LIST_COUNT])[0]\n resp['type'] = 'show-list'\n if set(['serving', 'serve']) & set(w.get('VBG', [])):\n resp['meal'] = (NN_set & constants.MEALS_SET).pop()\n if 'in' in w.get('IN', []):\n resp['zone'] = w.get('NNP', [None])[0]\n if 'close' in w.get('VBD', []) \\\n or 'close' in w.get('JJ', []) \\\n or 'close' in NN_set:\n resp['distance'] = True\n return resp\n\n # finds neighborhood\n for word in tagset:\n if word[1] == 'VBD':\n VDB_set = word[0]\n for word in tagset:\n if word[1] == 'WP':\n WP_set = word[0]\n if 'neighborhood' in VDB_set and 'what' in WP_set:\n if w.get('NNP', [None])[0]: \n r_name = w.get('NNP', [None])[0]\n else :\n return resp\n\n r_name = w.get('NNP', [None])[0] \n resp['restaurant'] = r_name\n resp['type'] = 'name-zone'\n return resp\n\n # matches \"how expensive it is\" and \"is it expensive\"\n if 'expensive' in w.get('JJ', ()):\n if w.get('NNP', [None])[0]: \n r_name = w.get('NNP', [None])[0]\n else :\n return resp\n\n r_name = w.get('NNP', [None])[0] \n resp['restaurant'] = r_name\n resp['type'] = 'name-price'\n return resp\n\n if 'between' in w.get('IN', ()) \\\n or 'price' in NN_set:\n price_range = w.get('CD', ())\n\n # price between a and b\n # require at least 2 numerals\n if len(price_range) >= 2:\n resp['min'] = min(map(int, price_range))\n resp['max'] = max(map(int, price_range))\n resp['type'] = 'list-price-range'\n return resp\n\n # price of exactly a\n if len(price_range) > 0:\n price_range = w.get('CD', ())\n resp['price'] = min(price_range)\n resp['type'] = 'list-price-single'\n return resp\n\n\n # need to merge NN and JJ for this step\n w['NNJJ'] = NN_set | set(w.get('JJ', []))\n meal = constants.MEALS_SET & w['NNJJ']\n if meal:\n resp['type'] = 'list-meal-single'\n resp['meal'] = meal.copy().pop()\n return resp\n\n # matches a quality list\n if 'quality' in NN_set and \\\n (constants.QUALITIES & w['NNJJ']) and \\\n (set(['food', 'service']) & w['NNJJ']):\n resp['degree'] = (constants.QUALITIES \\\n & w['NNJJ']).pop()\n resp['type'] = 'list-quality-' + \\\n (set(['food', 'service']) & w['NNJJ']).pop()\n return resp\n\n # matches a phone number request\n if NN_set & constants.PHONE_KEYWORDS:\n r_name = w.get('NNP', [None])[0] or \\\n w['NN'][-1]\n\n resp['restaurant'] = r_name\n resp['type'] = 'name-phone'\n return resp\n\n # matches a single meal request\n if NN_set & constants.MEALS_SET:\n r_name = w.get('NNP', [None])[0] or \\\n w['NN'][-1]\n\n resp['restaurant'] = r_name\n resp['type'] = 'name-meal'\n resp['meal'] = word.lower()\n return resp\n\n # matches a request for an address\n if 'address' in NN_set:\n r_name = w.get('NNP', [None])[0] or \\\n w['NN'][-1]\n resp['restaurant'] = r_name\n resp['type'] = 'name-location'\n return resp\n\n # matches a restaurant in neighborhood\n if 'in' in w.get('IN', []) and \\\n NN_set & constants.NAME_KEYWORDS:\n r_name = w.get('NNP', [None])[0]\n if not r_name:\n for kw in reversed(w['NN']):\n if kw not in constants.NAME_KEYWORDS:\n r_name = kw\n break\n if r_name:\n resp['type'] = 'random-city'\n resp['city'] = string.capitalize(r_name)\n 
return resp\n\n # matches a request for a cuisine type\n if NN_set & constants.NAME_KEYWORDS:\n r_name = w.get('NNP', [None])[0]\n if not r_name:\n for kw in reversed(w['NN']):\n if kw not in constants.NAME_KEYWORDS:\n r_name = kw\n break\n if r_name:\n resp['type'] = 'random-cuisine'\n resp['cuisine'] = string.capitalize(r_name)\n return resp\n\n # merge all numerals together for list-mode\n w['CDLS'] = set(w.get('CD', []) + w.get('LS', []))\n if w['CDLS']:\n w_copy = w['CDLS'].copy()\n while w_copy:\n try:\n resp['listitem'] = int(w_copy.pop())\n resp['type'] = 'single-listitem'\n return resp\n except:\n pass\n\n # distance / how far\n if ('far' in w.get('RB', [])\n and 'how' in w.get('WRB', [])\n ) or ('distance' in NN_set):\n r = w.get('NNP', [None])[0]\n if r:\n resp['type'] = 'name-distance'\n resp['restaurant'] = string.capitalize(r)\n return resp\n\n if constants.DEBUG:\n log.debug(resp)\n return resp",
"def kwextract(s):\n try:\n return strip(s, \"$\").strip().split(\": \")[1]\n except IndexError:\n return \"<unknown>\"",
"def parse_streettype(self):\n \n\n try:\n word = sttype[self.words[self.index]['word']]\n if Vocabulary.STREET_TYPE in word['tag']:\n itag = word['tag'].index(Vocabulary.STREET_TYPE)\n lemma = word['lemma'][itag]\n return lemma, 1\n return None, 0\n except: return None, 0",
"def parse_special_word(s):\n index1 = s.find(special_word_marker)\n if index1 != -1:\n index2 = s.find(special_word_marker, index1 + 1)\n if index2 != -1:\n sw = normalize(s[index1+len(special_word_marker) : index2])\n rest = normalize(s[index2+len(special_word_marker) :])\n return sw, rest\n return None, s",
"def parse_postalUS(self):\n \n index = self.index\n \n # US Postal Code\n if len(self.words[index]['word']) != 5 or not self.words[index]['word'].isdigit():\n return None, 0\n postal = self.words[index]['word']\n \n if index + 1 < self.length:\n if self.words[index+1]['word'] == '-':\n index += 2\n if index == self.length:\n return None, 0\n if len(self.words[index]['word']) == 4 and self.words[index]['word'].isdigit():\n postal += '-' + self.words[index]['word']\n return postal, 3\n else:\n return postal, 1\n \n return postal, 1",
"def parse(self, word):\n word = self.son.segs(word)\n son_map = self._sonority_map(word)\n son_map = self._mark_offglides(son_map)\n son_map = self._adjust_anom_fric_cod(son_map)\n son_map = self._adjust_anom_fric_ons(son_map)\n ons_son = self._initial_onset(son_map)\n cod_son = self._final_coda(son_map)\n ons = self.from_map(ons_son, word)\n cod = self.from_reverse_map(cod_son, word)\n return (ons, cod)",
"def _station_from_intent(intent, stations):\n slots = intent['slots']\n if slots.get('station_name', {}).get('value'):\n name = slots['station_name']['value']\n if ' and ' in name:\n # Try to be robust to re-orderings of street names.\n tokens = name.split(' and ')\n if len(tokens) != 2:\n first, second = name, None\n else:\n first, second = name.split(' and ')\n else:\n first, second = name, None\n else:\n first = slots['first_street']['value']\n second = slots.get('second_street', {}).get('value')\n sta = location.find_station(stations, first, second, exact=False)\n return sta",
"def find_places(query):\n parts = str(query).split(' ')\n for i, p in enumerate(parts):\n p = p.replace('-', ' ').strip()\n try:\n postal_code = int(p)\n if len(postal_code) == 4:\n print(postal_code, parts[i+1])\n # Check \n #response = get_osm_location(\"{postal_code} {name}\")\n #lon = response['lon']\n #lat = response['lat']\n #poly = \n except Exception as e:\n continue",
"def parseTweetsThruLocation(tweet):\n try:\n parts = tweet.rsplit('|',3)\n if len(parts) < 3:\n return \"Error\", tweet\n return parts[-1], parts[0]\n except IndexError:\n print('======Error detected in parsing tweets thru location')\n print(tweet)\n return \"Error\", tweet",
"def parse(self, word):\n raise NotImplementedError",
"def extract_postcode(s):\n pc_regex = r'([Gg][Ii][Rr] 0[Aa]{2})|((([A-Za-z][0-9]{1,2})|(([A-Za-z][A-Ha-hJ-Yj-y]'\n pc_regex += r'[0-9]{1,2})|(([A-Za-z][0-9][A-Za-z])|([A-Za-z][A-Ha-hJ-Yj-y][0-9]?[A-Za-z]'\n pc_regex += r'))))\\s?[0-9][A-Za-z]{2})'\n\n re_search = re.search(pc_regex, s)\n if re_search:\n p = re_search.group(0)\n else:\n p = ''\n return p",
"def _parse_wkt(s):\n if s.startswith('SRID'):\n s = s[s.index(';') + 1:]\n return shapely.wkt.loads(s)",
"def _parse_station(station) -> WeatherStation:\n with open(core_season_file_path) as file_handle:\n core_seasons = json.load(file_handle)\n ecodivisions = geopandas.read_file(ecodiv_shape_file_path)\n station_coord = Point(\n float(station['longitude']), float(station['latitude']))\n\n # hacky fix for station 447 (WATSON LAKE FS), which is in the Yukon\n # so ecodivision name has to be hard-coded\n if station['stationCode'] == '447':\n ecodiv_name = \"SUB-ARCTIC HIGHLANDS\"\n else:\n for index, row in ecodivisions.iterrows(): # pylint: disable=redefined-outer-name, unused-variable\n geom = row['geometry']\n if station_coord.within(geom):\n ecodiv_name = row['CDVSNNM']\n break\n return WeatherStation(\n code=station['stationCode'],\n name=station['displayLabel'],\n lat=station['latitude'],\n long=station['longitude'],\n ecodivision_name=ecodiv_name,\n core_season=core_seasons[ecodiv_name]['core_season'])",
"def parse(s):\n return s",
"def station_name(f):\n return f.split('/')[1].split('_')[0]",
"def __defaultCaseForParseString(self, rootForm: str, parseString: str, partOfSpeech: str) -> str:\n if parseString == \"P3SG+NOM$PNON+ACC\":\n if partOfSpeech == \"PROP\":\n return \"PNON+ACC\"\n else:\n return \"P3SG+NOM\"\n elif parseString == \"A2SG+P2SG$A3SG+P3SG\":\n return \"A3SG+P3SG\"\n elif parseString == \"A3PL+P3PL+NOM$A3PL+P3SG+NOM$A3PL+PNON+ACC$A3SG+P3PL+NOM\":\n return \"A3PL+P3SG+NOM\"\n elif parseString == \"P2SG$P3SG\":\n return \"P3SG\"\n elif parseString == \"A3PL+PNON+NOM$A3SG+PNON+NOM^DB+VERB+ZERO+PRES+A3PL\":\n return \"A3PL+PNON+NOM\"\n elif parseString == \"P2SG+NOM$PNON+GEN\":\n return \"PNON+GEN\"\n elif parseString == \"AOR^DB+ADJ+ZERO$AOR+A3SG\":\n return \"AOR+A3SG\"\n elif parseString == \"P2SG$PNON\":\n return \"PNON\"\n elif parseString == \"ADV+SINCE$VERB+ZERO+PRES+COP+A3SG\":\n if rootForm == \"yıl\" or rootForm == \"süre\" or rootForm == \"zaman\" or rootForm == \"ay\":\n return \"ADV+SINCE\"\n else:\n return \"VERB+ZERO+PRES+COP+A3SG\"\n elif parseString == \"CONJ$VERB+POS+IMP+A2SG\":\n return \"CONJ\"\n elif parseString == \"NEG+IMP+A2SG$POS^DB+NOUN+INF2+A3SG+PNON+NOM\":\n return \"POS^DB+NOUN+INF2+A3SG+PNON+NOM\"\n elif parseString == \"NEG+OPT+A3SG$POS^DB+NOUN+INF2+A3SG+PNON+DAT\":\n return \"POS^DB+NOUN+INF2+A3SG+PNON+DAT\"\n elif parseString == \"NOUN+A3SG+P3SG+NOM$NOUN^DB+ADJ+ALMOST\":\n return \"NOUN+A3SG+P3SG+NOM\"\n elif parseString == \"ADJ$VERB+POS+IMP+A2SG\":\n return \"ADJ\"\n elif parseString == \"NOUN+A3SG+PNON+NOM$VERB+POS+IMP+A2SG\":\n return \"NOUN+A3SG+PNON+NOM\"\n elif parseString == \"INF2+A3SG+P3SG+NOM$INF2^DB+ADJ+ALMOST$\":\n return \"INF2+A3SG+P3SG+NOM\"\n else:\n return None",
"def parse(s):\n Term.str = s\n new_term = None\n while Term.str != '':\n new_term, Term.str = Term.parse_prefix(Term.str)\n return new_term",
"def parse(self, word):\n son_map = self._sonority_map(word)\n ons_son = self._initial_onset(son_map)\n cod_son = self._final_coda(son_map)\n ons = self.from_map(ons_son, word)\n cod = self.from_reverse_map(cod_son, word)\n return (ons, cod)",
"def handle_special_cases(substring):\r\n if substring==['', 'EB/NB', '2'] or substring==['EB/NB', '2', ''] or substring==['', 'EB/NB', '2', '#1'] or substring==['', 'EB/NB', '2', '#2']:\r\n dir='E'\r\n road=2\r\n elif substring== ['', 'WB/SB', '2'] or substring==['WB/SB', '2', '']:\r\n dir='W'\r\n road=2\r\n elif substring==['WB210', '']:\r\n dir='W'\r\n road=210\r\n elif substring==['S', '605/W', '10', '']:\r\n dir='W'\r\n road=10 \r\n elif substring==['', 'NB5', 'TRK', 'RTE']:\r\n dir='N'\r\n road=5\r\n elif substring==['', 'S605/E10']:\r\n dir='E'\r\n road=10\r\n else:\r\n dir=None\r\n road=0\r\n return dir, road",
"def getStation(line, stn):\n url = generateUrl(line, stn)\n tree = getXmlTree(url)\n return xmlTreeToStation(tree)",
"def parse_location(location):\n city, state = location.strip().split(',')\n return f\"{city.strip().replace(' ', '-')}-{state.strip().replace(' ', '-')}\"",
"def extract_alleles_from_snp_string(snp_string):\n (allele1, allele2) = snp_string[1:4].split(\"/\")\n assert allele1 in \"ATGC\"\n assert allele2 in \"ATGC\"\n return allele1, allele2",
"def parse_phone(s):\n pattern = '''\n ^\\s* # Leading spaces\n (?P<areacode>\n \\d{3}-? # \"xxx\" or \"xxx-\"\n | \\(\\d{3}\\)\\s* # OR \"(xxx) \"\n )\n (?P<prefix>\\d{3}) # xxx\n -? # Dash (optional)\n (?P<suffix>\\d{4}) # xxxx\n \\s*$ # Trailing spaces\n '''\n matcher = re.compile(pattern, re.VERBOSE)\n matches = matcher.match(s)\n if matches is None:\n print(s)\n return s\n else:\n areacode = re.search('\\d{3}', matches.group ('areacode')).group()\n prefix = matches.group ('prefix')\n suffix = matches.group ('suffix')\n return areacode+'-'+prefix+'-'+suffix",
"def _parse_word(self, token, ctxinfo) :\n ignore = False\n if token.startswith(\"|\") and token.endswith(\"|\") : # regular token\n token = token[1:-1]\n token_parts = token.rsplit( \"_\", 1 )\n if len(token_parts) == 2 :\n lemma_and_index, pos = token_parts\n lemma_parts = lemma_and_index.rsplit( \":\", 1 )\n if len(lemma_parts) == 2 : \n lemma, index = lemma_parts\n if lemma.endswith(\"\\\\\") :\n lemma = lemma[:-1] # separator was \\: \n else :\n ignore = True\n else :\n ignore = True\n if ignore :\n ctxinfo.warn(\"Ignoring bad token `{token}`\", token=token)\n return None\n else : \n return (lemma, index, pos)"
] | [
"0.623392",
"0.6233269",
"0.61185384",
"0.60672194",
"0.5706585",
"0.5652462",
"0.5631425",
"0.5517574",
"0.5503128",
"0.5474191",
"0.5456381",
"0.5427626",
"0.53800154",
"0.53769773",
"0.5357613",
"0.53118235",
"0.527336",
"0.5262144",
"0.52560794",
"0.5253394",
"0.5230342",
"0.5224799",
"0.52199745",
"0.5168089",
"0.5156833",
"0.51553833",
"0.51417774",
"0.51242924",
"0.5122382",
"0.5111767"
] | 0.6842843 | 0 |
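Note on the parse_stn record above: the stored document is a single method lifted out of its class, so the tokenizer state it reads (self.words as a list of {'word': ...} dicts, plus self.index and self.length) is not shown. The harness below is a minimal sketch under that assumption; the _StnParserSketch class name and the sample tokens are invented for illustration, and the point is only to make the return shape visible, a pair of (station name, number of tokens consumed).

# Minimal sketch, not part of the dataset record: the _StnParserSketch class
# and the example tokens are assumptions made only to exercise the logic of
# parse_stn as stored above.
class _StnParserSketch:
    def __init__(self, tokens):
        # The record assumes a tokenizer exposing these three attributes.
        self.words = [{'word': t.lower()} for t in tokens]
        self.index = 0
        self.length = len(self.words)

    def parse_stn(self):
        # Same logic as the record: recognize 'stn' / 'station' / 'rpo',
        # optionally followed by '.', and return (name, tokens consumed).
        index = self.index
        if index == self.length:
            return None, 0
        if self.words[index]['word'] in ('stn', 'station', 'rpo'):
            index += 1
            if index == self.length:
                return None, 0
            if self.words[index]['word'] == '.':
                # Note: like the record, this assumes a token follows the period.
                return self.words[index + 1]['word'], 3
            return self.words[index]['word'], 2
        return None, 0

if __name__ == '__main__':
    print(_StnParserSketch(['stn', '.', 'A']).parse_stn())   # ('a', 3)
    print(_StnParserSketch(['station', 'b']).parse_stn())    # ('b', 2)
    print(_StnParserSketch(['main', 'st']).parse_stn())      # (None, 0)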
Parse a Street Direction (n|s|w|e|nw|ne|sw|se) (north|south|west|east) (north[[sp]west|east]|south[[sp]west|east]) | def parse_streetdir(self):
first = self.words[self.index]['word']
if self.index + 1 < self.length:
second = self.words[self.index+1]['word']
else:
second = None
if first in ['northwest', 'northeast', 'southwest', 'southeast']:
return first, 1
elif first == 'nw':
return "northwest", 1
elif first == 'ne':
return "northeast", 1
elif first == 'sw':
return "southwest", 1
elif first == 'se':
return "southeast", 1
if first in ['n', 'north']:
if second in ['w', 'west']:
return "northwest", 2
elif second in ['e', 'east']:
return "northeast", 2
else:
return "north", 1
elif first in ['s', 'south']:
if second in ['w', 'west']:
return "southwest", 2
elif second in ['e', 'east']:
return "southeast", 2
else:
return "south", 1
elif first in ['e', 'east']:
return "east", 1
elif first in ['w', 'west']:
return "west", 1
return None,0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parse_streetname(self):\n index = self.index\n \n name = \"\"\n for i in range(4):\n if index + i == self.length:\n break\n if self.words[index+i]['word'] == ',':\n break\n # Hack\n if self.words[index+i]['word'] == 'doctor':\n self.words[index+i]['word'] = 'drive'\n break\n try:\n word = sttype[self.words[index+i]['word']]\n break\n except:\n try:\n word = vocab[self.words[index+i]['word']]\n if Vocabulary.STREET_TYPE in word['tag']:\n break\n if name != '':\n name += ' ' + word['lemma'][0]\n else:\n name = word['lemma'][0]\n except: \n if self.words[index+i]['word'][-2:] in [ 'th', 'st', 'nd', 'rd' ]:\n name = self.words[index+i]['word'][:-2]\n else:\n self.index += i\n _dir, _n = self.parse_streetdir()\n self.index -= i\n if _dir:\n break\n if name != '':\n name += ' ' + self.words[index+i]['word']\n else:\n name = self.words[index+i]['word']\n \n if i == 0 or i == 4:\n return None, 0\n else:\n return name, i",
"def street_parser(*street_data):\n\n # parsing tuples\n if len(street_data) == 2:\n if not isinstance(street_data[0], str) and not isinstance(street_data[1], str):\n raise WrongInput(\"Invalid format\")\n # street name as the tuple's first item\n strname, strnumber = street_data\n # street number as the tuple's first item\n if street_data[0][0] in digits:\n strname, strnumber = strnumber, strname\n\n # parsing strings\n else:\n if not isinstance(street_data[0], str):\n raise WrongInput(\"Invalid format\")\n if not street_data[0]:\n raise WrongInput(\"Input cannot be blank\")\n\n # string starting with street number\n if street_data[0][0] in digits:\n street_pattern = re.compile(r'''\n ^ # beginning of string\n (\\d+) # street number is any number of digits\n \\W+ # separator\n (\\w+\\W*\\w*\\W*) # street name is one or more words with optional separators\n $ # end of string\n ''', re.VERBOSE)\n street_obj = street_pattern.search(street_data[0])\n strnumber, strname = street_obj.groups()\n\n # string starting with street name\n else:\n street_pattern = re.compile(r'''\n ^ # beginning of string\n (\\w+\\W*\\w*\\s*) # street name is one or more words with optional separators\n \\W+ # separator\n (\\d+) # street number is any number of digits\n $ # end of string\n ''', re.VERBOSE)\n street_obj = street_pattern.search(street_data[0])\n (strname, strnumber) = street_obj.groups()\n\n # replace specific words in street name with their abbreviates\n strname = strname.lower()\n special = {r'\\baleje\\b': 'Al.', r'\\bavenue\\b': 'Av.', r'\\broad\\b': 'Rd.', r'\\bsquare\\b': 'Sq.',\n r'\\bstreet\\b': 'St.', r'\\bdrive\\b': 'Dr.'}\n for key in special:\n strname = re.sub(key, special[key], strname)\n return strname.title(), strnumber",
"def _station_from_intent(intent, stations):\n slots = intent['slots']\n if slots.get('station_name', {}).get('value'):\n name = slots['station_name']['value']\n if ' and ' in name:\n # Try to be robust to re-orderings of street names.\n tokens = name.split(' and ')\n if len(tokens) != 2:\n first, second = name, None\n else:\n first, second = name.split(' and ')\n else:\n first, second = name, None\n else:\n first = slots['first_street']['value']\n second = slots.get('second_street', {}).get('value')\n sta = location.find_station(stations, first, second, exact=False)\n return sta",
"def parse_stn(self):\n \n index = self.index\n start = self.index \n \n if index == self.length:\n return None, 0\n \n if self.words[index]['word'] == 'stn' or self.words[index]['word'] == 'station' or self.words[index]['word'] == 'rpo':\n index += 1\n if index == self.length:\n return None, 0\n if self.words[index]['word'] == '.':\n return self.words[index+1]['word'], 3\n else:\n return self.words[index]['word'], 2\n \n return None, 0",
"def parse_streettype(self):\n \n\n try:\n word = sttype[self.words[self.index]['word']]\n if Vocabulary.STREET_TYPE in word['tag']:\n itag = word['tag'].index(Vocabulary.STREET_TYPE)\n lemma = word['lemma'][itag]\n return lemma, 1\n return None, 0\n except: return None, 0",
"def possible_directions(valid_positions):\n if valid_positions == \"n\":\n print(\"You can travel: (N)orth.\")\n elif valid_positions == \"nes\":\n print(\"You can travel: (N)orth or (E)ast or (S)outh.\")\n elif valid_positions == \"es\":\n print(\"You can travel: (E)ast or (S)outh.\")\n elif valid_positions == \"sw\":\n print(\"You can travel: (S)outh or (W)est.\")\n elif valid_positions == \"ew\":\n print(\"You can travel: (E)ast or (W)est.\")\n elif valid_positions == \"ns\":\n print(\"You can travel: (N)orth or (S)outh.\")",
"def maploc(loc):\n\n\n loc = REGEX['parens'].sub('', loc)\n loc = REGEX['and'].sub('', loc)\n loc = REGEX['num'].sub('', loc)\n\n \"\"\"\n 'parens' 'and' 'single' 'num' 'seeley' 'iab' 'brh'\n \"\"\"\n \"\"\"\n /* For non-street address, strip room numbers */\n if (!location.match(' Ave')) {\n location = location.replace(/LL[0-9]/g, '').replace(/[0-9]/g, '');\n }\n /* Some text substitutions */\n location = location.replace('Seeley W.', '').replace('International Affairs Building', '420 W 118th St').replace('Broadway Residence Hall', '2900 Broadway');\n\n \"\"\"\n return loc + ', New York, NY 10027'",
"def valid_directions(y, x):\n results = []\n if x % 2: # east side of intersection\n results.append((y, x-1, 1))\n if x+1 < M*2:\n results.append((y, x+1, 2)) # street\n else: # west side of intersection\n if x > 0:\n results.append((y, x-1, 2)) # street\n results.append((y, x+1, 1))\n if y % 2: # south side of intersection\n if y+1 < N*2:\n results.append((y+1, x, 2)) # street\n results.append((y-1, x, 1))\n else:\n results.append((y+1, x, 1))\n if y > 0:\n results.append((y-1, x, 2))\n return results",
"def parse_address_street(address_str, address_zip_us_re, address_zip_us_lax_re):\n address = {}\n errors = []\n parts = address_str.split('$')\n if DEBUG:\n address['debug_address_str'] = address_str\n address['debug_part_1'] = parts[0]\n address['debug_part_last'] = parts[-1]\n address['debug_length'] = len(parts)\n #if len(parts) == 1:\n #print('cannot split: {}: {}'.format(debug_type, address_str))\n match = re.search(address_zip_us_re, parts[-1])\n if match:\n if DEBUG:\n address['debug_parser'] = 'A'\n address['city'] = match.group(1)\n address['region'] = match.group(2).upper()\n address['postalCode'] = match.group(3)\n address['countryId'] = 'US'\n if len(parts) == 2:\n if DEBUG:\n address['debug_parser'] = 'B'\n address['addressLine1'] = parts[0]\n else:\n if len(parts) == 3:\n if DEBUG:\n address['debug_parser'] = 'C'\n address['addressLine1'] = parts[0]\n if parts[0] != parts[1]:\n if DEBUG:\n address['debug_parser'] = 'D'\n address['addressLine2'] = parts[1]\n else:\n match2 = re.search(address_zip_us_lax_re, address_str)\n if match2:\n if DEBUG:\n address['debug_parser'] = 'E'\n address['region'] = match2.group(2).upper()\n address['postalCode'] = match2.group(3)\n address['countryId'] = 'US'\n # FIXME: Cannot reliably parse the remainder for city and street address\n errors.append('Partial parse street address: {}'.format(address_str))\n address['addressLine1'] = match2.group(1)\n else:\n # This is the remainder that we could not parse.\n # So just put it all into \"addressLine1\" to be manually adjusted later.\n if DEBUG:\n address['debug_parser'] = 'F'\n errors.append('Cannot parse street address: {}'.format(address_str))\n address['addressLine1'] = address_str\n return (address, errors)",
"def parse_street_waynodes(input, use_highway):\r\n way_key = use_highway and name_highway_key or name_key\r\n rels, ways, nodes = ParserOSM().parse(input, way_key=way_key)\r\n \r\n return ways, nodes",
"def getDirection(self):\n if 'N' in str(self.trip_update.trip.trip_id):\n direction = 'northbound'\n if 'S' in str(self.trip_update.trip.trip_id):\n direction = 'southbound'\n return direction",
"def handle_special_cases(substring):\r\n if substring==['', 'EB/NB', '2'] or substring==['EB/NB', '2', ''] or substring==['', 'EB/NB', '2', '#1'] or substring==['', 'EB/NB', '2', '#2']:\r\n dir='E'\r\n road=2\r\n elif substring== ['', 'WB/SB', '2'] or substring==['WB/SB', '2', '']:\r\n dir='W'\r\n road=2\r\n elif substring==['WB210', '']:\r\n dir='W'\r\n road=210\r\n elif substring==['S', '605/W', '10', '']:\r\n dir='W'\r\n road=10 \r\n elif substring==['', 'NB5', 'TRK', 'RTE']:\r\n dir='N'\r\n road=5\r\n elif substring==['', 'S605/E10']:\r\n dir='E'\r\n road=10\r\n else:\r\n dir=None\r\n road=0\r\n return dir, road",
"def stops_on_routes_with_direction():\n routes_and_stops = {}\n routes = ['102y', '102z', '104y', '104z', '111y', '111z', '114y', '114z', '116y', '116z', '118y', '11y', '11z', '120y', '120z', '122y', '122z', '123y', '123z', '130y', '130z', '13y', '13z', '140y', '140z', '142y', '142z', '145y', '145z', '14Cy', '14Cz', '14y', '14z', '150y', '150z', '151y', '151z', '15Ay', '15Az', '15By', '15Bz', '15y', '15z', '161y', '161z', '16Cy', '16Cz', '16y', '16z', '17Ay', '17Az', '17y', '17z', '184y', '184z', '185y', '185z', '18y', '18z', '1y', '1z', '220y', '220z', '236y', '236z', '238y', '238z', '239y', '239z', '25Ay', '25Az', '25By', '25Bz', '25Xy', '25Xz', '25y', '25z', '26y', '26z', '270y', '270z', '27Ay', '27Az', '27By', '27Bz', '27Xy', '27Xz', '27y', '27z', '29Ay', '29Az', '31Ay', '31Az', '31By', '31Bz', '31y', '31z', '32Ay', '32Az', '32By', '32Bz', '32Xy', '32Xz', '32y', '32z', '33Ay', '33Az', '33By', '33Bz', '33Xy', '33Xz', '33y', '33z', '37y', '37z', '38Ay', '38Az', '38By', '38Bz', '38y', '38z', '39Ay', '39Az', '39y', '39z', '40By', '40Bz', '40Dy', '40Dz', '40y', '40z', '41Ay', '41By', '41Bz', '41Cy', '41Cz', '41Xy', '41Xz', '41y', '41z', '42y', '42z', '43y', '43z', '44By', '44Bz', '44y', '44z', '45Ay', '45Az', '46Ay', '46Az', '46Ey', '47y', '47z', '49y', '49z', '4y', '4z', '51Dy', '51Dz', '51Xy', '53By', '53Bz', '53y', '53z', '54Ay', '54Az', '56Ay', '56Az', '59y', '59z', '61y', '61z', '63y', '63z', '65By', '65Bz', '65y', '65z', '66Ay', '66Az', '66By', '66Bz', '66Xy', '66Xz', '66y', '66z', '67Xy', '67Xz', '67y', '67z', '68Ay', '68Az', '68y', '68z', '69Xy', '69Xz', '69y', '69z', '70y', '70z', '747y', '747z', '75y', '75z', '76Ay', '76Az', '76y', '76z', '77Ay', '77Az', '79Ay', '79Az', '79y', '79z', '7By', '7Bz', '7Dy', '7Dz', '7y', '7z', '83Ay', '83Az', '83y', '83z', '84Ay', '84Az', '84Xy', '84Xz', '84y', '84z', '8y', '8z', '9y', '9z']\n for route in routes:\n routes_and_stops[route] = [] # new array value for each route key\n reader = csv.reader(open(\"../Data/Sorted Data/stopped_bus_data.csv\"))\n for line in reader:\n try:\n current_route = extract_route_and_direction(line[3])\n if int(line[13]) not in routes_and_stops[current_route]:\n routes_and_stops[current_route].append(int(line[13]))\n except:\n continue\n return routes_and_stops",
"def net_xy(street):\r\n\r\n # api-endpoint\r\n URL = \"https://ags.govmap.gov.il/Search/FreeSearch\"\r\n # headers\r\n headers = {\"Content-Type\": \"application/json\", \"charset\": \"utf-8\"}\r\n # location given here\r\n try:\r\n p = \"{\\\"keyword\\\": \\\"\" + street + \"\\\",\\\"LstResult\\\": null}\"\r\n PARAMS = p.encode(\"utf-8\")\r\n\r\n # sending get request and saving the response as response object\r\n r = requests.post(url=URL, data=PARAMS, headers=headers)\r\n\r\n # extracting data in json format\r\n data = r.json()\r\n\r\n # extracting latitude, longitude and formatted address\r\n # of the first matching location\r\n\r\n X = data['data']['Result'][0]['X']\r\n Y = data['data']['Result'][0]['Y']\r\n except Exception as e:\r\n print(e)\r\n # print('exception ddamammnnnnn')\r\n print(street)\r\n return 0,0\r\n return X,Y",
"def _parse_location(location_string):\n location_regex = r\"(\\d+)-(\\d+)(\\(+\\)|\\(-\\)|)\"\n match = re.match(location_regex, location_string.strip())\n start, end, strand = match.groups()\n return int(start), int(end), -1 if strand == \"(-)\" else 1",
"def nextSS(map_route, ss_def, contacts_def):\n map_index = []\n for i in range(0, len(map_route)):\n for j in range(0, 2):\n ti = map_route[i][j]\n if ti in map_index:\n pass\n else:\n map_index.append(ti)\n map_index.sort()\n\n temp_numcontacts = []\n for i in range(-1, 2, 2): # HARDCODE, next possible SSE two directions\n if i < 0:\n if map_index[0] > 0:\n next_sse_index = map_index[0] + i\n tnum = total_data(map_index, next_sse_index, ss_def, contacts_def)\n\n else:\n tnum = -1\n else:\n if map_index[-1] <= len(ss_def) - 1:\n next_sse_index = map_index[-1] + i\n tnum = total_data(map_index, next_sse_index, ss_def, contacts_def)\n\n else:\n tnum = -1\n\n temp_numcontacts.append(tnum)\n\n if temp_numcontacts[0] >= temp_numcontacts[1]:\n new_direction = 'left'\n ti = map_index[0] - 1\n tj = map_index[0]\n return ti, tj, new_direction\n else:\n new_direction = 'right'\n ti = map_index[-1]\n tj = map_index[-1] + 1\n return ti, tj, new_direction",
"def extract_route_and_direction(code):\n try:\n if int(code[-4:]): # testing if pattern ends in 4 digits, error here results in \"\" being returned\n if code[-4] == '1':\n return (code[:-4] + \"y\").lstrip(\n '0') # eliminates leading 0s (for routes containing letters eg 046A) and the trailing 3-digit code\n elif code[-4] == '0':\n return (code[:-4] + \"z\").lstrip('0')\n except:\n return \"\" # error handling picked in bus_routes() function, this will catch null values and journey ids in the incorrect format",
"def canon_station_name(s, line):\n s = s.strip()\n s = re.sub('^Heathrow$', 'Heathrow Terminals 1, 2, 3', s)\n s = re.sub('^Olympia$', 'Kensington (Olympia)', s)\n s = re.sub('^Warwick Ave$', 'Warwick Avenue', s)\n s = re.sub('^Camden$', 'Camden Town', s)\n s = re.sub('^Central$', 'Finchley Central', s) # They say \"Between Central and East Finchley\"\n s = re.sub('\\s*Platform \\d$', '', s)\n s = s + ' Station'\n s = s.replace('(Bakerloo)', 'Bakerloo').replace('Earls', 'Earl\\'s') \\\n .replace(' fast ', ' ') \\\n .replace('\\xe2\\x80\\x99', \"'\") \\\n .replace('St ', 'St. ') \\\n .replace('Elephant and Castle', 'Elephant & Castle') \\\n .replace('Lambeth Station', 'Lambeth North Station') \\\n .replace('Chalfont Station', 'Chalfont & Latimer Station') \\\n .replace('West Brompon', 'West Brompton') \\\n .replace('Picadilly Circus', 'Piccadilly Circus') \\\n .replace('High Barent', 'High Barnet') \\\n .replace('Bartnet', 'Barnet') \\\n .replace('Faringdon', 'Farringdon') \\\n .replace('Turnham Greens', 'Turnham Green') \\\n .replace('Ruilsip', 'Ruislip') \\\n .replace('Dagemham', 'Dagenham') \\\n .replace('Edgware Road (H & C)', 'Edgware Road Circle') \\\n .replace('Hammersmith (Circle and H&C)', 'Hammersmith') \\\n .replace('Shepherds Bush (Central Line)', \"Shepherd's Bush\") \\\n .replace('Terminals 123', 'Terminals 1, 2, 3').replace('Terminal 1,2,3', 'Terminals 1, 2, 3') \\\n .replace('Woodford Junction', 'Woodford') \\\n .replace(\"King's Cross Station\", \"King's Cross St. Pancras Station\") \\\n .replace(\"Kings Cross Station\", \"King's Cross St. Pancras Station\") \\\n .replace('Central Finchley', 'Finchley Central').replace('District and Picc', 'D & P') \\\n .replace('South Fields', 'Southfields') \\\n .replace('Regents Park', \"Regent's Park\") \\\n .replace('Bromley-by-Bow', \"Bromley-By-Bow\") \\\n .replace('Brent Oak', 'Burnt Oak') \\\n .replace('St. Johns Wood', \"St. John's Wood\") \\\n .replace('Totteridge and Whetstone', 'Totteridge & Whetstone') \\\n .replace('Newbury Park Loop', 'Newbury Park') \\\n .replace('Harrow-on-the-Hill', 'Harrow on the Hill')\n if s == 'Edgware Road Station' and line == 'B':\n s = 'Edgware Road Bakerloo Station'\n if s == 'Edgware Road Station' and line != 'B':\n s = 'Edgware Road Circle Station'\n return s",
"def _parse_location(self, response):\n if \"1700 S. Wentworth\" in response.text:\n return {\n \"address\": \"1700 S. Wentworth Avenue, Chicago, Illinois\",\n \"name\": \"Leonard M. Louie Fieldhouse\",\n }\n elif \"Zoom\" in response.text:\n return {\n \"address\": \"\",\n \"name\": \"Zoom\",\n }\n else:\n raise ValueError(\"Meeting address has changed\")",
"def process_capture_groups(group):\n group = group[0][0].replace(',', '')\n group = group.split(' ')\n street_type = group[-1]\n street_name = ' '.join(group[:-1])\n if street_type in STREET_ABBREVIATION_TO_NAME.keys():\n street_type = STREET_ABBREVIATION_TO_NAME[street_type]\n return street_name, street_type",
"def parseLAN(board, lan):\n\n # To parse LAN pawn moves like \"e2-e4\" as SAN moves, we have to remove a few\n # fields\n if len(lan) == 5:\n if \"x\" in lan:\n # e4xd5 -> exd5\n return parseSAN(board, lan[0] + lan[3:])\n else:\n # e2-e4 -> e4\n return parseSAN(board, lan[3:])\n\n # We want to use the SAN parser for LAN moves like \"Nb1-c3\" or \"Rd3xd7\"\n # The san parser should be able to handle most stuff, as long as we remove\n # the slash\n if not lan.upper().startswith(\"O-O\") and not lan.startswith(\"--\"):\n lan = lan.replace(\"-\", \"\")\n return parseSAN(board, lan)",
"def parse_location(location):\n city, state = location.strip().split(',')\n return f\"{city.strip().replace(' ', '-')}-{state.strip().replace(' ', '-')}\"",
"def valid_directions(x,y):\n North, South, West, East = walls(x,y)\n print ('You can travel: ',end='')\n if North:\n print ('(N)orth' ,end='')\n if East or West or South:\n print (' or' ,end=' ')\n if East:\n print ('(E)ast', end='')\n if West or South:\n print (' or' ,end=' ')\n if South:\n print('(S)outh', end='')\n if West:\n print (' or' ,end=' ')\n if West:\n print ('(W)est',end='')\n print('.')\n return North, South, West, East",
"def _isAddr(self, words, index):\n # Street-Number Street-Direction Street-Name Street-Type [, City, State, Postal]\n \n start = index\n addr = Address(words, index)\n if addr.is_addr():\n return addr.index - start\n else:\n return 0",
"def streettype(self):\n if self.index >= self.length:\n return False\n \n self._typ, n = self.parse_streettype()\n if self._typ is not None:\n self.idx_typ = self.index\n self.index += n\n if self.index < self.length and self.words[self.index]['word'] == '.':\n self.index += 1\n if self.index < self.length and self.words[self.index]['word'] == ',':\n self.index += 1\n if self._debug: print(\"TYP\", self._typ, self.idx_typ)\n self.isaddr = True\n return True\n return False",
"def abbr_2_st(state_series, abbr_2_st=True):\n us_state_abbrev = {\n 'Alabama': 'AL',\n 'Alaska': 'AK',\n 'American Samoa': 'AS',\n 'Arizona': 'AZ',\n 'Arkansas': 'AR',\n 'California': 'CA',\n 'Colorado': 'CO',\n 'Connecticut': 'CT',\n 'Delaware': 'DE',\n 'District of Columbia': 'DC',\n 'Florida': 'FL',\n 'Georgia': 'GA',\n 'Guam': 'GU',\n 'Hawaii': 'HI',\n 'Idaho': 'ID',\n 'Illinois': 'IL',\n 'Indiana': 'IN',\n 'Iowa': 'IA',\n 'Kansas': 'KS',\n 'Kentucky': 'KY',\n 'Louisiana': 'LA',\n 'Maine': 'ME',\n 'Maryland': 'MD',\n 'Massachusetts': 'MA',\n 'Michigan': 'MI',\n 'Minnesota': 'MN',\n 'Mississippi': 'MS',\n 'Missouri': 'MO',\n 'Montana': 'MT',\n 'Nebraska': 'NE',\n 'Nevada': 'NV',\n 'New Hampshire': 'NH',\n 'New Jersey': 'NJ',\n 'New Mexico': 'NM',\n 'New York': 'NY',\n 'North Carolina': 'NC',\n 'North Dakota': 'ND',\n 'Northern Mariana Islands':'MP',\n 'Ohio': 'OH',\n 'Oklahoma': 'OK',\n 'Oregon': 'OR',\n 'Pennsylvania': 'PA',\n 'Puerto Rico': 'PR',\n 'Rhode Island': 'RI',\n 'South Carolina': 'SC',\n 'South Dakota': 'SD',\n 'Tennessee': 'TN',\n 'Texas': 'TX',\n 'Utah': 'UT',\n 'Vermont': 'VT',\n 'Virgin Islands': 'VI',\n 'Virginia': 'VA',\n 'Washington': 'WA',\n 'West Virginia': 'WV',\n 'Wisconsin': 'WI',\n 'Wyoming': 'WY'\n}\n if abbr_2_st == True:\n inv_map = {v: k for k, v in us_state_abbrev.items()}\n full_names = []\n for abbv in state_series:\n full_names.append(inv_map[abbv])\n return full_names\n else:\n # Return Abbreviation\n abbvs = []\n for full_name in state_series:\n abbvs.append(us_state_abbrev[full_name])\n return abbvs",
"def street_address(full_address):\n full_address = normalise_address(full_address)\n capture_groups = ROAD_PATTERN.findall(full_address)\n if capture_groups:\n name, _type = process_capture_groups(capture_groups)\n return '{} {}'.format(name, _type)\n return ''",
"def travel(direction, x, y):\n x_new = x\n y_new = y\n for i in range(len(direction)):\n test = direction[i].lower()\n if test == 'n':\n y_new += 1\n elif test == 's':\n y_new -= 1\n elif test == 'e':\n x_new += 1\n elif test == 'w':\n x_new -= 1\n return (x_new, y_new)",
"def find_street(self, text):\n text = text.replace('\"', '')\n result = ''\n textarr = text.split()\n for i in range(0, len(textarr)):\n if textarr[i] in self.streets:\n # decide if the result is nearly empty or not to add the word with or without a space\n if len(result) < 2:\n result = result + textarr[i]\n else:\n result = result + ' ' + textarr[i]\n\n if result in self.streets:\n\n # ends the function if the street is found\n if (result + '\\n') in self.streets and result != 'straße' and result != 'in' and result != '' \\\n and result != 'ein' and result != 'der' and result != 'und' and result != ' ' \\\n and result != 'an':\n return result\n else:\n result_list = result.split()\n del result_list[0]\n result = ' '.join(w for w in result_list)\n result = result.lstrip(' ')\n # ends the function if the street is found\n if (result + '\\n') in self.streets and result != 'straße' and result != 'in' and result != '' \\\n and result != 'ein' and result != 'der' and result != 'und' and result != ' ' \\\n and result != 'ist' and result != 'an':\n return result\n elif i == len(textarr)-1:\n result_list = result.split()\n del result_list[0]\n result = ' '.join(w for w in result_list).lstrip(' ')\n if (result + '\\n') in self.streets and result != 'straße' and result != 'in' and result != '' \\\n and result != 'ein' and result != 'der' and result != 'und' and result != ' ' \\\n and result != 'ist' and result != 'an':\n return result\n print(result)\n return None",
"def __isDirection__(self, word):\n self.directions = ('north', 'south', 'east', 'west', 'down', 'up', 'left', 'right', 'back')\n for direction in self.directions:\n if direction == word:\n return ('direction', word), True\n return None, False"
] | [
"0.6338947",
"0.61096317",
"0.56044453",
"0.5496037",
"0.54842347",
"0.544372",
"0.53669584",
"0.53196144",
"0.53152376",
"0.5198874",
"0.51243407",
"0.51109093",
"0.5085592",
"0.50316674",
"0.500003",
"0.49935362",
"0.49368477",
"0.49091184",
"0.4898173",
"0.48960108",
"0.489498",
"0.48687926",
"0.4864183",
"0.48612142",
"0.48528805",
"0.4837379",
"0.47951686",
"0.47813696",
"0.4767056",
"0.47606006"
] | 0.7305631 | 0 |
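As with the previous record, the parse_streetdir document above depends on tokenizer state that is not stored with it. The sketch below restates the same branching in a condensed, table-driven form purely for illustration; the _DirParserSketch class name, the compact abbreviation dict and the sample tokens are assumptions, and the return convention of (normalized direction, tokens consumed) mirrors the record.

# Illustrative sketch only; it restates the record's branching and is not the
# original class. Tokens are assumed to be lowercased words.
class _DirParserSketch:
    def __init__(self, tokens):
        self.words = [{'word': t} for t in tokens]
        self.index = 0
        self.length = len(self.words)

    def parse_streetdir(self):
        first = self.words[self.index]['word']
        second = (self.words[self.index + 1]['word']
                  if self.index + 1 < self.length else None)
        compact = {'nw': 'northwest', 'ne': 'northeast',
                   'sw': 'southwest', 'se': 'southeast'}
        if first in compact.values():
            return first, 1                      # already fully spelled out
        if first in compact:
            return compact[first], 1             # two-letter abbreviation
        if first in ('n', 'north', 's', 'south'):
            base = 'north' if first in ('n', 'north') else 'south'
            if second in ('w', 'west'):
                return base + 'west', 2          # e.g. 'n w' -> 'northwest'
            if second in ('e', 'east'):
                return base + 'east', 2
            return base, 1
        if first in ('e', 'east'):
            return 'east', 1
        if first in ('w', 'west'):
            return 'west', 1
        return None, 0

if __name__ == '__main__':
    print(_DirParserSketch(['n', 'w', 'main']).parse_streetdir())  # ('northwest', 2)
    print(_DirParserSketch(['se', 'main']).parse_streetdir())      # ('southeast', 1)
    print(_DirParserSketch(['main', 'st']).parse_streetdir())      # (None, 0)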
Parse a Street Name | def parse_streetname(self):
index = self.index
name = ""
for i in range(4):
if index + i == self.length:
break
if self.words[index+i]['word'] == ',':
break
# Hack
if self.words[index+i]['word'] == 'doctor':
self.words[index+i]['word'] = 'drive'
break
try:
word = sttype[self.words[index+i]['word']]
break
except:
try:
word = vocab[self.words[index+i]['word']]
if Vocabulary.STREET_TYPE in word['tag']:
break
if name != '':
name += ' ' + word['lemma'][0]
else:
name = word['lemma'][0]
except:
if self.words[index+i]['word'][-2:] in [ 'th', 'st', 'nd', 'rd' ]:
name = self.words[index+i]['word'][:-2]
else:
self.index += i
_dir, _n = self.parse_streetdir()
self.index -= i
if _dir:
break
if name != '':
name += ' ' + self.words[index+i]['word']
else:
name = self.words[index+i]['word']
if i == 0 or i == 4:
return None, 0
else:
return name, i | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def street_parser(*street_data):\n\n # parsing tuples\n if len(street_data) == 2:\n if not isinstance(street_data[0], str) and not isinstance(street_data[1], str):\n raise WrongInput(\"Invalid format\")\n # street name as the tuple's first item\n strname, strnumber = street_data\n # street number as the tuple's first item\n if street_data[0][0] in digits:\n strname, strnumber = strnumber, strname\n\n # parsing strings\n else:\n if not isinstance(street_data[0], str):\n raise WrongInput(\"Invalid format\")\n if not street_data[0]:\n raise WrongInput(\"Input cannot be blank\")\n\n # string starting with street number\n if street_data[0][0] in digits:\n street_pattern = re.compile(r'''\n ^ # beginning of string\n (\\d+) # street number is any number of digits\n \\W+ # separator\n (\\w+\\W*\\w*\\W*) # street name is one or more words with optional separators\n $ # end of string\n ''', re.VERBOSE)\n street_obj = street_pattern.search(street_data[0])\n strnumber, strname = street_obj.groups()\n\n # string starting with street name\n else:\n street_pattern = re.compile(r'''\n ^ # beginning of string\n (\\w+\\W*\\w*\\s*) # street name is one or more words with optional separators\n \\W+ # separator\n (\\d+) # street number is any number of digits\n $ # end of string\n ''', re.VERBOSE)\n street_obj = street_pattern.search(street_data[0])\n (strname, strnumber) = street_obj.groups()\n\n # replace specific words in street name with their abbreviates\n strname = strname.lower()\n special = {r'\\baleje\\b': 'Al.', r'\\bavenue\\b': 'Av.', r'\\broad\\b': 'Rd.', r'\\bsquare\\b': 'Sq.',\n r'\\bstreet\\b': 'St.', r'\\bdrive\\b': 'Dr.'}\n for key in special:\n strname = re.sub(key, special[key], strname)\n return strname.title(), strnumber",
"def parse_address_street(address_str, address_zip_us_re, address_zip_us_lax_re):\n address = {}\n errors = []\n parts = address_str.split('$')\n if DEBUG:\n address['debug_address_str'] = address_str\n address['debug_part_1'] = parts[0]\n address['debug_part_last'] = parts[-1]\n address['debug_length'] = len(parts)\n #if len(parts) == 1:\n #print('cannot split: {}: {}'.format(debug_type, address_str))\n match = re.search(address_zip_us_re, parts[-1])\n if match:\n if DEBUG:\n address['debug_parser'] = 'A'\n address['city'] = match.group(1)\n address['region'] = match.group(2).upper()\n address['postalCode'] = match.group(3)\n address['countryId'] = 'US'\n if len(parts) == 2:\n if DEBUG:\n address['debug_parser'] = 'B'\n address['addressLine1'] = parts[0]\n else:\n if len(parts) == 3:\n if DEBUG:\n address['debug_parser'] = 'C'\n address['addressLine1'] = parts[0]\n if parts[0] != parts[1]:\n if DEBUG:\n address['debug_parser'] = 'D'\n address['addressLine2'] = parts[1]\n else:\n match2 = re.search(address_zip_us_lax_re, address_str)\n if match2:\n if DEBUG:\n address['debug_parser'] = 'E'\n address['region'] = match2.group(2).upper()\n address['postalCode'] = match2.group(3)\n address['countryId'] = 'US'\n # FIXME: Cannot reliably parse the remainder for city and street address\n errors.append('Partial parse street address: {}'.format(address_str))\n address['addressLine1'] = match2.group(1)\n else:\n # This is the remainder that we could not parse.\n # So just put it all into \"addressLine1\" to be manually adjusted later.\n if DEBUG:\n address['debug_parser'] = 'F'\n errors.append('Cannot parse street address: {}'.format(address_str))\n address['addressLine1'] = address_str\n return (address, errors)",
"def street_address(full_address):\n full_address = normalise_address(full_address)\n capture_groups = ROAD_PATTERN.findall(full_address)\n if capture_groups:\n name, _type = process_capture_groups(capture_groups)\n return '{} {}'.format(name, _type)\n return ''",
"def return_street(streetname):\r\n if streetname == None:\r\n return streetname\r\n if streetname.split(\" \")[-1] in valid_suffix:\r\n return \" \".join(str(streetname).split(\" \")[:-1])\r\n\r\n return streetname",
"def street_name(self):\n return self._street_name",
"def parse_name(first_name, last_name):\n\n return first_name + \" \" + last_name",
"def parse_address(self, address: str) -> Optional[Address]:\n raise NotImplemented",
"def parse_address(address: str) -> OrderedDict[str, str]:\n\n address = address.replace(\"\\n\", \", \")\n\n parsed_address, address_type = usaddress.tag(address)\n if address_type != \"Street Address\":\n logger.warning(\n f\"Couldn't parse address '{address}' of type {address_type}; best guess: {parsed_address}\"\n )\n\n # Fixup: At least one address has \"WI, USA\" in the \"StateName\" component.\n # Strip non-state components\n if parsed_address.get(\"StateName\"):\n parsed_address[\"StateName\"] = parsed_address[\"StateName\"].partition(\",\")[0]\n\n return parsed_address",
"def parse_building_address(addr_string):\n addr_string = re.sub(_regexp, '', addr_string)\n addr_string = re.sub(r'(?P<key>[a-zA-Z]+)', _replace_dir, addr_string)\n addr_string = re.sub(r'(?P<key>[a-zA-Z]+)', _replace_suffix, addr_string)\n addr_string = re.sub(_regexp_extra_space, ' ', addr_string)\n return addr_string.strip().upper()",
"def get_address(address: str) -> Tuple[str, str, str]:\n\n # Try to geocode the address as given\n g = geocoder.osm(address)\n\n if g.json is not None:\n\n # TODO this is inefficient and hacky\n\n # First thing we attempt if the result isn't complete is just to\n # add the housenumber (often the issue).\n if not good_geocoder_result(g.json):\n g.json['housenumber'] = usaddress.tag(address)[0]['AddressNumber']\n\n # If the result is now good, return it\n if good_geocoder_result(g.json):\n\n # Geocoding was successful. Return the result\n return (\n # First part is a nicely formatted address\n f\"{g.json['housenumber']} {g.json['street']}, {g.json['city']}, {g.json['state']} {g.json['postal']}\",\n # Second is the latitude\n g.json['lat'],\n # And third is the longitude\n g.json['lng']\n )\n\n # Geocoding was unsuccessful.\n # Let's try to create a cleaner address by first parsing out the pieces we need, then try again.\n \n # Parsing the address components...\n parsed, addr_type = usaddress.tag(address)\n if addr_type != \"Street Address\":\n raise ValueError(f\"Address could not be properly parsed. Resulting type: {addr_type}. Result: \\n{parsed}\")\n \n # Trim off any whitespace from the parsed components.\n for part in parsed:\n parsed[part] = parsed[part].strip()\n\n reqd_address_parts = ['AddressNumber', 'StreetName', 'PlaceName']\n if any(address_part not in parsed for address_part in reqd_address_parts):\n raise ValueError(f\"The address must have at least a house number, street, and city.\")\n \n # Initialize the resulting address string with the address number (aka house/street number)\n new_address = parsed['AddressNumber']\n \n # If the streetname is just a number, make it ordinal\n if parsed['StreetName'].isnumeric():\n parsed['StreetName'] = ordinal(parsed['StreetName'])\n \n # Get the whole street name\n for k, v in [(k, v) for k, v in parsed.items() if k.startswith(\"StreetName\")]:\n new_address += f\" {v}\"\n \n # Add the city...\n new_address += f\", {parsed['PlaceName']}\"\n # Add the state, if it exists\n if 'StateName' in parsed:\n new_address += f\", {parsed['StateName']}\"\n # And the zip code, if it exists\n if 'ZipCode' in parsed:\n new_address += f\" {parsed['ZipCode']}\"\n \n # Now try to geocode this improved address\n g = geocoder.osm(new_address)\n\n if g.json is not None:\n\n # Geocoding was successful. Return the result\n return (\n # First part is a nicely formatted address\n f\"{g.json['housenumber']} {g.json['street']}, {g.json['city']}, {g.json['state']} {g.json['postal']}\",\n # Second is the latitude\n g.json['lat'],\n # And third is the longitude\n g.json['lng']\n )\n \n # Still can't geocode the address. Throw an error\n else:\n raise ValueError(f\"Could not geocode this address: {address}\")",
"def street_name(self):\n return self._street_name",
"def process_capture_groups(group):\n group = group[0][0].replace(',', '')\n group = group.split(' ')\n street_type = group[-1]\n street_name = ' '.join(group[:-1])\n if street_type in STREET_ABBREVIATION_TO_NAME.keys():\n street_type = STREET_ABBREVIATION_TO_NAME[street_type]\n return street_name, street_type",
"def parse_address_campus(address_str, address_campus_re, address_campus_room_re):\n address = {}\n errors = []\n if '$' not in address_str:\n match = re.search(address_campus_room_re, address_str)\n if match:\n address['addressLine1'] = match.group(1)\n else:\n # This leftover is either an erroneous email address or a building name\n if '@' in address_str:\n errors.append('Campus address seems to be email: {}'.format(address_str))\n #FIXME: Should this be saved to addressLine1 anyway.\n else:\n # It seems to be a building address\n address['addressLine2'] = address_str\n else:\n match = re.search(address_campus_re, address_str)\n if match:\n address['addressLine2'] = match.group(1)\n address['addressLine1'] = match.group(2)\n #else:\n # FIXME: here just for debug\n #errors.append('Cannot parse campus address: {}'.format(address_str))\n return (address, errors)",
"def is_street_name(elem):\n\n return (elem.attrib[\"k\"] == \"addr:street\") or (elem.attrib[\"k\"] == \"addr:street_1\")",
"def street_address():\r\n\r\n return _random.choice(\r\n [\r\n '%d-%d %s' % (\r\n _random.randrange(999),\r\n _random.randrange(999),\r\n street_name()\r\n ),\r\n '%d %s' % (\r\n _random.randrange(999),\r\n street_name()\r\n ),\r\n '%s %d, %s' % (\r\n 'P.O. Box',\r\n _random.randrange(999),\r\n street_name()\r\n )\r\n ]\r\n )",
"def is_street_name(elem):\n return elem.attrib['k'] == \"addr:street\"",
"def clean_street(self):\n street = self.cleaned_data['street'].strip().title()\n street = re.sub(r'\\bRoad\\b', 'Rd', street)\n street = re.sub(r'\\bStreet\\b', 'Str', street)\n street = re.sub(r'\\bAvenue\\b', 'Ave', street)\n street = re.sub(r'\\bParkway\\b', 'Pkwy', street)\n street = re.sub(r'\\bSuite\\b', 'Ste', street)\n street = re.sub(r'\\bApartment\\b', 'Apt', street)\n street = re.sub(r'\\s+', ' ', street) # Remove runs of spaces\n return street",
"def parse_addr(addr):\n\ttry:\n\t\tnew_addr = socket.inet_aton(addr)\n\texcept:\n\t\taddr = socket.gethostbyname(addr)\n\t\ttry:\n\t\t\tnew_addr = socket.inet_aton(addr)\n\t\texcept ValueError:\n\t\t\tlogging.exception('Error:')\n\t\t\traise ValueError, 'Invalid address: %s' % addr\n\n\treturn new_addr",
"def extract_zipcode(full_address):\n full_address = full_address.strip()\n last_space_index = full_address.rindex(\" \")\n zipcode = full_address[last_space_index + 1 : ]\n return zipcode",
"def process_address(text):\n return sanitize(text[9:])",
"def is_street_name(elem):\n return (elem.attrib['k'] == \"addr:street\")",
"def valid_street_name(cls, new_street):\n if type(new_street) is str:\n return True\n # else\n return False",
"def street_address1(self) -> str:\n return pulumi.get(self, \"street_address1\")",
"def _parse_location(self, response):\n if \"1700 S. Wentworth\" in response.text:\n return {\n \"address\": \"1700 S. Wentworth Avenue, Chicago, Illinois\",\n \"name\": \"Leonard M. Louie Fieldhouse\",\n }\n elif \"Zoom\" in response.text:\n return {\n \"address\": \"\",\n \"name\": \"Zoom\",\n }\n else:\n raise ValueError(\"Meeting address has changed\")",
"def parse_phone(s):\n pattern = '''\n ^\\s* # Leading spaces\n (?P<areacode>\n \\d{3}-? # \"xxx\" or \"xxx-\"\n | \\(\\d{3}\\)\\s* # OR \"(xxx) \"\n )\n (?P<prefix>\\d{3}) # xxx\n -? # Dash (optional)\n (?P<suffix>\\d{4}) # xxxx\n \\s*$ # Trailing spaces\n '''\n matcher = re.compile(pattern, re.VERBOSE)\n matches = matcher.match(s)\n if matches is None:\n print(s)\n return s\n else:\n areacode = re.search('\\d{3}', matches.group ('areacode')).group()\n prefix = matches.group ('prefix')\n suffix = matches.group ('suffix')\n return areacode+'-'+prefix+'-'+suffix",
"def normalize_address(address):\n # Fix 'Place/Place' -> 'Place & Place'\n if re.findall(r'[a-zA-Z0-9]/[a-zA-Z0-9]', address):\n address = address.replace('/', ' & ')\n # Fix 'Place:Place' -> 'Place & Place'\n if re.findall(r'[a-zA-Z0-9]:[a-zA-Z0-9]', address):\n address = address.replace(':', ' & ')\n # Fix 'RD' -> 'Rd' & 'PK' -> 'Pk'\n if re.findall(r'[PRSA][KDTV]', address):\n address = re.sub(r'([PRSA][KDTV])', \\\n lambda x: x.group(0).title(), address)\n # Fix 'Bl' -> 'Blvd'\n if re.findall(r'(Bl)[\\ ]', address):\n address = address.replace('Bl', 'Blvd')\n # Fix 'w 156th' -> 'W 156th'\n if re.findall(r'[^a-zA-Z][wnse][/ ]', address):\n address = re.sub(r'[^a-zA-Z]([wnse])[/ ]', \\\n lambda x: x.group(0).upper(), address)\n # Fix '151 St' -> '151st St'\n if re.findall(r'[0-9][\\ ][SA][tv]', address):\n address = re.sub(r'[0-9]+', \\\n ordinal_conversion, address)\n return address",
"def extract_unit_num_street(self, address):\n\n unit = street_num = street = None\n\n address_parts = self.clean(address).split()\n\n try:\n street_num = int(address_parts[1])\n street = address_parts[2]\n unit = int(address_parts[0])\n\n except IndexError:\n raise InvalidAddress(address)\n\n except ValueError:\n\n try:\n street_num = int(address_parts[0])\n except ValueError:\n # Not even a number\n raise InvalidAddress(address)\n\n street = address_parts[1]\n\n return (unit, street_num, street)",
"def parse_address_from_geocoding_response(geocoded_data: dict) -> str:\n return geocoded_data[\n 'response'][\n 'GeoObjectCollection'][\n 'featureMember'][0][\n 'GeoObject'][\n 'metaDataProperty'][\n 'GeocoderMetaData'][\n 'text']",
"def street(self):\n if \"street\" in self._prop_dict:\n return self._prop_dict[\"street\"]\n else:\n return None",
"def parseName(self,event=None):\r\n txt = event.widget.get()\r\n\r\n resPhone = self.cPhone.search(txt)\r\n resEmail = self.cEmail.search(txt)\r\n resName = self.cName.search(txt)\r\n \r\n if resPhone!=None:\r\n self.phone.setVal(resPhone.group())\r\n\r\n if resEmail!=None:\r\n self.email.setVal(resEmail.group())\r\n\r\n if resName!=None:\r\n if resName.group('title')==None:\r\n self.title.setVal(\"Mr.\")\r\n else:\r\n self.title.setVal(resName.group('title'))\r\n self.first.setVal(resName.group('first'))\r\n if resName.group('middle')==None:\r\n self.middle.setVal(\"\")\r\n else:\r\n self.middle.setVal(resName.group('middle'))\r\n self.last.setVal(resName.group('last'))"
] | [
"0.73314106",
"0.66084766",
"0.6545703",
"0.6426236",
"0.62570745",
"0.61638844",
"0.6144636",
"0.6112116",
"0.6097728",
"0.6069476",
"0.60527843",
"0.60126626",
"0.5993045",
"0.5958215",
"0.5957148",
"0.5909725",
"0.5891397",
"0.58875704",
"0.58830124",
"0.58734214",
"0.58724535",
"0.58716965",
"0.5867482",
"0.58501774",
"0.58483565",
"0.5840585",
"0.574357",
"0.57036865",
"0.56956553",
"0.5694534"
] | 0.75930023 | 0 |
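The row that closes above lists candidate code snippets and, in the same positions of the following list, what appear to be their score strings, before ending with two trailing values for the positive document itself. A minimal sketch (not part of the dataset) of how those two parallel lists might be paired and inspected; the helper name is an assumption and the toy values are truncated copies of entries from the row above:

```python
# Hypothetical helper: pair each candidate snippet with its score string, best-first.
def rank_candidates(negatives, negative_scores):
    pairs = [(float(score), code) for score, code in zip(negative_scores, negatives)]
    return sorted(pairs, key=lambda p: p[0], reverse=True)

# Toy values shaped like the row above (code strings alongside score strings).
ranked = rank_candidates(
    ["def get_address(address): ...", "def street_name(self): ..."],
    ["0.73314106", "0.66084766"],
)
for score, snippet in ranked:
    print(f"{score:.4f}  {snippet[:40]}")
```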
Parse a Street Type | def streettype(self):
if self.index >= self.length:
return False
self._typ, n = self.parse_streettype()
if self._typ is not None:
self.idx_typ = self.index
self.index += n
if self.index < self.length and self.words[self.index]['word'] == '.':
self.index += 1
if self.index < self.length and self.words[self.index]['word'] == ',':
self.index += 1
if self._debug: print("TYP", self._typ, self.idx_typ)
self.isaddr = True
return True
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parse_streettype(self):\n \n\n try:\n word = sttype[self.words[self.index]['word']]\n if Vocabulary.STREET_TYPE in word['tag']:\n itag = word['tag'].index(Vocabulary.STREET_TYPE)\n lemma = word['lemma'][itag]\n return lemma, 1\n return None, 0\n except: return None, 0",
"def street_parser(*street_data):\n\n # parsing tuples\n if len(street_data) == 2:\n if not isinstance(street_data[0], str) and not isinstance(street_data[1], str):\n raise WrongInput(\"Invalid format\")\n # street name as the tuple's first item\n strname, strnumber = street_data\n # street number as the tuple's first item\n if street_data[0][0] in digits:\n strname, strnumber = strnumber, strname\n\n # parsing strings\n else:\n if not isinstance(street_data[0], str):\n raise WrongInput(\"Invalid format\")\n if not street_data[0]:\n raise WrongInput(\"Input cannot be blank\")\n\n # string starting with street number\n if street_data[0][0] in digits:\n street_pattern = re.compile(r'''\n ^ # beginning of string\n (\\d+) # street number is any number of digits\n \\W+ # separator\n (\\w+\\W*\\w*\\W*) # street name is one or more words with optional separators\n $ # end of string\n ''', re.VERBOSE)\n street_obj = street_pattern.search(street_data[0])\n strnumber, strname = street_obj.groups()\n\n # string starting with street name\n else:\n street_pattern = re.compile(r'''\n ^ # beginning of string\n (\\w+\\W*\\w*\\s*) # street name is one or more words with optional separators\n \\W+ # separator\n (\\d+) # street number is any number of digits\n $ # end of string\n ''', re.VERBOSE)\n street_obj = street_pattern.search(street_data[0])\n (strname, strnumber) = street_obj.groups()\n\n # replace specific words in street name with their abbreviates\n strname = strname.lower()\n special = {r'\\baleje\\b': 'Al.', r'\\bavenue\\b': 'Av.', r'\\broad\\b': 'Rd.', r'\\bsquare\\b': 'Sq.',\n r'\\bstreet\\b': 'St.', r'\\bdrive\\b': 'Dr.'}\n for key in special:\n strname = re.sub(key, special[key], strname)\n return strname.title(), strnumber",
"def process_capture_groups(group):\n group = group[0][0].replace(',', '')\n group = group.split(' ')\n street_type = group[-1]\n street_name = ' '.join(group[:-1])\n if street_type in STREET_ABBREVIATION_TO_NAME.keys():\n street_type = STREET_ABBREVIATION_TO_NAME[street_type]\n return street_name, street_type",
"def parse_address_street(address_str, address_zip_us_re, address_zip_us_lax_re):\n address = {}\n errors = []\n parts = address_str.split('$')\n if DEBUG:\n address['debug_address_str'] = address_str\n address['debug_part_1'] = parts[0]\n address['debug_part_last'] = parts[-1]\n address['debug_length'] = len(parts)\n #if len(parts) == 1:\n #print('cannot split: {}: {}'.format(debug_type, address_str))\n match = re.search(address_zip_us_re, parts[-1])\n if match:\n if DEBUG:\n address['debug_parser'] = 'A'\n address['city'] = match.group(1)\n address['region'] = match.group(2).upper()\n address['postalCode'] = match.group(3)\n address['countryId'] = 'US'\n if len(parts) == 2:\n if DEBUG:\n address['debug_parser'] = 'B'\n address['addressLine1'] = parts[0]\n else:\n if len(parts) == 3:\n if DEBUG:\n address['debug_parser'] = 'C'\n address['addressLine1'] = parts[0]\n if parts[0] != parts[1]:\n if DEBUG:\n address['debug_parser'] = 'D'\n address['addressLine2'] = parts[1]\n else:\n match2 = re.search(address_zip_us_lax_re, address_str)\n if match2:\n if DEBUG:\n address['debug_parser'] = 'E'\n address['region'] = match2.group(2).upper()\n address['postalCode'] = match2.group(3)\n address['countryId'] = 'US'\n # FIXME: Cannot reliably parse the remainder for city and street address\n errors.append('Partial parse street address: {}'.format(address_str))\n address['addressLine1'] = match2.group(1)\n else:\n # This is the remainder that we could not parse.\n # So just put it all into \"addressLine1\" to be manually adjusted later.\n if DEBUG:\n address['debug_parser'] = 'F'\n errors.append('Cannot parse street address: {}'.format(address_str))\n address['addressLine1'] = address_str\n return (address, errors)",
"def street_address(full_address):\n full_address = normalise_address(full_address)\n capture_groups = ROAD_PATTERN.findall(full_address)\n if capture_groups:\n name, _type = process_capture_groups(capture_groups)\n return '{} {}'.format(name, _type)\n return ''",
"def audit_street_type(street_types, street_name):\n m = street_type_re.search(street_name)\n word_list=street_name.split()\n n=capital.match(word_list[-1])\n if m:\n street_type = m.group()\n if street_type not in expected_types:\n try:\n if word_list[-2]==\"Avenue\" and n:\n pass\n else:\n street_types[street_type].add(street_name)\n except IndexError:\n pass",
"def extract_unit_num_street(self, address):\n\n unit = street_num = street = None\n\n address_parts = self.clean(address).split()\n\n try:\n street_num = int(address_parts[1])\n street = address_parts[2]\n unit = int(address_parts[0])\n\n except IndexError:\n raise InvalidAddress(address)\n\n except ValueError:\n\n try:\n street_num = int(address_parts[0])\n except ValueError:\n # Not even a number\n raise InvalidAddress(address)\n\n street = address_parts[1]\n\n return (unit, street_num, street)",
"def parse_streetname(self):\n index = self.index\n \n name = \"\"\n for i in range(4):\n if index + i == self.length:\n break\n if self.words[index+i]['word'] == ',':\n break\n # Hack\n if self.words[index+i]['word'] == 'doctor':\n self.words[index+i]['word'] = 'drive'\n break\n try:\n word = sttype[self.words[index+i]['word']]\n break\n except:\n try:\n word = vocab[self.words[index+i]['word']]\n if Vocabulary.STREET_TYPE in word['tag']:\n break\n if name != '':\n name += ' ' + word['lemma'][0]\n else:\n name = word['lemma'][0]\n except: \n if self.words[index+i]['word'][-2:] in [ 'th', 'st', 'nd', 'rd' ]:\n name = self.words[index+i]['word'][:-2]\n else:\n self.index += i\n _dir, _n = self.parse_streetdir()\n self.index -= i\n if _dir:\n break\n if name != '':\n name += ' ' + self.words[index+i]['word']\n else:\n name = self.words[index+i]['word']\n \n if i == 0 or i == 4:\n return None, 0\n else:\n return name, i",
"def street_type():\r\n return _random.choice(\r\n [\r\n \"Abbey\", \"Acres\", \"Allée\", \"Alley\", \"Autoroute\", \"Avenue\",\r\n \"Bay\", \"Beach\", \"Bend\", \"Boulevard\", \"By-pass\", \"Byway\",\r\n \"Campus\", \"Cape\", \"Carré\", \"Carrefour\", \"Centre\", \"Cercle\",\r\n \"Chase\", \"Chemin\", \"Circle\", \"Circuit\", \"Close\", \"Common\",\r\n \"Concession\", \"Corners\", \"Côte\", \"Cour\", \"Cours\", \"Court\",\r\n \"Cove\", \"Crescent\", \"Croissant\", \"Crossing\", \"Cul-de-sac\"\r\n \"Dale\", \"Dell\", \"Diversion\", \"Downs\", \"Drive\", \"Échangeur\",\r\n \"End\", \"Esplanade\", \"Estates\", \"Expressway\", \"Extension\",\r\n \"Farm\", \"Field\", \"Forest\", \"Freeway\", \"Front\", \"Gardens\",\r\n \"Gate\", \"Glade\", \"Glen\", \"Green\", \"Grounds\", \"Grove\",\r\n \"Harbour\", \"Heath\", \"Heights\", \"Highlands\", \"Highway\",\r\n \"Hill\", \"Hollow\", \"Île\", \"Impasse\", \"Inlet\", \"Island\",\r\n \"Key\", \"Knoll\", \"Landing\", \"Lane\", \"Limits\", \"Line\",\r\n \"Link\", \"Lookout\", \"Loop\", \"Mall\", \"Manor\", \"Maze\",\r\n \"Meadow\", \"Mews\", \"Montée\", \"Moor\", \"Mount\", \"Mountain\",\r\n \"Orchard\", \"Parade\", \"Parc\", \"Park\", \"Parkway\",\r\n \"Passage\", \"Path\", \"Pathway\", \"Pines\", \"Place\",\r\n \"Plateau\", \"Plaza\", \"Point\", \"Pointe\", \"Port\",\r\n \"Private\", \"Promenade\", \"Quai\", \"Quay\", \"Ramp\",\r\n \"Rang\", \"Range\", \"Ridge\", \"Rise\", \"Road\",\r\n \"Rond-point\" \"Route\", \"Row\", \"Rue\", \"Ruelle\",\r\n \"Run\", \"Sentier\", \"Square\", \"Street\", \"Subdivision\",\r\n \"Terrace\", \"Terrasse\", \"Thicket\", \"Towers\",\r\n \"Townline\", \"Trail\", \"Turnabout\", \"Vale\", \"Via\",\r\n \"View\", \"Village\", \"Villas\", \"Vista\", \"Voie\", \"Walk\",\r\n \"Way\", \"Wharf\", \"Wood\", \"Wynd\"\r\n ]\r\n )",
"def _address_type(self, address):\n parsed_type = None\n parsed = urlparse.urlparse(address)\n if parsed.scheme not in ('http', 'https', 'ipc', 'tcp'):\n raise ValueError('Invalid volttron central address.')\n\n return parsed.scheme",
"def parse_address(self, address: str) -> Optional[Address]:\n raise NotImplemented",
"def parse_address(address, sanity=True):\n address = address.split(':')\n address, port = ':'.join(address[:-1]), address[-1]\n\n guessed_type = 4\n if address.startswith('['):\n address = address[1:]\n guessed_type = 6\n if address.endswith(']') or (sanity and guessed_type == 6):\n if sanity:\n assert address.endswith(']')\n address = address[:-1]\n guessed_type = 6\n if address.count(':') > 3:\n if sanity:\n assert guessed_type == 6\n guessed_type = 6\n\n return address, int(port), guessed_type",
"def audit_street_type(street_types, street_name):\n # search for street name\n m = street_type_re.search(street_name)\n if m:\n street_type = m.group(2)\n if street_type.lower() not in expected:\n # street type unexpected, add to dictionary\n street_types[street_type].add(street_name)",
"def _parse_place_types(types_str, delimiter='|'):\n return [p.strip() for p in types_str.split(delimiter)\\\n if p.strip() in places.TYPES]",
"def address_type(self) -> str:\n return pulumi.get(self, \"address_type\")",
"def parse(self, src, line):\n r = line.split('\\t')\n p = {}\n if src == 'sf':\n p['businessID'] = r[0]\n p['name'] = r[1]\n p['address'] = r[2]\n p['city'] = r[3]\n p['state'] = r[4]\n p['zip'] = r[5]\n p['latitude'] = r[6]\n p['longitude'] = r[7]\n p['phone'] = r[8]\n elif src == 'nyc':\n p['businessID'] = r[0]\n p['name'] = r[1]\n # nyc separates the building number from the street name\n p['address'] = ' '.join([r[3].strip(), r[4].strip()])\n p['city'] = 'NYC'\n p['state'] = 'NY'\n p['zip'] = r[5]\n p['latitude'] = None\n p['longitude'] = None\n p['phone'] = r[6]\n return p",
"def parse_address(address: str) -> OrderedDict[str, str]:\n\n address = address.replace(\"\\n\", \", \")\n\n parsed_address, address_type = usaddress.tag(address)\n if address_type != \"Street Address\":\n logger.warning(\n f\"Couldn't parse address '{address}' of type {address_type}; best guess: {parsed_address}\"\n )\n\n # Fixup: At least one address has \"WI, USA\" in the \"StateName\" component.\n # Strip non-state components\n if parsed_address.get(\"StateName\"):\n parsed_address[\"StateName\"] = parsed_address[\"StateName\"].partition(\",\")[0]\n\n return parsed_address",
"def _parse_location(self, response):\n if \"1700 S. Wentworth\" in response.text:\n return {\n \"address\": \"1700 S. Wentworth Avenue, Chicago, Illinois\",\n \"name\": \"Leonard M. Louie Fieldhouse\",\n }\n elif \"Zoom\" in response.text:\n return {\n \"address\": \"\",\n \"name\": \"Zoom\",\n }\n else:\n raise ValueError(\"Meeting address has changed\")",
"def _get_address_type(self):\n return self.__address_type",
"def audit_street_type(street_types, street_name):\n\n match = street_type_re.search(street_name)\n if match:\n street_type = match.group()\n if street_type not in expected:\n street_types[street_type].add(street_name)",
"def get_address(address: str) -> Tuple[str, str, str]:\n\n # Try to geocode the address as given\n g = geocoder.osm(address)\n\n if g.json is not None:\n\n # TODO this is inefficient and hacky\n\n # First thing we attempt if the result isn't complete is just to\n # add the housenumber (often the issue).\n if not good_geocoder_result(g.json):\n g.json['housenumber'] = usaddress.tag(address)[0]['AddressNumber']\n\n # If the result is now good, return it\n if good_geocoder_result(g.json):\n\n # Geocoding was successful. Return the result\n return (\n # First part is a nicely formatted address\n f\"{g.json['housenumber']} {g.json['street']}, {g.json['city']}, {g.json['state']} {g.json['postal']}\",\n # Second is the latitude\n g.json['lat'],\n # And third is the longitude\n g.json['lng']\n )\n\n # Geocoding was unsuccessful.\n # Let's try to create a cleaner address by first parsing out the pieces we need, then try again.\n \n # Parsing the address components...\n parsed, addr_type = usaddress.tag(address)\n if addr_type != \"Street Address\":\n raise ValueError(f\"Address could not be properly parsed. Resulting type: {addr_type}. Result: \\n{parsed}\")\n \n # Trim off any whitespace from the parsed components.\n for part in parsed:\n parsed[part] = parsed[part].strip()\n\n reqd_address_parts = ['AddressNumber', 'StreetName', 'PlaceName']\n if any(address_part not in parsed for address_part in reqd_address_parts):\n raise ValueError(f\"The address must have at least a house number, street, and city.\")\n \n # Initialize the resulting address string with the address number (aka house/street number)\n new_address = parsed['AddressNumber']\n \n # If the streetname is just a number, make it ordinal\n if parsed['StreetName'].isnumeric():\n parsed['StreetName'] = ordinal(parsed['StreetName'])\n \n # Get the whole street name\n for k, v in [(k, v) for k, v in parsed.items() if k.startswith(\"StreetName\")]:\n new_address += f\" {v}\"\n \n # Add the city...\n new_address += f\", {parsed['PlaceName']}\"\n # Add the state, if it exists\n if 'StateName' in parsed:\n new_address += f\", {parsed['StateName']}\"\n # And the zip code, if it exists\n if 'ZipCode' in parsed:\n new_address += f\" {parsed['ZipCode']}\"\n \n # Now try to geocode this improved address\n g = geocoder.osm(new_address)\n\n if g.json is not None:\n\n # Geocoding was successful. Return the result\n return (\n # First part is a nicely formatted address\n f\"{g.json['housenumber']} {g.json['street']}, {g.json['city']}, {g.json['state']} {g.json['postal']}\",\n # Second is the latitude\n g.json['lat'],\n # And third is the longitude\n g.json['lng']\n )\n \n # Still can't geocode the address. Throw an error\n else:\n raise ValueError(f\"Could not geocode this address: {address}\")",
"def address_regex(self) -> Any:",
"def parse_address_campus(address_str, address_campus_re, address_campus_room_re):\n address = {}\n errors = []\n if '$' not in address_str:\n match = re.search(address_campus_room_re, address_str)\n if match:\n address['addressLine1'] = match.group(1)\n else:\n # This leftover is either an erroneous email address or a building name\n if '@' in address_str:\n errors.append('Campus address seems to be email: {}'.format(address_str))\n #FIXME: Should this be saved to addressLine1 anyway.\n else:\n # It seems to be a building address\n address['addressLine2'] = address_str\n else:\n match = re.search(address_campus_re, address_str)\n if match:\n address['addressLine2'] = match.group(1)\n address['addressLine1'] = match.group(2)\n #else:\n # FIXME: here just for debug\n #errors.append('Cannot parse campus address: {}'.format(address_str))\n return (address, errors)",
"def __parse__(self, filter):\n \n if filter == 'zipcode':\n # Return 5 digit zip or, if applicable, Concatenate 5 digit and \n # 4 digit zipcode\n if self.data['Mailing Zip 4']:\n return \"%s-%s\" %(str(self.data['Mailing Zip Code'])[:-2],\n str(self.data['Mailing Zip 4'])[:-2]\n )\n else:\n return str(self.data['Mailing Zip Code'])[:-2]\n elif filter == 'employee_count':\n # Convert employee count string to digit\n pattern = '.+to\\s([0-9]+)'\n try:\n return re.findall(\n pattern, self.data['Location Employee Size Range'])[0]\n except IndexError:\n pass\n elif filter == 'phone':\n # Regex phone number digits and concatenate\n number = ''.join(re.findall('[0-9]+', \n self.data['Phone Number Combined']))\n return number if len(number) == 10 else 0",
"def street_type():\r\n\r\n cursor.execute('SELECT * FROM street_types \\\r\n order by RANDOM() limit 1;')\r\n return cursor.fetchone()[0]",
"def parse_building_address(addr_string):\n addr_string = re.sub(_regexp, '', addr_string)\n addr_string = re.sub(r'(?P<key>[a-zA-Z]+)', _replace_dir, addr_string)\n addr_string = re.sub(r'(?P<key>[a-zA-Z]+)', _replace_suffix, addr_string)\n addr_string = re.sub(_regexp_extra_space, ' ', addr_string)\n return addr_string.strip().upper()",
"def phone_parser(phone, mode='PL'):\n\n if not phone:\n raise WrongInput(\"Input cannot be blank\")\n if not isinstance(phone, str):\n raise WrongInput(\"Invalid phone format\")\n\n if mode == 'PL':\n gsm_prefixes = ['50', '51', '53', '57', '60', '66', '69', '72', '73', '78', '79', '88']\n if phone[:2] in gsm_prefixes:\n phone_pattern = re.compile(r'''\n # don't match beginning of string\n (\\d{0,2}) # area code of 2 digits (e.g. '42')\n \\D* # optional separator\n (\\d{3}\\D*\\d{3}\\D*\\d{3}) # rest of number - divide into 3 3-digit sequences with optional separators\n # (e.g. '605-789-567')\n $ # end of string\n ''', re.VERBOSE)\n else:\n phone_pattern = re.compile(r'''\n # don't match beginning of string\n (\\d{0,2}) # area code of 2 digits (e.g. '42')\n \\D* # optional separator\n (\\d{3}\\D*\\d{2}\\D*\\d{2}) # rest of number - divide into 3 2-digit sequences with optional separators\n # (e.g. '605-78-56')\n $ # end of string\n ''', re.VERBOSE)\n else:\n phone_pattern = re.compile(r'''\n # don't match the beginning of the string\n (\\d{3}) # area code of 3 digits (e.g. '800')\n \\D* # optional separator\n (\\d{3}\\D*\\d{4}\\D*\\d+) # rest of number - divide into 3 sequences with optional separators: two obligatory\n # with 3 and 4 digits, one optional with any number of digits\n $ # end of string\n ''', re.VERBOSE)\n if not re.search(phone_pattern, phone):\n raise WrongInput(\"Invalid phone format.\")\n\n phone_obj = phone_pattern.search(phone)\n phone_area, phone_num = phone_obj.groups()\n phone = re.sub(r'\\D', '', phone_num)\n return phone, phone_area, phone_num",
"def postcode(full_address):\n return capture_address_element(POSTCODE_PATTERN, full_address)",
"def update_street_name(name, mapping):\r\n m = street_type_re.search(name)\r\n if m:\r\n street_type = m.group()\r\n if street_type in list(mapping.keys()):\r\n better_street_type = mapping[street_type]\r\n name = street_type_re.sub(better_street_type, name)\r\n return name",
"def parse_phone(s):\n pattern = '''\n ^\\s* # Leading spaces\n (?P<areacode>\n \\d{3}-? # \"xxx\" or \"xxx-\"\n | \\(\\d{3}\\)\\s* # OR \"(xxx) \"\n )\n (?P<prefix>\\d{3}) # xxx\n -? # Dash (optional)\n (?P<suffix>\\d{4}) # xxxx\n \\s*$ # Trailing spaces\n '''\n matcher = re.compile(pattern, re.VERBOSE)\n matches = matcher.match(s)\n if matches is None:\n print(s)\n return s\n else:\n areacode = re.search('\\d{3}', matches.group ('areacode')).group()\n prefix = matches.group ('prefix')\n suffix = matches.group ('suffix')\n return areacode+'-'+prefix+'-'+suffix"
] | [
"0.7352092",
"0.646472",
"0.63622284",
"0.6181158",
"0.60935646",
"0.6071878",
"0.6047718",
"0.59844154",
"0.5948823",
"0.57437384",
"0.56188196",
"0.5609972",
"0.55859494",
"0.55766195",
"0.5505224",
"0.5481181",
"0.54733306",
"0.5466849",
"0.5461348",
"0.5458719",
"0.54111236",
"0.5364625",
"0.53318197",
"0.5296516",
"0.52675307",
"0.5222905",
"0.5216094",
"0.5212568",
"0.51949155",
"0.51902467"
] | 0.66800004 | 1 |
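The metadata block in this row declares a triplet objective over the query, document, and negatives columns. A hedged sketch of how one such row could be expanded into (anchor, positive, negative) training triplets, assuming the row is loaded as a plain dict keyed by those column names:

```python
def to_triplets(row):
    """Yield one (anchor, positive, negative) triplet per candidate snippet."""
    for negative in row["negatives"]:
        yield (row["query"], row["document"], negative)

# Toy row mirroring the entry above, with the code strings truncated.
example_row = {
    "query": "Parse a Street Type",
    "document": "def streettype(self): ...",
    "negatives": ["def parse_streettype(self): ...", "def street_parser(*street_data): ..."],
}
print(len(list(to_triplets(example_row))))  # -> 2
```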
Parse a Street Type | def parse_streettype(self):
try:
word = sttype[self.words[self.index]['word']]
if Vocabulary.STREET_TYPE in word['tag']:
itag = word['tag'].index(Vocabulary.STREET_TYPE)
lemma = word['lemma'][itag]
return lemma, 1
return None, 0
except: return None, 0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def streettype(self):\n if self.index >= self.length:\n return False\n \n self._typ, n = self.parse_streettype()\n if self._typ is not None:\n self.idx_typ = self.index\n self.index += n\n if self.index < self.length and self.words[self.index]['word'] == '.':\n self.index += 1\n if self.index < self.length and self.words[self.index]['word'] == ',':\n self.index += 1\n if self._debug: print(\"TYP\", self._typ, self.idx_typ)\n self.isaddr = True\n return True\n return False",
"def street_parser(*street_data):\n\n # parsing tuples\n if len(street_data) == 2:\n if not isinstance(street_data[0], str) and not isinstance(street_data[1], str):\n raise WrongInput(\"Invalid format\")\n # street name as the tuple's first item\n strname, strnumber = street_data\n # street number as the tuple's first item\n if street_data[0][0] in digits:\n strname, strnumber = strnumber, strname\n\n # parsing strings\n else:\n if not isinstance(street_data[0], str):\n raise WrongInput(\"Invalid format\")\n if not street_data[0]:\n raise WrongInput(\"Input cannot be blank\")\n\n # string starting with street number\n if street_data[0][0] in digits:\n street_pattern = re.compile(r'''\n ^ # beginning of string\n (\\d+) # street number is any number of digits\n \\W+ # separator\n (\\w+\\W*\\w*\\W*) # street name is one or more words with optional separators\n $ # end of string\n ''', re.VERBOSE)\n street_obj = street_pattern.search(street_data[0])\n strnumber, strname = street_obj.groups()\n\n # string starting with street name\n else:\n street_pattern = re.compile(r'''\n ^ # beginning of string\n (\\w+\\W*\\w*\\s*) # street name is one or more words with optional separators\n \\W+ # separator\n (\\d+) # street number is any number of digits\n $ # end of string\n ''', re.VERBOSE)\n street_obj = street_pattern.search(street_data[0])\n (strname, strnumber) = street_obj.groups()\n\n # replace specific words in street name with their abbreviates\n strname = strname.lower()\n special = {r'\\baleje\\b': 'Al.', r'\\bavenue\\b': 'Av.', r'\\broad\\b': 'Rd.', r'\\bsquare\\b': 'Sq.',\n r'\\bstreet\\b': 'St.', r'\\bdrive\\b': 'Dr.'}\n for key in special:\n strname = re.sub(key, special[key], strname)\n return strname.title(), strnumber",
"def process_capture_groups(group):\n group = group[0][0].replace(',', '')\n group = group.split(' ')\n street_type = group[-1]\n street_name = ' '.join(group[:-1])\n if street_type in STREET_ABBREVIATION_TO_NAME.keys():\n street_type = STREET_ABBREVIATION_TO_NAME[street_type]\n return street_name, street_type",
"def parse_address_street(address_str, address_zip_us_re, address_zip_us_lax_re):\n address = {}\n errors = []\n parts = address_str.split('$')\n if DEBUG:\n address['debug_address_str'] = address_str\n address['debug_part_1'] = parts[0]\n address['debug_part_last'] = parts[-1]\n address['debug_length'] = len(parts)\n #if len(parts) == 1:\n #print('cannot split: {}: {}'.format(debug_type, address_str))\n match = re.search(address_zip_us_re, parts[-1])\n if match:\n if DEBUG:\n address['debug_parser'] = 'A'\n address['city'] = match.group(1)\n address['region'] = match.group(2).upper()\n address['postalCode'] = match.group(3)\n address['countryId'] = 'US'\n if len(parts) == 2:\n if DEBUG:\n address['debug_parser'] = 'B'\n address['addressLine1'] = parts[0]\n else:\n if len(parts) == 3:\n if DEBUG:\n address['debug_parser'] = 'C'\n address['addressLine1'] = parts[0]\n if parts[0] != parts[1]:\n if DEBUG:\n address['debug_parser'] = 'D'\n address['addressLine2'] = parts[1]\n else:\n match2 = re.search(address_zip_us_lax_re, address_str)\n if match2:\n if DEBUG:\n address['debug_parser'] = 'E'\n address['region'] = match2.group(2).upper()\n address['postalCode'] = match2.group(3)\n address['countryId'] = 'US'\n # FIXME: Cannot reliably parse the remainder for city and street address\n errors.append('Partial parse street address: {}'.format(address_str))\n address['addressLine1'] = match2.group(1)\n else:\n # This is the remainder that we could not parse.\n # So just put it all into \"addressLine1\" to be manually adjusted later.\n if DEBUG:\n address['debug_parser'] = 'F'\n errors.append('Cannot parse street address: {}'.format(address_str))\n address['addressLine1'] = address_str\n return (address, errors)",
"def street_address(full_address):\n full_address = normalise_address(full_address)\n capture_groups = ROAD_PATTERN.findall(full_address)\n if capture_groups:\n name, _type = process_capture_groups(capture_groups)\n return '{} {}'.format(name, _type)\n return ''",
"def audit_street_type(street_types, street_name):\n m = street_type_re.search(street_name)\n word_list=street_name.split()\n n=capital.match(word_list[-1])\n if m:\n street_type = m.group()\n if street_type not in expected_types:\n try:\n if word_list[-2]==\"Avenue\" and n:\n pass\n else:\n street_types[street_type].add(street_name)\n except IndexError:\n pass",
"def extract_unit_num_street(self, address):\n\n unit = street_num = street = None\n\n address_parts = self.clean(address).split()\n\n try:\n street_num = int(address_parts[1])\n street = address_parts[2]\n unit = int(address_parts[0])\n\n except IndexError:\n raise InvalidAddress(address)\n\n except ValueError:\n\n try:\n street_num = int(address_parts[0])\n except ValueError:\n # Not even a number\n raise InvalidAddress(address)\n\n street = address_parts[1]\n\n return (unit, street_num, street)",
"def parse_streetname(self):\n index = self.index\n \n name = \"\"\n for i in range(4):\n if index + i == self.length:\n break\n if self.words[index+i]['word'] == ',':\n break\n # Hack\n if self.words[index+i]['word'] == 'doctor':\n self.words[index+i]['word'] = 'drive'\n break\n try:\n word = sttype[self.words[index+i]['word']]\n break\n except:\n try:\n word = vocab[self.words[index+i]['word']]\n if Vocabulary.STREET_TYPE in word['tag']:\n break\n if name != '':\n name += ' ' + word['lemma'][0]\n else:\n name = word['lemma'][0]\n except: \n if self.words[index+i]['word'][-2:] in [ 'th', 'st', 'nd', 'rd' ]:\n name = self.words[index+i]['word'][:-2]\n else:\n self.index += i\n _dir, _n = self.parse_streetdir()\n self.index -= i\n if _dir:\n break\n if name != '':\n name += ' ' + self.words[index+i]['word']\n else:\n name = self.words[index+i]['word']\n \n if i == 0 or i == 4:\n return None, 0\n else:\n return name, i",
"def street_type():\r\n return _random.choice(\r\n [\r\n \"Abbey\", \"Acres\", \"Allée\", \"Alley\", \"Autoroute\", \"Avenue\",\r\n \"Bay\", \"Beach\", \"Bend\", \"Boulevard\", \"By-pass\", \"Byway\",\r\n \"Campus\", \"Cape\", \"Carré\", \"Carrefour\", \"Centre\", \"Cercle\",\r\n \"Chase\", \"Chemin\", \"Circle\", \"Circuit\", \"Close\", \"Common\",\r\n \"Concession\", \"Corners\", \"Côte\", \"Cour\", \"Cours\", \"Court\",\r\n \"Cove\", \"Crescent\", \"Croissant\", \"Crossing\", \"Cul-de-sac\"\r\n \"Dale\", \"Dell\", \"Diversion\", \"Downs\", \"Drive\", \"Échangeur\",\r\n \"End\", \"Esplanade\", \"Estates\", \"Expressway\", \"Extension\",\r\n \"Farm\", \"Field\", \"Forest\", \"Freeway\", \"Front\", \"Gardens\",\r\n \"Gate\", \"Glade\", \"Glen\", \"Green\", \"Grounds\", \"Grove\",\r\n \"Harbour\", \"Heath\", \"Heights\", \"Highlands\", \"Highway\",\r\n \"Hill\", \"Hollow\", \"Île\", \"Impasse\", \"Inlet\", \"Island\",\r\n \"Key\", \"Knoll\", \"Landing\", \"Lane\", \"Limits\", \"Line\",\r\n \"Link\", \"Lookout\", \"Loop\", \"Mall\", \"Manor\", \"Maze\",\r\n \"Meadow\", \"Mews\", \"Montée\", \"Moor\", \"Mount\", \"Mountain\",\r\n \"Orchard\", \"Parade\", \"Parc\", \"Park\", \"Parkway\",\r\n \"Passage\", \"Path\", \"Pathway\", \"Pines\", \"Place\",\r\n \"Plateau\", \"Plaza\", \"Point\", \"Pointe\", \"Port\",\r\n \"Private\", \"Promenade\", \"Quai\", \"Quay\", \"Ramp\",\r\n \"Rang\", \"Range\", \"Ridge\", \"Rise\", \"Road\",\r\n \"Rond-point\" \"Route\", \"Row\", \"Rue\", \"Ruelle\",\r\n \"Run\", \"Sentier\", \"Square\", \"Street\", \"Subdivision\",\r\n \"Terrace\", \"Terrasse\", \"Thicket\", \"Towers\",\r\n \"Townline\", \"Trail\", \"Turnabout\", \"Vale\", \"Via\",\r\n \"View\", \"Village\", \"Villas\", \"Vista\", \"Voie\", \"Walk\",\r\n \"Way\", \"Wharf\", \"Wood\", \"Wynd\"\r\n ]\r\n )",
"def _address_type(self, address):\n parsed_type = None\n parsed = urlparse.urlparse(address)\n if parsed.scheme not in ('http', 'https', 'ipc', 'tcp'):\n raise ValueError('Invalid volttron central address.')\n\n return parsed.scheme",
"def parse_address(self, address: str) -> Optional[Address]:\n raise NotImplemented",
"def parse_address(address, sanity=True):\n address = address.split(':')\n address, port = ':'.join(address[:-1]), address[-1]\n\n guessed_type = 4\n if address.startswith('['):\n address = address[1:]\n guessed_type = 6\n if address.endswith(']') or (sanity and guessed_type == 6):\n if sanity:\n assert address.endswith(']')\n address = address[:-1]\n guessed_type = 6\n if address.count(':') > 3:\n if sanity:\n assert guessed_type == 6\n guessed_type = 6\n\n return address, int(port), guessed_type",
"def audit_street_type(street_types, street_name):\n # search for street name\n m = street_type_re.search(street_name)\n if m:\n street_type = m.group(2)\n if street_type.lower() not in expected:\n # street type unexpected, add to dictionary\n street_types[street_type].add(street_name)",
"def _parse_place_types(types_str, delimiter='|'):\n return [p.strip() for p in types_str.split(delimiter)\\\n if p.strip() in places.TYPES]",
"def address_type(self) -> str:\n return pulumi.get(self, \"address_type\")",
"def parse(self, src, line):\n r = line.split('\\t')\n p = {}\n if src == 'sf':\n p['businessID'] = r[0]\n p['name'] = r[1]\n p['address'] = r[2]\n p['city'] = r[3]\n p['state'] = r[4]\n p['zip'] = r[5]\n p['latitude'] = r[6]\n p['longitude'] = r[7]\n p['phone'] = r[8]\n elif src == 'nyc':\n p['businessID'] = r[0]\n p['name'] = r[1]\n # nyc separates the building number from the street name\n p['address'] = ' '.join([r[3].strip(), r[4].strip()])\n p['city'] = 'NYC'\n p['state'] = 'NY'\n p['zip'] = r[5]\n p['latitude'] = None\n p['longitude'] = None\n p['phone'] = r[6]\n return p",
"def parse_address(address: str) -> OrderedDict[str, str]:\n\n address = address.replace(\"\\n\", \", \")\n\n parsed_address, address_type = usaddress.tag(address)\n if address_type != \"Street Address\":\n logger.warning(\n f\"Couldn't parse address '{address}' of type {address_type}; best guess: {parsed_address}\"\n )\n\n # Fixup: At least one address has \"WI, USA\" in the \"StateName\" component.\n # Strip non-state components\n if parsed_address.get(\"StateName\"):\n parsed_address[\"StateName\"] = parsed_address[\"StateName\"].partition(\",\")[0]\n\n return parsed_address",
"def _parse_location(self, response):\n if \"1700 S. Wentworth\" in response.text:\n return {\n \"address\": \"1700 S. Wentworth Avenue, Chicago, Illinois\",\n \"name\": \"Leonard M. Louie Fieldhouse\",\n }\n elif \"Zoom\" in response.text:\n return {\n \"address\": \"\",\n \"name\": \"Zoom\",\n }\n else:\n raise ValueError(\"Meeting address has changed\")",
"def _get_address_type(self):\n return self.__address_type",
"def audit_street_type(street_types, street_name):\n\n match = street_type_re.search(street_name)\n if match:\n street_type = match.group()\n if street_type not in expected:\n street_types[street_type].add(street_name)",
"def get_address(address: str) -> Tuple[str, str, str]:\n\n # Try to geocode the address as given\n g = geocoder.osm(address)\n\n if g.json is not None:\n\n # TODO this is inefficient and hacky\n\n # First thing we attempt if the result isn't complete is just to\n # add the housenumber (often the issue).\n if not good_geocoder_result(g.json):\n g.json['housenumber'] = usaddress.tag(address)[0]['AddressNumber']\n\n # If the result is now good, return it\n if good_geocoder_result(g.json):\n\n # Geocoding was successful. Return the result\n return (\n # First part is a nicely formatted address\n f\"{g.json['housenumber']} {g.json['street']}, {g.json['city']}, {g.json['state']} {g.json['postal']}\",\n # Second is the latitude\n g.json['lat'],\n # And third is the longitude\n g.json['lng']\n )\n\n # Geocoding was unsuccessful.\n # Let's try to create a cleaner address by first parsing out the pieces we need, then try again.\n \n # Parsing the address components...\n parsed, addr_type = usaddress.tag(address)\n if addr_type != \"Street Address\":\n raise ValueError(f\"Address could not be properly parsed. Resulting type: {addr_type}. Result: \\n{parsed}\")\n \n # Trim off any whitespace from the parsed components.\n for part in parsed:\n parsed[part] = parsed[part].strip()\n\n reqd_address_parts = ['AddressNumber', 'StreetName', 'PlaceName']\n if any(address_part not in parsed for address_part in reqd_address_parts):\n raise ValueError(f\"The address must have at least a house number, street, and city.\")\n \n # Initialize the resulting address string with the address number (aka house/street number)\n new_address = parsed['AddressNumber']\n \n # If the streetname is just a number, make it ordinal\n if parsed['StreetName'].isnumeric():\n parsed['StreetName'] = ordinal(parsed['StreetName'])\n \n # Get the whole street name\n for k, v in [(k, v) for k, v in parsed.items() if k.startswith(\"StreetName\")]:\n new_address += f\" {v}\"\n \n # Add the city...\n new_address += f\", {parsed['PlaceName']}\"\n # Add the state, if it exists\n if 'StateName' in parsed:\n new_address += f\", {parsed['StateName']}\"\n # And the zip code, if it exists\n if 'ZipCode' in parsed:\n new_address += f\" {parsed['ZipCode']}\"\n \n # Now try to geocode this improved address\n g = geocoder.osm(new_address)\n\n if g.json is not None:\n\n # Geocoding was successful. Return the result\n return (\n # First part is a nicely formatted address\n f\"{g.json['housenumber']} {g.json['street']}, {g.json['city']}, {g.json['state']} {g.json['postal']}\",\n # Second is the latitude\n g.json['lat'],\n # And third is the longitude\n g.json['lng']\n )\n \n # Still can't geocode the address. Throw an error\n else:\n raise ValueError(f\"Could not geocode this address: {address}\")",
"def address_regex(self) -> Any:",
"def parse_address_campus(address_str, address_campus_re, address_campus_room_re):\n address = {}\n errors = []\n if '$' not in address_str:\n match = re.search(address_campus_room_re, address_str)\n if match:\n address['addressLine1'] = match.group(1)\n else:\n # This leftover is either an erroneous email address or a building name\n if '@' in address_str:\n errors.append('Campus address seems to be email: {}'.format(address_str))\n #FIXME: Should this be saved to addressLine1 anyway.\n else:\n # It seems to be a building address\n address['addressLine2'] = address_str\n else:\n match = re.search(address_campus_re, address_str)\n if match:\n address['addressLine2'] = match.group(1)\n address['addressLine1'] = match.group(2)\n #else:\n # FIXME: here just for debug\n #errors.append('Cannot parse campus address: {}'.format(address_str))\n return (address, errors)",
"def __parse__(self, filter):\n \n if filter == 'zipcode':\n # Return 5 digit zip or, if applicable, Concatenate 5 digit and \n # 4 digit zipcode\n if self.data['Mailing Zip 4']:\n return \"%s-%s\" %(str(self.data['Mailing Zip Code'])[:-2],\n str(self.data['Mailing Zip 4'])[:-2]\n )\n else:\n return str(self.data['Mailing Zip Code'])[:-2]\n elif filter == 'employee_count':\n # Convert employee count string to digit\n pattern = '.+to\\s([0-9]+)'\n try:\n return re.findall(\n pattern, self.data['Location Employee Size Range'])[0]\n except IndexError:\n pass\n elif filter == 'phone':\n # Regex phone number digits and concatenate\n number = ''.join(re.findall('[0-9]+', \n self.data['Phone Number Combined']))\n return number if len(number) == 10 else 0",
"def street_type():\r\n\r\n cursor.execute('SELECT * FROM street_types \\\r\n order by RANDOM() limit 1;')\r\n return cursor.fetchone()[0]",
"def parse_building_address(addr_string):\n addr_string = re.sub(_regexp, '', addr_string)\n addr_string = re.sub(r'(?P<key>[a-zA-Z]+)', _replace_dir, addr_string)\n addr_string = re.sub(r'(?P<key>[a-zA-Z]+)', _replace_suffix, addr_string)\n addr_string = re.sub(_regexp_extra_space, ' ', addr_string)\n return addr_string.strip().upper()",
"def phone_parser(phone, mode='PL'):\n\n if not phone:\n raise WrongInput(\"Input cannot be blank\")\n if not isinstance(phone, str):\n raise WrongInput(\"Invalid phone format\")\n\n if mode == 'PL':\n gsm_prefixes = ['50', '51', '53', '57', '60', '66', '69', '72', '73', '78', '79', '88']\n if phone[:2] in gsm_prefixes:\n phone_pattern = re.compile(r'''\n # don't match beginning of string\n (\\d{0,2}) # area code of 2 digits (e.g. '42')\n \\D* # optional separator\n (\\d{3}\\D*\\d{3}\\D*\\d{3}) # rest of number - divide into 3 3-digit sequences with optional separators\n # (e.g. '605-789-567')\n $ # end of string\n ''', re.VERBOSE)\n else:\n phone_pattern = re.compile(r'''\n # don't match beginning of string\n (\\d{0,2}) # area code of 2 digits (e.g. '42')\n \\D* # optional separator\n (\\d{3}\\D*\\d{2}\\D*\\d{2}) # rest of number - divide into 3 2-digit sequences with optional separators\n # (e.g. '605-78-56')\n $ # end of string\n ''', re.VERBOSE)\n else:\n phone_pattern = re.compile(r'''\n # don't match the beginning of the string\n (\\d{3}) # area code of 3 digits (e.g. '800')\n \\D* # optional separator\n (\\d{3}\\D*\\d{4}\\D*\\d+) # rest of number - divide into 3 sequences with optional separators: two obligatory\n # with 3 and 4 digits, one optional with any number of digits\n $ # end of string\n ''', re.VERBOSE)\n if not re.search(phone_pattern, phone):\n raise WrongInput(\"Invalid phone format.\")\n\n phone_obj = phone_pattern.search(phone)\n phone_area, phone_num = phone_obj.groups()\n phone = re.sub(r'\\D', '', phone_num)\n return phone, phone_area, phone_num",
"def postcode(full_address):\n return capture_address_element(POSTCODE_PATTERN, full_address)",
"def update_street_name(name, mapping):\r\n m = street_type_re.search(name)\r\n if m:\r\n street_type = m.group()\r\n if street_type in list(mapping.keys()):\r\n better_street_type = mapping[street_type]\r\n name = street_type_re.sub(better_street_type, name)\r\n return name",
"def parse_phone(s):\n pattern = '''\n ^\\s* # Leading spaces\n (?P<areacode>\n \\d{3}-? # \"xxx\" or \"xxx-\"\n | \\(\\d{3}\\)\\s* # OR \"(xxx) \"\n )\n (?P<prefix>\\d{3}) # xxx\n -? # Dash (optional)\n (?P<suffix>\\d{4}) # xxxx\n \\s*$ # Trailing spaces\n '''\n matcher = re.compile(pattern, re.VERBOSE)\n matches = matcher.match(s)\n if matches is None:\n print(s)\n return s\n else:\n areacode = re.search('\\d{3}', matches.group ('areacode')).group()\n prefix = matches.group ('prefix')\n suffix = matches.group ('suffix')\n return areacode+'-'+prefix+'-'+suffix"
] | [
"0.66808707",
"0.6464034",
"0.63617694",
"0.61796737",
"0.6092625",
"0.60736203",
"0.6047013",
"0.5983775",
"0.5950236",
"0.574458",
"0.56168497",
"0.5609434",
"0.5587649",
"0.557688",
"0.550519",
"0.548075",
"0.5472097",
"0.54645866",
"0.54608816",
"0.54602647",
"0.5409267",
"0.536113",
"0.53302985",
"0.5294926",
"0.52698916",
"0.5221772",
"0.5215344",
"0.5209974",
"0.5195666",
"0.5188649"
] | 0.73535156 | 0 |
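Each complete row above ends with two trailing values after the score list. One plausible reading, consistent with the rows visible here, is that the final integer counts how many candidate scores beat the positive document's score; this is an inference from the visible numbers, not documented behaviour. A small sketch of that reading, spot-checked against the two complete rows above (score lists truncated to their top two entries):

```python
def document_rank(document_score, negative_scores):
    """Count how many candidate scores exceed the positive document's score."""
    return sum(1 for s in negative_scores if s > document_score)

assert document_rank(0.73535156, [0.66808707, 0.6464034]) == 0
assert document_rank(0.66800004, [0.7352092, 0.646472]) == 1
```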
Parse a City and State | def parse_citystate(self):
index = self.index
if self.words[index]['tag'] != Vocabulary.NAME:
return None, None, 0, 0
if self.words[index]['word'] == 'mt':
city = "mountain"
else:
city = self.words[index]['word']
start = index
index += 1
if index == self.length:
return None, None, 0, 0
if self.words[index]['word'] == ',':
index += 1
if index == self.length:
return None, None, 0, 0
elif self.words[index]['tag'] == Vocabulary.NAME:
# Hack
state, n = self.state_hack(index)
if n > 0:
index += n
return city, state, index - start + 1, index
#if self.words[index]['word'] == 'medical doctor':
#return city, "ISO3166-2:US-MD", index - start + 1, index
try:
state = self._state_dict[self.words[index]['word']]
return city, state, index - start + 1, index
except:
city += ' ' + self.words[index]['word']
index += 1
if index == self.length:
return None, None, 0, 0
if self.words[index]['word'] == ',':
index += 1
if index == self.length:
return None, None, 0, 0
# Hack
state, n = self.state_hack(index)
if n > 0:
index += n
if index == self.length: index -= 1 # Hack
return city, state, index - start + 1, index
if self.words[index]['tag'] not in [Vocabulary.NAME, Vocabulary.ACRONYM]:
return None, None, 0, 0
try:
state = self._state_dict[self.words[index]['word']]
return city, state, index - start + 1, index
except:
return None, None, 0, 0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def city_parser(city: str = None):\n return city.text.strip().split(',')[1]",
"def parse_city_state_from_row(row):\n city, state = ['NULL', 'NULL']\n if row['Geographic area'].count(' - ') == 2:\n state, city = row['Geographic area'].lower().split(' - ')[-2:]\n city = remove_substring_from_end_of_string(city, [' city', ' cdp'])\n return pandas.Series([city, state])",
"def seperate_City_Data(data, us_state_abbrev):\n assert data is not None\n dictionary = dict(data)\n keys = dictionary.keys()\n tmp = list(keys)\n values = dictionary.values()\n res = []\n for elem in keys:\n state = elem[1].strip()\n city = elem[0].strip()\n# print(city)\n if state in us_state_abbrev:\n res.append(city)\n return res, list(values)",
"def seperate_City_State_Data(data, us_state_abbrev):\n assert data is not None\n dictionary = dict(data)\n keys = dictionary.keys()\n tmp = list(keys)\n v = list(dictionary.values())\n values = []\n res = []\n for i in range(len(keys)):\n state = tmp[i][1].strip()\n city = tmp[i][0].strip()\n# print(city)\n if state in us_state_abbrev:\n res.append((state, city))\n values.append(v[i])\n return res, list(values)",
"def process_city(state, city, locations=Locations, perror=None, do_exit=None):\n c=ConfigParser.ConfigParser()\n c.read(locations)\n l=c.options('US_%s' % state)\n d = {}\n condition_station = None\n zone = None\n for x in l:\n # info: city condition-station zone radar-code\n info = string.split(c.get('US_%s' % state, x))\n if city == string.lower(info[0]):\n if verbose:\n print 'info:', info\n if info[1] != '-'*len(info[1]):\n condition_station = info[1]\n\n if info[2] != '-'*len(info[2]):\n zone = string.upper(info[2])\n zone = zone[3:]\n\n return (condition_station, zone)\n\n if perror:\n dp_io.eprintf(\"Don't know this state/city: %s/%s\\n\",\n self.state,\n self.city)\n if do_exit:\n sys.exit(1)\n \n return None",
"def test_addr_city_good_values(self):\n for input_val, output_val in self.known_values:\n self.line._parse_addr_city(input_val)\n self.assertEqual(output_val, self.line.addr_city)",
"def extract_city(full_address):\n full_address = full_address.strip()\n last_comma_index = full_address.rindex(\",\")\n mid_comma_index = full_address.rindex(\",\", 0, last_comma_index)\n city = full_address[mid_comma_index + 1 : last_comma_index]\n city = city.strip()\n return city",
"def parse_location(location):\n city, state = location.strip().split(',')\n return f\"{city.strip().replace(' ', '-')}-{state.strip().replace(' ', '-')}\"",
"def extract_state(full_address):\n full_address = full_address.strip()\n last_comma_index = full_address.rindex(\",\")\n last_space_index = full_address.rindex(\" \")\n state = full_address[last_comma_index + 1 : last_space_index]\n state = state.strip()\n return state",
"def ad_rep_city_state(obj):\n return '%s, %s' % (obj.ad_rep.geolocation_object.us_city.name,\n obj.ad_rep.geolocation_object.us_state.abbreviation)",
"def parse_state(self, state: str):\r\n state = state.strip()\r\n state = state.split(';')\r\n\r\n if len(state) < 2:\r\n print(state)\r\n return\r\n\r\n for field in state:\r\n split = field.split(':')\r\n if len(split) < 2:\r\n continue\r\n\r\n key = split[0]\r\n value = split[1]\r\n\r\n if key in Tello.state_field_converters:\r\n try:\r\n value = Tello.state_field_converters[key](value)\r\n except Exception as e:\r\n print('Error parsing state value for {}: {} to {}'\r\n .format(key, value, Tello.state_field_converters[key]))\r\n self.state[key] = value\r\n return",
"def __convert_query(self, job, city, state):\r\n\r\n job = '+'.join(job.split(\" \"))\r\n city = city.lower()\r\n\r\n # State must be valid two letter code\r\n if len(state) != 2:\r\n raise Exception(\"State must be valid two letter code.\")\r\n state = state.upper()\r\n\r\n return job, city, state",
"def parse_usa(text: str, state: str) -> tuple:\n pattern = re.compile(\n r'\\\"statistic-module--statistic--QKc9M\\\">.*?'\n r'\\\"statistic-module--title--MZHLl\\\">(.*?)<.*?'\n r'\\\"statistic-module--value--2qXQD.*?\\\">(.*?)<'\n )\n result = pattern.findall(text)\n final_result = [state.capitalize(), -1, -1, -1, -1]\n for i, res in enumerate(result):\n n = res[1].replace(',', '')\n if not n.isdigit():\n continue\n if res[0] == 'Total cases':\n final_result[1] = int(n)\n elif res[0] == 'Recovered':\n final_result[3] = int(n)\n elif res[0] == 'Deaths' or res[0] == 'Total deaths':\n final_result[4] = int(n)\n final_result = tuple(final_result)\n return final_result",
"def get_state_info(city, state, population=''):\n if population:\n city_info = city + ' ' + state + ' - ' + population\n city_info.title()\n else:\n city_info = city + ' ' + state\n city_info.title()\n return city_info",
"def parse_province(self, pyName=None):\n self.status = {}\n if pyName:\n for city in self.root.findall(\"city\"):\n if city.get(\"pyName\") == pyName:\n self.status[\"cityname\"] = city.get(\"cityname\")\n self.status[\"stateDetailed\"] = city.get(\"stateDetailed\")\n self.status[\"temHigh\"] = city.get(\"tem2\")\n self.status[\"temLow\"] = city.get(\"tem1\")\n self.status[\"temNow\"] = city.get(\"temNow\")\n self.status[\"windState\"] = city.get(\"windState\")\n self.status[\"humidity\"] = city.get(\"humidity\")\n self.status[\"time\"] = city.get(\"time\")\n\n break\n\n return self.status\n else:\n extractMethod = lambda d: {\n k: v for k, v in d.items() if k in [\n \"stateDetailed\", \"tem1\", \"tem2\", \"windState\"]}\n for city in self.root.findall(\"city\"):\n self.status[city.get(\"cityname\")] = extractMethod(city.attrib)\n\n return self.status",
"def parse(self, src, line):\n r = line.split('\\t')\n p = {}\n if src == 'sf':\n p['businessID'] = r[0]\n p['name'] = r[1]\n p['address'] = r[2]\n p['city'] = r[3]\n p['state'] = r[4]\n p['zip'] = r[5]\n p['latitude'] = r[6]\n p['longitude'] = r[7]\n p['phone'] = r[8]\n elif src == 'nyc':\n p['businessID'] = r[0]\n p['name'] = r[1]\n # nyc separates the building number from the street name\n p['address'] = ' '.join([r[3].strip(), r[4].strip()])\n p['city'] = 'NYC'\n p['state'] = 'NY'\n p['zip'] = r[5]\n p['latitude'] = None\n p['longitude'] = None\n p['phone'] = r[6]\n return p",
"def get_coordinates_for_city(city, state=None):\n search_str = ', '.join([city, state]) if state else city\n db_coords = get_coordinates_from_db(search_str)\n if db_coords:\n return (search_str, db_coords)\n else:\n page_title, coords = get_coordinates_from_wikipedia(search_str)\n add_coordinates_to_db(coords, search_str)\n return (page_title, coords)",
"def parse_html(city, html):\n return city.parse_html(html)",
"def parse_address(address: str) -> OrderedDict[str, str]:\n\n address = address.replace(\"\\n\", \", \")\n\n parsed_address, address_type = usaddress.tag(address)\n if address_type != \"Street Address\":\n logger.warning(\n f\"Couldn't parse address '{address}' of type {address_type}; best guess: {parsed_address}\"\n )\n\n # Fixup: At least one address has \"WI, USA\" in the \"StateName\" component.\n # Strip non-state components\n if parsed_address.get(\"StateName\"):\n parsed_address[\"StateName\"] = parsed_address[\"StateName\"].partition(\",\")[0]\n\n return parsed_address",
"def _parse_user_input(self):\n user_input = self.user_input.strip()\n if user_input:\n if user_input.find(',') > -1:\n # Location is either city/state or latitude/longitude.\n if user_input[0].isalpha():\n # City, state (lat/long handled elsewhere)\n city, state = [x.strip() for x in user_input.split(',')]\n self.city = city\n self.state = state\n elif (len(user_input) <= 10 and\n user_input[1].isdigit()): # 2nd char in US/Can. postal codes\n # Postal code\n self.postal_code = user_input.strip()",
"def get_city(string):\n city = \"\"\n previous_ch = None;\n\n #For each character in string\n for ch in string:\n #break if it is a comma, the city has been completed\n if ch == \",\":\n break\n #if the character is a letter, add it to the \"city\" string\n elif ch.isalpha():\n city += ch\n #if the character is a space, and the previous character is a letter, add the space to the \"city\" string. (This prevents duplicate spaces)\n elif ch.isspace() & previous_ch.isalpha():\n city += ch\n\n #update previous character\n previous_ch = ch\n\n return city",
"def parse(self, item: str) -> Tuple[str, str]:\n try:\n city, *_, country = item.split(',')\n except ValueError:\n return '', item\n return city.strip(), country.strip()",
"def seperate_Loc_Data(data, us_state_abbrev):\n assert data is not None\n dictionary = dict(data)\n keys = dictionary.keys()\n tmp = list(keys)\n values = dictionary.values()\n res = []\n for elem in keys:\n state = elem[1].strip()\n if state in us_state_abbrev:\n res.append(us_state_abbrev[state])\n return res, list(values)",
"def parse_address_street(address_str, address_zip_us_re, address_zip_us_lax_re):\n address = {}\n errors = []\n parts = address_str.split('$')\n if DEBUG:\n address['debug_address_str'] = address_str\n address['debug_part_1'] = parts[0]\n address['debug_part_last'] = parts[-1]\n address['debug_length'] = len(parts)\n #if len(parts) == 1:\n #print('cannot split: {}: {}'.format(debug_type, address_str))\n match = re.search(address_zip_us_re, parts[-1])\n if match:\n if DEBUG:\n address['debug_parser'] = 'A'\n address['city'] = match.group(1)\n address['region'] = match.group(2).upper()\n address['postalCode'] = match.group(3)\n address['countryId'] = 'US'\n if len(parts) == 2:\n if DEBUG:\n address['debug_parser'] = 'B'\n address['addressLine1'] = parts[0]\n else:\n if len(parts) == 3:\n if DEBUG:\n address['debug_parser'] = 'C'\n address['addressLine1'] = parts[0]\n if parts[0] != parts[1]:\n if DEBUG:\n address['debug_parser'] = 'D'\n address['addressLine2'] = parts[1]\n else:\n match2 = re.search(address_zip_us_lax_re, address_str)\n if match2:\n if DEBUG:\n address['debug_parser'] = 'E'\n address['region'] = match2.group(2).upper()\n address['postalCode'] = match2.group(3)\n address['countryId'] = 'US'\n # FIXME: Cannot reliably parse the remainder for city and street address\n errors.append('Partial parse street address: {}'.format(address_str))\n address['addressLine1'] = match2.group(1)\n else:\n # This is the remainder that we could not parse.\n # So just put it all into \"addressLine1\" to be manually adjusted later.\n if DEBUG:\n address['debug_parser'] = 'F'\n errors.append('Cannot parse street address: {}'.format(address_str))\n address['addressLine1'] = address_str\n return (address, errors)",
"def citystate(self):\n if self.index >= self.length:\n return False\n \n self._cty, self._sta, n, idx_sta = self.parse_citystate()\n if self._cty is not None:\n self.idx_cty = self.index\n self.idx_sta = idx_sta\n self.index += n\n if self.index < self.length and self.words[self.index]['word'] == ',':\n self.index += 1\n if self._debug: \n print(\"CTY\", self._cty, self.idx_cty) \n print(\"STA\", self._sta, self.idx_sta)\n self.isaddr = True\n return True\n return False",
"def test_city_country(self):\n formatted_city = get_full_city(\"santiago\", \"chile\")\n self.assertEqual(formatted_city, \"Santiago, Chile\")",
"def resolve_query_place(query):\n\n\tallTokens = [token for token in query.replace(',', ' ').split(' ') if token]\n\tif not len(allTokens):\n\t\treturn ('', None)\n\tcomponents = [component.split(' ') for component in query.split(',') if component]\n\tcities = None\n\tregions = None\n\tcountries = None\n\tplaces = None\n\tconsumed = 0\n\t\n\tdef get_component():\n\t\t\"\"\" Returns a sub sequence of a component. This makes use of the commas as hard delimiters to separate city, state, etc. \"\"\"\n\t\tcomponentConsumed = consumed\n\t\tfor i in range(len(components)):\n\t\t\tif componentConsumed < len(components[-i]):\n\t\t\t\treturn components[-i][:-componentConsumed if componentConsumed else None]\n\t\t\telse:\n\t\t\t\tcomponentConsumed -= len(components[-i])\n\t\treturn []\n\n\tif len(allTokens[-1]) == 2 and allTokens[-1].isalpha():\n\t\tif len(allTokens) >= 2 and len(allTokens[-2]) == 2 and allTokens[-2].isalpha():\n\t\t\t# A county and region code were given. e.g. CA US -> US.CA\n\t\t\tregions = Region.objects.filter(code='%s.%s' % (allTokens[-1].upper(), allTokens[-2].upper()))\n\t\t\tconsumed = 2\n\t\telse:\n\t\t\t# A single region or country code was given\n\t\t\tregions = Region.objects.filter(code__endswith=allTokens[-1].upper())\n\t\t\tif not len(regions):\n\t\t\t\tcountries = Country.objects.filter(code=allTokens[-1].upper()).order_by('-population')\n\t\t\tconsumed = 1\n\t\tif len(regions):\n\t\t\t# Found a region, also try to find the city that goes with the region\n\t\t\tplaces = regions\n\t\t\tcityConsumed = __parse_city(get_component())\n\t\t\tif cityConsumed:\n\t\t\t\tcities = City.objects.filter(name__iexact=' '.join(allTokens[-(consumed + cityConsumed):-consumed if consumed else None]), region__in=regions).order_by('-population')\n\t\t\t\tif len(cities):\n\t\t\t\t\tplaces = cities\n\t\t\t\t\tconsumed += cityConsumed\n\t\telif len(countries):\n\t\t\t# Found a country, also try to find the city that goes with the country\n\t\t\tplaces = countries\n\t\t\tcityConsumed = __parse_city(get_component())\n\t\t\tif cityConsumed:\n\t\t\t\tcities = City.objects.filter(name__iexact=' '.join(allTokens[-(consumed + cityConsumed):-consumed if consumed else None]), country=countries[0]).order_by('-population')\n\t\t\t\tif len(cities):\n\t\t\t\t\tplaces = cities\n\t\t\t\t\tconsumed += cityConsumed\n\telse:\n\t\t# No codes were given, the query is more free form\n\t\t# Match the country first\n\t\tcountryConsumed = __parse_country(get_component())\n\t\tif countryConsumed:\n\t\t\tcountries = Country.objects.filter(name__iexact=' '.join(allTokens[-(consumed + countryConsumed):-consumed if consumed else None])).order_by('-population')\n\t\t\tif len(countries):\n\t\t\t\tplaces = countries\n\t\t\t\tconsumed += countryConsumed\n\t\t# Try region then city matching\n\t\tregionConsumed = __parse_region(get_component())\n\t\tif regionConsumed:\n\t\t\tif countries and len(countries):\n\t\t\t\tregions = Region.objects.filter(name__iexact=' '.join(allTokens[-(consumed + regionConsumed):-consumed if consumed else None]), country=countries[0])\n\t\t\telse:\n\t\t\t\tregions = Region.objects.filter(name__iexact=' '.join(allTokens[-(consumed + countryConsumed):-consumed if consumed else None]))\n\t\t\tif len(regions):\n\t\t\t\tplaces = regions\n\t\t\t\tconsumed += regionConsumed\n\t\tcityConsumed = __parse_city(get_component())\n\t\tif cityConsumed:\n\t\t\tif regions and len(regions):\n\t\t\t\tcities = City.objects.filter(name__iexact=' '.join(allTokens[-(consumed + cityConsumed):-consumed if consumed else 
None]), region__in=regions).order_by('-population')\n\t\t\telif len(countries):\n\t\t\t\tcities = City.objects.filter(name__iexact=' '.join(allTokens[-(consumed + cityConsumed):-consumed if consumed else None]), country=countries[0]).order_by('-population')\n\t\t\telse:\n\t\t\t\tcities = City.objects.filter(name__iexact=' '.join(allTokens[-(consumed + cityConsumed):-consumed if consumed else None])).order_by('-population')\n\t\t\tif len(cities):\n\t\t\t\tplaces = cities\n\t\t\t\tconsumed += cityConsumed\n\t\t# If region was found without a city go back to just try to resolve it to a city instead\n\t\tif (regions and len(regions)) and (not cities or not len(cities)):\n\t\t\tconsumed -= regionConsumed\n\t\t\tcityConsumed = __parse_city(get_component())\n\t\t\tif cityConsumed:\n\t\t\t\tif len(countries):\n\t\t\t\t\tcities = City.objects.filter(name__iexact=' '.join(allTokens[-(consumed + cityConsumed):-consumed if consumed else None]), country=countries[0]).order_by('-population')\n\t\t\t\telse:\n\t\t\t\t\tcities = City.objects.filter(name__iexact=' '.join(allTokens[-(consumed + cityConsumed):-consumed if consumed else None])).order_by('-population')\n\t\t\t\tif len(cities):\n\t\t\t\t\tplaces = cities\n\t\t\t\t\tconsumed += cityConsumed\n\t\t\tif not cities or not len(cities):\n\t\t\t\t# No city found, region is the best match\n\t\t\t\tconsumed -= regionConsumed\n\n\treturn (' '.join(allTokens[:-consumed if consumed else None]), places)",
"def loadCity(fileid):\n dinf = {}\n root = etree.Element(\"city\")\n text = None\n statename = \"\"\n statefile = \"\"\n cityname = \"\"\n dinf['m'] = {}\n dinf['m']['events'] = {}\n # TODO: put this in a global variable, and make a function to populate it from the DTD.\n tags = [\"name\",\"state\",\"statefile\",\"start\",\"scue\",\"end\",\"ecue\",\"place\",\"aspects\"]\n for tag in tags:\n dinf[tag] = [\"\",False]\n dinf['aspects'] = {}\n if not dinf.get(\"places\"): dinf['places'] = {}\n if not idExists(fileid):\n status.push(0,\"new city created... '%s'\" % fileid)\n return dinf\n fn = os.path.join(config['realmdir'],\"%s.xml\" % fileid)\n status.push(0,\"loading city from XML... '%s'\" % fn)\n try:\n with codecs.open(fn,'rU','utf-8') as f:\n tree = etree.parse(f)\n f.close()\n root = tree.getroot()\n except IOError as e:\n print \"c: Could not open configuration file: %s\" % e\n\n ir = 0\n for i in range(len(root)):\n if root[i].tag is not None:\n if root[i].tag == \"place\":\n if len(root[i]) > 0:\n node = \"\"\n node = root[i].find(\"file\")\n if node.text:\n node = node.text.strip()\n node = common.validateFileid(node)\n dinf['places'][node] = {}\n for j in root[i]:\n if j.tag and j.text and j.tag != \"file\":\n dinf['places'][node][j.tag] = [j.text.strip(),False]\n if config['debug'] > 3: print dinf['places'][node]\n else:\n if config['debug'] > 0:\n print \"Invalid place tag:\"\n for c in root[i]:\n print c.tag + ': ' + c.text,\n else: # no relat length\n if config['debug'] > 0: print \"Empty place tag.\"\n elif root[i].tag == \"events\":\n if len(root[i]) > 0:\n nodes = root[i]\n for node in nodes:\n k = str(len(dinf['m']['events']))\n dinf['m']['events'][k] = {}\n for j in node:\n if j.tag and j.text:\n dinf['m']['events'][k][j.tag] = [j.text.strip(),False]\n else:\n if config['debug'] > 0:\n print \"Invalid milestone tag:\"\n for c in node:\n print c.tag + ': ' + c.text,\n if config['debug'] > 3: printPretty(dinf['m']['events'])\n else: # no relat length\n if config['debug'] > 0: print \"Empty milestone tag.\"\n elif root[i].tag == \"aspects\":\n if len(root[i]) > 0:\n nodes = root[i]\n for node in nodes:\n k = str(len(dinf['aspects']))\n dinf['aspects'][k] = {}\n if node.tag and node.text:\n dinf['aspects'][k] = [node.text.strip(),False]\n else:\n if config['debug'] > 0:\n print \"Invalid aspects tag:\"\n print node.tag + ': ' + node.text,\n else: # no aspects length\n if config['debug'] > 0: print \"Empty aspects tag.\"\n elif root[i].text is not None:\n if root[i].tag == \"statefile\":\n statefile = root[i].text.strip()\n statefile = common.validateFileid(statefile)\n if statefile is None: statefile = \"\"\n elif root[i].tag == \"state\":\n statename = root[i].text.strip()\n elif root[i].tag == \"name\":\n cityname = root[i].text.strip()\n dinf[root[i].tag] = [root[i].text.strip(), False]\n if config['debug'] > 2: print str(i) + \" \",\n if len(statefile) > 0: pushLoc(statefile,statename,fileid,cityname)\n return dinf",
"def street_parser(*street_data):\n\n # parsing tuples\n if len(street_data) == 2:\n if not isinstance(street_data[0], str) and not isinstance(street_data[1], str):\n raise WrongInput(\"Invalid format\")\n # street name as the tuple's first item\n strname, strnumber = street_data\n # street number as the tuple's first item\n if street_data[0][0] in digits:\n strname, strnumber = strnumber, strname\n\n # parsing strings\n else:\n if not isinstance(street_data[0], str):\n raise WrongInput(\"Invalid format\")\n if not street_data[0]:\n raise WrongInput(\"Input cannot be blank\")\n\n # string starting with street number\n if street_data[0][0] in digits:\n street_pattern = re.compile(r'''\n ^ # beginning of string\n (\\d+) # street number is any number of digits\n \\W+ # separator\n (\\w+\\W*\\w*\\W*) # street name is one or more words with optional separators\n $ # end of string\n ''', re.VERBOSE)\n street_obj = street_pattern.search(street_data[0])\n strnumber, strname = street_obj.groups()\n\n # string starting with street name\n else:\n street_pattern = re.compile(r'''\n ^ # beginning of string\n (\\w+\\W*\\w*\\s*) # street name is one or more words with optional separators\n \\W+ # separator\n (\\d+) # street number is any number of digits\n $ # end of string\n ''', re.VERBOSE)\n street_obj = street_pattern.search(street_data[0])\n (strname, strnumber) = street_obj.groups()\n\n # replace specific words in street name with their abbreviates\n strname = strname.lower()\n special = {r'\\baleje\\b': 'Al.', r'\\bavenue\\b': 'Av.', r'\\broad\\b': 'Rd.', r'\\bsquare\\b': 'Sq.',\n r'\\bstreet\\b': 'St.', r'\\bdrive\\b': 'Dr.'}\n for key in special:\n strname = re.sub(key, special[key], strname)\n return strname.title(), strnumber",
"def to_location(city: str = None, state: str = None) -> str:\n if city is None and state is None:\n return \"USA\"\n elif city is None or state is None:\n return f\"{city or ''}{state or ''}, USA\"\n return f\"{city}, {state} USA\""
] | [
"0.7167144",
"0.7103394",
"0.69272184",
"0.68638027",
"0.6724114",
"0.6408183",
"0.6401508",
"0.63388",
"0.6293635",
"0.6257335",
"0.6255003",
"0.6098398",
"0.6051419",
"0.5989935",
"0.5964835",
"0.5951598",
"0.5909424",
"0.5863681",
"0.5793439",
"0.57704103",
"0.5766389",
"0.5723738",
"0.5698773",
"0.56963664",
"0.5674444",
"0.5647972",
"0.56427413",
"0.56358427",
"0.5619225",
"0.5614443"
] | 0.7390942 | 0 |
Parse a US Postal Code | def parse_postalUS(self):
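        # Returns a tuple of (postal code string, number of words consumed), or (None, 0) when no match is found.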
index = self.index
# US Postal Code
if len(self.words[index]['word']) != 5 or not self.words[index]['word'].isdigit():
return None, 0
postal = self.words[index]['word']
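        # Look ahead for an optional ZIP+4 extension: a dash followed by four digits.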
if index + 1 < self.length:
if self.words[index+1]['word'] == '-':
index += 2
if index == self.length:
return None, 0
if len(self.words[index]['word']) == 4 and self.words[index]['word'].isdigit():
postal += '-' + self.words[index]['word']
return postal, 3
else:
return postal, 1
return postal, 1 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def extract_postcode(s):\n pc_regex = r'([Gg][Ii][Rr] 0[Aa]{2})|((([A-Za-z][0-9]{1,2})|(([A-Za-z][A-Ha-hJ-Yj-y]'\n pc_regex += r'[0-9]{1,2})|(([A-Za-z][0-9][A-Za-z])|([A-Za-z][A-Ha-hJ-Yj-y][0-9]?[A-Za-z]'\n pc_regex += r'))))\\s?[0-9][A-Za-z]{2})'\n\n re_search = re.search(pc_regex, s)\n if re_search:\n p = re_search.group(0)\n else:\n p = ''\n return p",
"def postcode(full_address):\n return capture_address_element(POSTCODE_PATTERN, full_address)",
"def normalise_postcode(postcode):\n\n postcode = NON_ALPHA_RE.sub(\"\", postcode.upper())\n postcode = postcode[:-3] + \" \" + postcode[-3:]\n if POSTCODE_RE.match(postcode):\n return postcode\n return None",
"def country(alpha_2_code: str) -> None:",
"def parse_postalCA(self):\n \n index = self.index\n \n if len(self.words[index]['word']) != 3:\n return None, 0\n postal = self.words[index]['word']\n index += 1\n if index == self.length:\n return None, 0\n \n if len(self.words[index]['word']) != 3:\n return None, 0\n postal += self.words[index]['word']\n \n return postal, 2",
"def USCode(self, short):\n states = {\n 'AK': 'Alaska',\n 'AL': 'Alabama',\n 'AR': 'Arkansas',\n 'AS': 'American Samoa',\n 'AZ': 'Arizona',\n 'CA': 'California',\n 'CO': 'Colorado',\n 'CT': 'Connecticut',\n 'DC': 'District of Columbia',\n 'DE': 'Delaware',\n 'FL': 'Florida',\n 'GA': 'Georgia',\n 'GU': 'Guam',\n 'HI': 'Hawaii',\n 'IA': 'Iowa',\n 'ID': 'Idaho',\n 'IL': 'Illinois',\n 'IN': 'Indiana',\n 'KS': 'Kansas',\n 'KY': 'Kentucky',\n 'LA': 'Louisiana',\n 'MA': 'Massachusetts',\n 'MD': 'Maryland',\n 'ME': 'Maine',\n 'MI': 'Michigan',\n 'MN': 'Minnesota',\n 'MO': 'Missouri',\n 'MP': 'Northern Mariana Islands',\n 'MS': 'Mississippi',\n 'MT': 'Montana',\n 'NA': 'National',\n 'NC': 'North Carolina',\n 'ND': 'North Dakota',\n 'NE': 'Nebraska',\n 'NH': 'New Hampshire',\n 'NJ': 'New Jersey',\n 'NM': 'New Mexico',\n 'NV': 'Nevada',\n 'NY': 'New York',\n 'OH': 'Ohio',\n 'OK': 'Oklahoma',\n 'OR': 'Oregon',\n 'PA': 'Pennsylvania',\n 'PR': 'Puerto Rico',\n 'RI': 'Rhode Island',\n 'SC': 'South Carolina',\n 'SD': 'South Dakota',\n 'TN': 'Tennessee',\n 'TX': 'Texas',\n 'UT': 'Utah',\n 'VA': 'Virginia',\n 'VI': 'Virgin Islands',\n 'VT': 'Vermont',\n 'WA': 'Washington',\n 'WI': 'Wisconsin',\n 'WV': 'West Virginia',\n 'WY': 'Wyoming'\n }\n return states.get(short)",
"def test_parse_post_code_field(self):\n fields = {'Post code': {'offset': 171,\n 'length': 4}}\n p = top.Parser(fields=fields)\n received = p.parse_line(self._line)\n expected = {'Post code': '2048'}\n msg = 'Post code field parse incorrect'\n self.assertEqual(received, expected, msg)",
"def get_postal_code(self):\n element = self.driver.find_element(*self.postalcode_textbox_selector)\n return element.get_attribute(\"value\")",
"def extract_zipcode(full_address):\n full_address = full_address.strip()\n last_space_index = full_address.rindex(\" \")\n zipcode = full_address[last_space_index + 1 : ]\n return zipcode",
"def get_postal_code(url):\n user_agent = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.7) Gecko/2009021910 Firefox/3.0.7'\n headers = {'User-Agent': user_agent, }\n request = Request(url, None, headers) # The assembled request\n response = urlopen(request)\n data = response.read()\n\n soup = BeautifulSoup(data, \"lxml\")\n tables = soup.find_all('table')\n table_body = tables[0].find('tbody')\n\n rows_tables = table_body.find_all(\"tr\")\n if len(rows_tables) > 2:\n col = rows_tables[2].find_all(\"td\")\n data_col = [elem.text.strip() for elem in col]\n result = data_col[1]\n else:\n result = \"\"\n\n return result",
"def country_codes(country):\n countryObject = None\n try:\n countryObject = pycountry.countries.search_fuzzy(country)\n return countryObject[0].alpha_2\n except LookupError:\n pass\n try:\n splittedCountry = country.split(',')[0]\n countryObject = pycountry.countries.search_fuzzy(splittedCountry)\n return countryObject[0].alpha_2\n except LookupError:\n return 'No Code'",
"def compute_zip_code(zip_code_text):\n zip_code = None\n if zip_code_text and len(zip_code_text) >= 5 and zip_code_text.isdigit():\n zip_code = zip_code_text[:5]\n return zip_code",
"def clean_postal_code(self):\n return self.cleaned_data['postal_code'].strip()",
"def checkPostalCode(self, code, country):\n if country == 'US':\n USZipCodeField().clean(code)",
"def update_postcode(postcode, invalid = True):\r\n m = postcode_format_re.search(postcode)\r\n if m:\r\n invalid = False\r\n postcode= postcode[:5]\r\n return (invalid, postcode)",
"def postal_code(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"postal_code\")",
"def postal_code(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"postal_code\")",
"def postal_code(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"postal_code\")",
"def formatPostalCode(string):\n if string.isdigit():\n return int(string)\n else :\n return 0",
"def validate_postcode_format(self):\n\n assert type(self.postcodes) == str, \"To use this method, the postcode cannot be an iterable.\"\n pcd = self.postcodes.replace(' ', '')\n # The following regular expression matches are in order to adhere to the rules for UK postcodes given in the\n # documentation.\n first_char_alpha = re.match(r'^[a-zA-Z]', pcd)\n last_char_match = re.match(r'[a-zA-Z]', pcd[-1])\n alpha_match = re.search(r'[a-zA-Z]', pcd)\n numeric_match = re.search(r'[0-9]', pcd)\n special_chars_match = re.search(r'[!#,£$%^&*¬-]', pcd)\n if len(pcd) == 0:\n response = 'Null'\n elif (5 <= len(pcd) <= 7) and first_char_alpha and alpha_match and numeric_match \\\n and last_char_match and not special_chars_match:\n response = 'Valid Postcode Format'\n else:\n response = 'Invalid Postcode Format'\n return response",
"def postal_code(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"postal_code\")",
"def parse_station_name (station_name):\n try:\n _,chinese_name,code,full_pinyin,short_pinyin = station_name.split('|')\n except ValueError:\n # print(station_name)\n _,chinese_name,code,full_pinyin,short_pinyin,_ = station_name.split('|')\n return {chinese_name:code,full_pinyin:code,short_pinyin:code}",
"def is_valid_postal_code(postal_code):\n assert postal_code is not None\n postal_code = postal_code.replace(\" \", \"\")\n postal_code_re = re.compile(r\"\\s*(\\w\\d\\s*){3}\\s*\")\n return postal_code_re.match(postal_code) is not None",
"def postcode(self):\n return self._postcode",
"def postcode(self):\n return self._postcode",
"def postal_code(self):\n if \"postalCode\" in self._prop_dict:\n return self._prop_dict[\"postalCode\"]\n else:\n return None",
"def postal_code(self):\n if \"postalCode\" in self._prop_dict:\n return self._prop_dict[\"postalCode\"]\n else:\n return None",
"def geocode(postcode):\n key = current_app.config.get(\"OS_PLACES_API_KEY\")\n formatted_addresses = FormattedAddressLookup(key=key).by_postcode(postcode)\n response = [{\"formatted_address\": address} for address in formatted_addresses if address]\n return Response(json.dumps(response), mimetype=\"application/json\")",
"def grab_area_code(phone_number):\r\n #number of form +1 XXX XXX XXXX (this should be the form get_twilio_client provides)\r\n if \"+1\" == phone_number[:2]:\r\n return phone_number[2:5]\r\n # number of form 1 XXX XXX XXXX\r\n if len(phone_number) == 11 and phone_number[0] == '1':\r\n return phone_number[1:4]\r\n # number of form XXX XXX XXXX\r\n if len(phone_number) == 10:\r\n return phone_number[:3]\r\n raise BadPhoneNumberError('\"%s\" is an invalid phone number.' % phone_number)",
"def postal_code(self, instance):\r\n return instance.user.profile.postal_code"
] | [
"0.68169475",
"0.6787212",
"0.66331065",
"0.6465904",
"0.62327194",
"0.6120119",
"0.6045608",
"0.60441333",
"0.5938737",
"0.5917331",
"0.5902725",
"0.58611923",
"0.5851294",
"0.582264",
"0.5816804",
"0.57870555",
"0.57870555",
"0.57870555",
"0.57870024",
"0.5727899",
"0.5709292",
"0.5701922",
"0.5693404",
"0.5682764",
"0.5682764",
"0.56815344",
"0.56815344",
"0.56607956",
"0.5649053",
"0.5624287"
] | 0.74377286 | 0 |
Insert a line of Updates data into the database. | def insert_updates(self, values):
assert len(values) == len(COLUMNS_BGPEVENTS)
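        # Bind the row values to the prepared INSERT statement and execute it asynchronously.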
bound = self.prep_stmt_insert_bgpevents.bind(values)
self.futures.append(self.session.execute_async(bound))
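        # Throttle outstanding requests: once too many are in flight, block until the pending ones complete.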
if (len(self.futures) > MAX_ASYNC_REQUESTS):
self.check_deferred_responses() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def dbUpdate():\n dbAddress = config.get('database', 'dbAddress')\n dbUser = config.get('database', 'dbUser')\n dbPassword = config.get('database', 'dbPassword')\n dbName = config.get('database', 'dbName')\n dbPort = config.getint('database', 'dbPort')\n con = MySQLdb.connect(host=dbAddress, port=dbPort, user=dbUser, passwd=dbPassword,\n db=dbName)\n c = con.cursor()\n\n date = datetime.datetime.now()\n c.execute(\"INSERT INTO sensor_data (date, dht_temp, dht_humidity, cpu_temp, \"\n \"solar_voltage, solar_current, battery_voltage, battery_current, \"\n \"load_voltage, load_current) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,\"\n \"%s)\",\n (date, dht_temp, dht_humidity, cpu_temp, sol_volt_v, sol_curr_ma,\n bat_volt_v, bat_curr_ma, load_volt_v, load_curr_ma))\n\n con.commit()\n con.close()",
"def update(self, line):",
"def insertUpdate(self, e):\n syncJSONtoUI()",
"def commandroute(self, operation, lines, updatedline=None):\n if operation == 'Insert':\n fileoperation = \"a\"\n else:\n fileoperation = \"w\"\n try:\n with open(self.filedb, fileoperation) as f:\n if operation == 'Insert':\n f.write(\"\\r\")\n f.write(updatedline)\n code = 201\n else:\n for line in lines:\n if line != \"\":\n f.write(line)\n f.write(\"\\r\")\n if updatedline is not None and operation != 'Delete':\n f.write(updatedline)\n code = 200\n message = operation + ' ok: ' + updatedline\n logging.info(message)\n f.close()\n return False, message, code\n except Exception as e:\n logging.error('Database is not accessible.' + str(e))\n return True, 'Database is not accessible', 503",
"async def add_entry(self, **values):\r\n query = \"INSERT OR IGNORE INTO {table_name} ({table_headers}) VALUES({entry_values})\"\r\n\r\n headers = \", \".join([e for e in values.keys()])\r\n entry_val = \", \".join(\"?\"*len(values.values()))\r\n attrs = [e for e in values.values()]\r\n\r\n query = query.format(table_name = self.name, table_headers=headers, entry_values=entry_val)\r\n\r\n await self.data.db.execute(query, attrs)\r\n await self.data.db.commit()",
"def do_update_data(self, *args):\n print(\"Provide data to update :\")\n id_field = dict()\n id_field['id'] = input(\"Provide id to update :\")\n values = {**id_field, **self.__class__.populate_data()}\n self.connection_obj.update_into_table(**values)\n print(\"Data Update Successful\")",
"def update(self, sql):\n try:\n # Execute the SQL command\n self.cursor.execute(sql)\n # Commit your changes in the database\n self.db.commit()\n except:\n # Rollback in case there is any error\n self.db.rollback()",
"def insert_line(self, t):\n if type(t) == tuple:\n t = [t]\n\n c = self.conn.cursor()\n for l in t:\n c.execute('INSERT INTO lines VALUES (?,?)', l[:2])\n lineid = c.lastrowid\n for s in l[2]:\n try:\n c.execute('insert into relation values (?,?)', (s, lineid))\n except sqlite3.OperationalError as e:\n print e.message\n\n self.conn.commit()\n c.close()",
"def updateLine(self, column: str, value: [str,int], lineId: int):\n value = f\"'{value}'\" if type(value) == str else value\n query = f\"\"\"\n UPDATE lines\n SET {column} = {value}\n WHERE id = {lineId};\n \"\"\"\n sql.executeQuery(self.connection, query)",
"def update(table, id_):\n return common.update_line(table, id_, [(\"Title: \", None),\n (\"Price: \", common.validate_int),\n (\"Month of sale: \", common.validate_month),\n (\"Day of sale: \", common.validate_day),\n (\"Year of sale: \", common.validate_byear),\n (\"Customer ID: \", common.validate_id_possible)])",
"def add_entry(db, table, columns, values):\n mycursor = db.cursor()\n\n sql = \"INSERT INTO \" + table + \" (\" + parse_sql_param_from_array(columns) + \") VALUES (\" + parse_sql_param_from_array(values, escape=True) + \")\"\n mycursor.execute(sql)\n\n db.commit()",
"def set_updates(self, updates: dict, dry=False):\n self._updates += (UpdateQueryExpression(updates),)\n return self",
"def update_row(self, row_id, update_data):\n #Check to make sure all the column names given by user match the column names in the table.\n data = self.__scrub_data(update_data)\n path = self.__data_file_for_row_id(row_id)\n if data:\n #Create a temp data file with the updated row data.\n if self.__modify_data_file(path, {row_id: data}, 'update'):\n print('Row ' + str(row_id) + ' has been updated.') \n else:\n raise Exception('There was a problem updating row at ' + str(row_id) +'.')\n else:\n raise Exception('Sorry, the data you tried to insert is invalid.')",
"def commit_updates(session, update_key, update_statements, table, commit_frequency = 1000):\n primary_key = table.primary_key.columns.values()[0]\n update_key = table.columns[update_key]\n u = table.update().where(primary_key==bindparam('pk')).values({update_key: bindparam('update')})\n numgroups = len(update_statements) / commit_frequency\n for ng in range(numgroups):\n if numgroups == 0:\n break\n chunk = update_statements[ng*commit_frequency:(ng+1)*commit_frequency]\n session.connection().execute(u, *chunk)\n print \"committing chunk\",ng+1,\"of\",numgroups,\"with length\",len(chunk),\"at\",datetime.now()\n session.commit()\n last_chunk = update_statements[numgroups*commit_frequency:]\n if last_chunk:\n print \"committing last\",len(last_chunk),\"records at\",datetime.now()\n session.connection().execute(u, *last_chunk)\n session.commit()",
"def insert_into_tweets(self, infos):\n query = \"insert into tweets(tweet_id, insert_date, created_at, hashtag) values(?, ?, ?, ?);\"\n with sql.connect('./{}.db'.format(self.name)) as conn:\n conn.executemany(query, infos)",
"def update_db_record(self, update_body: dict):\n for attribute, value in update_body.items():\n if attribute in self._update_allowed_fields:\n setattr(self, attribute, value)\n self.updated_at = datetime.now()\n self.save()",
"def update_updated_data_sqlite_db(self, table_name: str):\n # go through indicators and get updated data in dataframe\n print('start downloading queries')\n df = self.__get_updated_data(table_name)\n print('api download completed')\n\n # get list of sql queries to insert to sqlite db\n print('start creating queries')\n q_list = self.__get_sql_insert_query_list(df, table_name)\n\n # insert data to sqlite\n print('start inserting data')\n AccessDB().run_insert_query(q_list)\n return 'Process Completed'",
"def test_update(self):\n query = \"insert into cds values(%s,%s,%s,%s)\"\n values = (156098,\"haha\",\"haha 5\",2)\n self.a.insert(query, values)\n query1 = \"update cds set Quantity=%s where id=%s\"\n values1 = (5, 156098)\n self.a.update(query1, values1)\n query2 = \"select * from cds where id=156609\"",
"def update_records(self, something):\n print(\"Some logic (not shown) to update database of units\")",
"def update(self):\n self.__execute(self.pkgin_bin, \"update\")",
"def update(\n server: str,\n pokemon_number: str,\n pokemon_name: str,\n user: str,\n notes: str = \"\",\n board: str = \"TRADE_BOARD\",\n) -> str:\n return (\n f\"INSERT OR REPLACE INTO {board}\"\n f\" values('{uuid4()}', '{server}', '{user}',\"\n f\" '{pokemon_name}',{pokemon_number},\"\n f\" '{notes}')\"\n )",
"def insert_or_update(self, table, connection, row):\n\n # find line, if it exist\n dbrow = self.find(connection, table, row)\n\n # TODO XXX use actual database function instead of this stupid thing\n now = datetime.datetime.now()\n\n column_names = table.columns.keys()\n\n # UpdatedAt field configured ? Let's set the value in source hash\n if self.updated_at_field in column_names:\n row[self.updated_at_field] = now # XXX not pure ...\n\n # Update logic\n if dbrow:\n if not UPDATE in self.allowed_operations:\n raise ProhibitedOperationError('UPDATE operations are not allowed by this transformation.')\n\n query = table.update().values(\n **{col: row.get(col)\n for col in self.get_columns_for(column_names, row, dbrow)}\n ).where(and_(*(getattr(table.c, col) == row.get(col) for col in self.discriminant)))\n\n # INSERT\n else:\n if not INSERT in self.allowed_operations:\n raise ProhibitedOperationError('INSERT operations are not allowed by this transformation.')\n\n if self.created_at_field in column_names:\n row[self.created_at_field] = now # XXX UNPURE\n else:\n if self.created_at_field in row:\n del row[self.created_at_field] # UNPURE\n\n query = table.insert().values(**{col: row.get(col) for col in self.get_columns_for(column_names, row)})\n\n # Execute\n try:\n connection.execute(query)\n except Exception:\n connection.rollback()\n raise\n\n # Increment stats TODO\n # if dbrow:\n # self._output._special_stats[UPDATE] += 1\n # else:\n # self._output._special_stats[INSERT] += 1\n\n # If user required us to fetch some columns, let's query again to get their actual values.\n if self.fetch_columns and len(self.fetch_columns):\n if not dbrow:\n dbrow = self.find(row)\n if not dbrow:\n raise ValueError('Could not find matching row after load.')\n\n for alias, column in self.fetch_columns.items():\n row[alias] = dbrow[column]\n\n return row",
"def update(self):\n db.session.commit()",
"def update(self):\n db.session.commit()",
"def add_all_lines(conn, table_values):\n\n column_list = table_values[0]\n column_row = \",\".join(column_list)\n qmark = \"?\"\n col_count = len(column_list)\n for cols in range(1, col_count):\n qmark += \", ?\"\n cols = cols\n\n cur = conn.cursor()\n cur.execute(\"DROP TABLE IF EXISTS ayasdi_table;\")\n cur.execute(\"CREATE TABLE ayasdi_table (\" + column_row + \");\")\n cur.executemany(\\\n \"INSERT INTO ayasdi_table (\" + column_row + \") VALUES (\" + qmark + \");\", \\\n table_values)",
"def insert(self,table,values):\n self.connect.execute(self.insert_disc[table],values)\n self.connect.commit()",
"def _execute_update(self, updateQuery, updateValues):\n with self as plasticDB:\n cursor = plasticDB.connection.cursor()\n cursor.execute(updateQuery, updateValues)",
"def do_update(self, line):\n\n args = line.split()\n\n if not args:\n print(\"** class name missing **\")\n elif args[0] not in HBNBCommand.class_list:\n print(\"** class doesn't exist **\")\n elif len(args) == 1:\n print(\"** instance id missing **\")\n elif len(args) > 1:\n key = args[0] + \".\" + args[1]\n dict_objects = storage.all()\n obj = dict_objects.get(key)\n if obj is None:\n print(\"** no instance found **\")\n else:\n if len(args) == 2:\n print(\"** attribute name missing **\")\n elif len(args) == 3:\n print(\"** value missing **\")\n else:\n setattr(obj, args[2], str(args[3].replace('\"', '')))\n storage.save()",
"def updateStepLog(self, data: Dict) -> None:\n step_payload = {\n **data,\n **{\n \"step_end_ts\": str(datetime.datetime.now()),\n \"upsert_by\": \"DLoaderMS\",\n \"upsert_ts\": str(datetime.datetime.now()),\n },\n }\n UpdateQuery = \"\"\"\n UPDATE file_process_step_log\n SET step_status = '{step_status}',\n step_status_detail = '{step_status_detail}',\n step_end_ts = timestamp '{step_end_ts}',\n upsert_by = '{upsert_by}',\n upsert_ts = timestamp '{upsert_ts}'\n WHERE step_id = {step_id}\n \"\"\"\n cursor = self.engine.cursor()\n try:\n cursor.execute(UpdateQuery.format(**step_payload))\n except Exception as e:\n raise DLoaderException(\n \"Failed while inserting data into audit table {0}\".format(e)\n )\n finally:\n cursor.close()",
"def insert(self, sql):\n try:\n # Execute the SQL command\n self.cursor.execute(sql)\n # Commit your changes in the database\n self.db.commit()\n except:\n # Rollback in case there is any error\n self.db.rollback()"
] | [
"0.59201044",
"0.5902824",
"0.5817334",
"0.5641299",
"0.56094337",
"0.5608667",
"0.55930495",
"0.5567229",
"0.552885",
"0.55259585",
"0.552431",
"0.5485754",
"0.547772",
"0.5477715",
"0.5470331",
"0.546635",
"0.5462415",
"0.54596007",
"0.5446681",
"0.5443039",
"0.5435018",
"0.54299194",
"0.54234636",
"0.54234636",
"0.54142976",
"0.54086435",
"0.5381702",
"0.53646785",
"0.5357928",
"0.5349114"
] | 0.5996981 | 0 |
Query the table to determine if a file with the given name has already been ingested. | def is_file_ingested(self, original_name, tablename):
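        # Build a parameterized lookup keyed on the original-file-name column (COLUMNS_META[2]).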
prep_stmt = self.session.prepare(
'SELECT * FROM {0} WHERE {1}=?'.format(tablename, COLUMNS_META[2])
)
bound = prep_stmt.bind([original_name])
results = self.session.execute(bound)
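        # Any returned row means a file with this original name was ingested previously.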
return True if len(results.current_rows) > 0 else False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _check_file(self, name):\n self.assertTrue(os.path.exists(name), \"Could not find table %s.\" % name)",
"def check_if_row_already_loaded(self, row, file_name):\n\t\tquery = \"SELECT count(*) FROM \" + TABLE_NAME + \" WHERE GLOBALEVENTID = \" + \"'\" + row[0] + \"'\"\n\n\t\ttry:\t\t\t\n\t\t\t# print query\n\t\t\tcursor = self.connection.cursor()\n\t\t\texecuted_cur = cursor.execute(query)\n\n\t\t\tif executed_cur:\t\t\t\n\t\t\t\tresult_cur = cursor.fetchall()\n\t\t\t\tfor row in result_cur:\n\t\t\t\t\tif int(row[0]) > 0:\n\t\t\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\tprint \"[e] Something wrong with execution.\"\n\t\texcept Exception, e:\n\t\t\tprint '[e] Exeption: %s while processing \"%s\" file in method %s' % \\\n (str(e), DATA_DIRECTORY + '/' + file_name, \"check_if_row_already_loaded\")\n\t\t\tprint '\\t[q] Query that caused exception \\n %s' % (query)\n\n\n\t\treturn False",
"def has_file(self, name):\n return name in self.files",
"def table_exists(self, table_name):\n\t\tif self.__dbfile is None:\n\t\t\treturn False\n\n\t\tcursor = self._conn.cursor()\n\t\tcursor.execute(\"SELECT name FROM sqlite_master WHERE type='table' ORDER BY name\")\n\t\ttables = [t[0] for t in cursor.fetchall()]\n\t\treturn table_name in tables",
"def has_table(self, name: str) -> bool:\n try:\n self.execute(\"select * from {table} limit 1\", name)\n return True\n except sqlite3.OperationalError:\n return False",
"def exists(self, name):\n # django 判断文件名是否可用\n return False # 代表就是可用的新文件",
"def check_table_exists(self, table_name):\n query_success, query_resp = self.run_athena_query(\n query='SHOW TABLES LIKE \\'{}\\';'.format(table_name),\n database=self.DATABASE_STREAMALERT\n )\n\n if query_success and query_resp['ResultSet']['Rows']:\n return True\n\n LOGGER.info('The streamalert table \\'%s\\' does not exist. '\n 'For alert buckets, create it with the following command: \\n'\n '$ python stream_alert_cli.py athena create-table '\n '--type alerts --bucket s3.bucket.id',\n table_name)\n return False",
"def checkTable(self, in_table_name):\n phrase1 = \"SELECT count(*) FROM sqlite_master\"\n phrase2 = \"type='table' AND name='{}';\".format(in_table_name)\n self.cursor.execute(\"{} WHERE {}\".format(phrase1, phrase2))\n return self.cursor.fetchone()[0] == 1",
"def check_if_table_exists(self, table_name):\n cursor = self.conn.cursor()\n cursor.execute(\"SELECT EXISTS(SELECT * FROM information_schema.tables WHERE table_name=%s)\", (table_name,)), \n self.conn.commit()\n return cursor.fetchone()[0]",
"def ifAlreadyDone(self, cxRepo, schemaRepo, schema, tablename):\n logging.debug(f\"\"\"check if {schema}.{tablename} has been analyzed\"\"\")\n conn = self.connect(cxRepo)\n sql = f\"\"\"select table_name from {schemaRepo}.tablediff where lower\n (table_name) = lower('{tablename}') and schema1 = '{schema}' and\n server1_status = 'ready' and server1_status = 'ready' and result in\n ('ready', 'init')\"\"\"\n with conn:\n with conn.cursor() as curs:\n curs.execute(sql)\n row = curs.fetchone()\n if row is None:\n return 1\n else:\n return 0",
"def table_exist(self, name):\n sql = \"\"\"\n SELECT table_name\n FROM information_schema.tables\n WHERE table_schema = '{}'\n AND table_name = '{}'\"\"\".format(self.db, name)\n try:\n self.cursor.execute(sql)\n result = self.cursor.fetchone()\n if result:\n return True\n else:\n return False\n\n except MySQLdb.Error as e:\n self.connection.rollback()\n try:\n print(\"MySQL Error {}: {}\".format(e.args[0], e.args[1]))\n except IndexError:\n print(\"MySQL Error: {}\".format(str(e)))",
"def table_exists(self, table_name):\n\n if table_name is not None and table_name != '':\n query = \"SELECT count(*) found FROM information_schema.tables WHERE table_schema='public' AND table_type='BASE TABLE' and table_name=%s\"\n self._cursor.execute(query, [table_name])\n recs = self._cursor.fetchall()\n result = True if (recs[0]['found'] == 1) else False\n return result",
"def check_table_exists(curs, table_name):\n return curs.execute(\"\"\"SELECT COUNT(*)\n FROM sqlite_master\n WHERE type='table' AND name=?;\"\"\", (table_name,)).fetchone()[0] != 0",
"def does_table_exist(table_name):\n result = call_bq(\n ['show', table_name], project='khan-academy', raise_exception=False)\n\n if \"Not found: Table\" in result:\n return False\n else:\n return True",
"def FileExists(DSLModel, table, filename):\n fullpath=\"%s%s%s%s%s\" % (DSLModel['GENERAL']['target_folder'], os.sep,\n table['name'], os.sep, filename)\n return os.access(fullpath, os.F_OK)",
"def check_table(self, table_name):\n table_exists = False\n connected = False\n if not self.connected:\n self.connect()\n connected = True\n\n # Check if the database already exists\n if self.dbmi.__name__ == \"sqlite3\":\n\n self.cursor.execute(\"SELECT name FROM sqlite_master WHERE \"\n \"type='table' AND name='%s';\" % table_name)\n name = self.cursor.fetchone()\n if name and name[0] == table_name:\n table_exists = True\n else:\n # Check for raster_base table\n self.cursor.execute(\"SELECT EXISTS(SELECT * FROM information_schema.tables \"\n \"WHERE table_name=%s)\", ('%s' % table_name,))\n if self.cursor.fetchone()[0]:\n table_exists = True\n\n if connected:\n self.close()\n\n return table_exists",
"def check_file_existence(self, filename):\n try:\n for sample in TimeoutingSampler(\n config.GAHOOKS_TIMEOUT, 1, self.machine.fs.exists,\n \"/tmp/%s\" % filename\n ):\n if sample:\n return True\n except APITimeout:\n return False",
"def check_table(self, table_name: str) -> bool:\n try:\n if self.engine.dialect.has_table(self.engine.connect(), table_name):\n return self.get_input(table_name)\n return False\n except Exception as err:\n logger.error(\"check_table [error] -> %s\" % err)\n return False",
"def table_exists(self, table_name):\n return table_name in self.tables",
"def check_for_file(self):\n if self.task.file_name in os.listdir(self.task.file_storage):\n return True\n return False",
"def table_check(tablename, path):\n instance = arcno(path)\n tablelist = [i for i,j in instance.actual_list.items()]\n return True if tablename in tablelist else False",
"def has_file(self, name):\n return bool(self.input(name).__class__.__name__ == 'cgi_FieldStorage')",
"def file_exists(self, file_name):\n already_exists = False\n for file in os.listdir('saves'):\n if file.endswith('.json'):\n if file[:-5] == file_name:\n return True\n return False",
"def table_exists( table_name):\n query = \"select * from \" + table_name + \" limit 1\";\n cursor = connection.cursor()\n try:\n cursor.execute(query)\n except:\n return False\n return True",
"def exists(self, table, cursor):\n cursor.execute(f\"SELECT name FROM sqlite_master WHERE type='table' AND name='{table}'\")\n res = cursor.fetchone()\n return True if res else False",
"def table_exists(self, table_name):\n table_exists = self.query(\n sql.table_existence,\n (\n table_name,\n self._current_db,\n ),\n )\n return bool(table_exists)",
"def _table_exists(conn, table_name):\n # Query for the table.\n with conn:\n cur = conn.cursor()\n cur.execute(('SELECT name FROM sqlite_master'\n ' WHERE type=\"table\" AND name=\"%s\"') % table_name)\n return len(cur.fetchall()) == 1",
"def check_table_exist(conn, tablename: str):\n cur = conn.cursor()\n statement = f\"\"\"\n SELECT count(name)\n FROM sqlite_master\n WHERE type ='table' AND name = '{tablename}';\n \"\"\"\n cur.execute(statement)\n if cur.fetchone()[0] == 1:\n return True\n else:\n return False",
"def checkIfTableExists(self, table):\n\n self.__open()\n query = \"SHOW TABLES LIKE '{}'\".format(table)\n self.__cursor.execute(query)\n result = self.__cursor.fetchall()\n self.__close()\n if result:\n return True\n else:\n return False",
"def file_exists(session, ds_browser, ds_path, file_name):\n client_factory = session._get_vim().client.factory\n search_spec = vm_util.search_datastore_spec(client_factory, file_name)\n search_task = session._call_method(session._get_vim(),\n \"SearchDatastore_Task\",\n ds_browser,\n datastorePath=ds_path,\n searchSpec=search_spec)\n try:\n task_info = session._wait_for_task(search_task)\n except error_util.FileNotFoundException:\n return False\n\n file_exists = (getattr(task_info.result, 'file', False) and\n task_info.result.file[0].path == file_name)\n return file_exists"
] | [
"0.6910452",
"0.6595102",
"0.6392822",
"0.63660973",
"0.63485706",
"0.631363",
"0.62860954",
"0.6237048",
"0.6236886",
"0.6213956",
"0.61991674",
"0.6077491",
"0.6071262",
"0.6054262",
"0.60468805",
"0.60399336",
"0.5992716",
"0.5989239",
"0.59744495",
"0.59658456",
"0.59586686",
"0.59491616",
"0.594393",
"0.5933794",
"0.593296",
"0.59277064",
"0.59196395",
"0.5909241",
"0.59007823",
"0.58941233"
] | 0.7780518 | 0 |
Checks ResponseFuture objects stored in self.futures. Exceptions that occurred during async query executions will be raised here. | def check_deferred_responses(self):
for future in self.futures:
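            # result() blocks until the async query finishes and re-raises any exception it produced.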
results = future.result()
self.futures = [] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __check_futures(cls, output, futures):\n for _ in range(len(futures)):\n future = futures.popleft()\n if future.done():\n try:\n port, status = future.result()\n output[port] = status\n except socket_error:\n pass\n else:\n futures.append(future)",
"def _check_result(self, fut, *data):\n return fut",
"def handle_not_done_requests(not_done_futures):\n pass",
"def _extract_completed_runs_from_futures(self) -> None:\n\n # In code check to make sure we don;t exceed resource allocation\n if len(self.futures) > sum(self.client.nthreads().values()):\n warnings.warn(\"More running jobs than resources available \"\n \"Should not have more futures/runs in remote workers \"\n \"than the number of workers. This could mean a worker \"\n \"crashed and was not able to be recovered by dask. \"\n )\n\n # A future is removed to the list of futures as an indication\n # that a worker is available to take in an extra job\n done_futures = [f for f in self.futures if f.done()]\n for future in done_futures:\n self.results.append(future.result())\n self.futures.remove(future)",
"def _check_job_completeness(self, jobs):\n for job in concurrent.futures.as_completed(jobs):\n if job.exception():\n raise (job.exception())",
"def _async_wait(self, **kwargs):\n # Reset first query ID. It will be found from the logs below.\n self.query_id = None\n r = super().execute(self.q.query, async_=True, **kwargs)\n\n # Thrift statuses, with human names.\n statuses = TOperationState._VALUES_TO_NAMES\n\n full_error = []\n # Start marker of the useful error message\n short_prefix = 'info=['\n short_error = None\n\n status = None\n full_status_interval = 5\n last_full_status = datetime.datetime.utcnow()\n while status in [\n None,\n TOperationState.INITIALIZED_STATE,\n TOperationState.PENDING_STATE,\n TOperationState.RUNNING_STATE\n ]:\n\n time.sleep(0.5)\n new_status = self.poll().operationState\n\n if new_status != status:\n if status is not None:\n # Keep the last line of status, which was written with \\r\n print()\n print(f\"Status change. Was {statuses[status] if status is not None else 'None'},\" +\n f\" is now {statuses[new_status]}.\")\n status = new_status\n\n logs = self.fetch_logs()\n for message in logs:\n # Need to extract the query ID to talk to yarn\n if self.query_id is None:\n # The id is displayed many times, in a few different formats.\n # It looks like the earliest and most repeated is eg.\n # (queryId=hive_20190628102710_bf894b75-f6d4-4da2-b0a5-2a2d44045711)\n m = re.search(r'\\(queryId=(?P<qid>hive.*?)\\)', message)\n if m:\n self.query_id = m.group('qid')\n\n # If ERROR begins a line, let's remember and all after the full error message (contains massive\n # useless stacktrace and all attempts).\n # Extract as well the one relevant human-friendly line.\n if message.strip().startswith('ERROR') or (full_error and not message.strip().startswith('INFO')):\n full_error.append(message)\n if short_prefix in message and not short_error:\n short_error = message.partition(short_prefix)[2]\n # Sometimes the error is only one line long, without error_prefix\n if not short_error and full_error:\n short_error = full_error[0]\n\n logging.debug(message)\n if last_full_status + datetime.timedelta(seconds=full_status_interval) < datetime.datetime.utcnow():\n last_full_status = datetime.datetime.utcnow()\n try:\n self._print_progress_info()\n except Exception as e:\n # Whatever happens, trying to display progress should never stop the actual query run.\n # Furthermore, some of those errors are transient (mostly at query start)\n print(\"Error fetching progress info (query is probably not actually started yet): \" + str(e),\n end='\\r')\n if full_error:\n self.q.full_error('\\n'.join(full_error))\n self.q.short_error(short_error)\n print(self.q.short_error())\n\n print(f\"Final status is {statuses[status]}.\")\n return r",
"def send_async_requests(self):\n\t\tif len(self._async_http_requests) <= 0:\n\t\t\treturn ()\n\n\t\tif self._session is None:\n\t\t\tself.start_new_session()\n\t\tsession = self._session\n\n\t\tresponses = [None] * len(self._async_http_requests)\n\t\t\":type : list\"\n\n\t\tfutures = []\n\t\tfor req, uri, host, auth, decode, ignored in self._async_http_requests:\n\t\t\tif host is None:\n\t\t\t\thost = self._host\n\t\t\t_log_http_request(req, uri, host, auth, self.log_full_request)\n\t\t\tf = self._async_executor.submit(session.send, req)\n\t\t\t# mini data-structure, Tuple[done_yet, future]\n\t\t\tfutures.append((False, f, decode, ignored))\n\t\tself._async_http_requests = []\n\n\t\t# now wait for them to complete\n\t\twhile len([x for x in futures if not x[0]]) > 0:\n\t\t\tnext_futures = []\n\t\t\tfor idx, f in enumerate(futures):\n\t\t\t\tdone_now = f[0]\n\t\t\t\tif not done_now:\n\t\t\t\t\tif f[1].done():\n\t\t\t\t\t\tr = f[1].result()\n\t\t\t\t\t\t_log_http_response(r, self.log_full_response)\n\t\t\t\t\t\tresponses[idx] = (r, f[2], f[3])\n\t\t\t\t\t\tdone_now = True\n\t\t\t\tnext_futures.append((done_now, f[1], f[2], f[3]))\n\t\t\tfutures = next_futures\n\t\t\ttime.sleep(0.01)\n\t\t# they are now done\n\n\t\t# we need to re-raise any exceptions that occur\n\t\tbad_responses = []\n\t\tfor idx, resp_items in enumerate(responses):\n\t\t\tresp, decode, ignored = resp_items\n\t\t\tif resp.status_code not in ignored:\n\t\t\t\ttry:\n\t\t\t\t\tresp.raise_for_status()\n\t\t\t\texcept requests.HTTPError as e:\n\t\t\t\t\t_log.exception(\"HTTPError in request #\" + str(idx) + \": \" + str(e))\n\t\t\t\t\tbad_responses.append(idx)\n\t\tif len(bad_responses) > 0:\n\t\t\tself._async_transforms = []\n\t\t\traise AsyncHTTPError(bad_responses)\n\n\t\t# finally, call the transform function on each one\n\t\ttransformed = []\n\t\tfor r_items, xform in zip(responses, self._async_transforms):\n\t\t\tr, decode, ignored = r_items\n\t\t\tdata = None\n\t\t\tif r.content is not None:\n\t\t\t\tif decode == 'text':\n\t\t\t\t\tdata = r.text\n\t\t\t\telif decode == 'json':\n\t\t\t\t\tdata = r.json(parse_float=decimal.Decimal)\n\t\t\t\telif decode == 'binary':\n\t\t\t\t\tdata = r.content\n\t\t\t\telse:\n\t\t\t\t\traise ValueError(\"Bad response_payload encoding: \" + decode)\n\t\t\t\tdata = xform(data)\n\t\t\ttransformed.append(data)\n\t\tself._async_transforms = []\n\t\treturn transformed",
"def test_check_for_errors(self):\n downloader = _MultithreadedDownloader(mock.Mock(), mock.Mock(), 5)\n\n request = mock.Mock()\n exception = ValueError(\"failed\")\n\n successful_future = mock.Mock(exception=mock.Mock(return_value=None))\n failed_future = mock.Mock(exception=mock.Mock(return_value=exception))\n completed_futures = (\n ([successful_future] * 2) + [failed_future] + [successful_future]\n )\n\n with pytest.raises(exception.__class__):\n downloader._check_for_errors(request, completed_futures)",
"def test_async_call(self):\n actors = [Actor.remote(i) for i in range(4)]\n manager = FaultTolerantActorManager(actors=actors)\n\n results = []\n for _ in range(10):\n manager.foreach_actor_async(lambda w: w.call())\n results.extend(manager.fetch_ready_async_reqs(timeout_seconds=None))\n # Wait for actors to recover.\n wait_for_restore()\n\n # Note that we can hardcode the numbers here because of the deterministic\n # lists of random numbers we use.\n # 7 calls succeeded, 4 failed.\n # The number of results back is much lower than 40, because we do not probe\n # the actors with this test. As soon as an actor errors out, it will get\n # taken out of the lineup forever.\n self.assertEqual(len([r for r in results if r.ok]), 7)\n self.assertEqual(len([r for r in results if not r.ok]), 4)\n\n manager.clear()",
"def test_check_for_errors__no_errors(self):\n downloader = _MultithreadedDownloader(mock.Mock(), mock.Mock(), 5)\n\n request = mock.Mock()\n completed_futures = [mock.Mock(exception=mock.Mock(return_value=None))] * 3\n\n # does not raise error\n downloader._check_for_errors(request, completed_futures)",
"def _process_fetch_failure(self):\n logger.info('DataFetcher: No valid result is received')\n if len(self.urls_processed) == len(self.urls):\n raise NoDataReceivedFromCaster()\n for _, error_code, error_text in self._curls_failed:\n if error_code == PYCURL_TIMEOUT_ERRNO:\n raise ExceededTimeoutError(error_text)\n if self._curls_failed:\n _, _, error_text = self._curls_failed[0]\n raise UnableToConnect(error_text)\n raise NoDataReceivedFromCaster()",
"def test_multipleConcurrentFailure(self):\n resolver = client.Resolver(servers=[('example.com', 53)])\n resolver.protocol = StubDNSDatagramProtocol()\n queries = resolver.protocol.queries\n\n query = dns.Query('foo.example.com', dns.A)\n firstResult = resolver.query(query)\n secondResult = resolver.query(query)\n\n class ExpectedException(Exception):\n pass\n\n queries.pop()[-1].errback(failure.Failure(ExpectedException()))\n\n return defer.gatherResults([\n self.assertFailure(firstResult, ExpectedException),\n self.assertFailure(secondResult, ExpectedException)])",
"def check_done(self):\n if len(self._calls) != 0:\n raise MockException(\"Still expecting more function calls\")",
"def _done_handler(base_future):\n if not base_future.done():\n # this should never ever be true.\n # having this code here just to avoid infinite timeout\n self.cancel()\n return\n\n if base_future.cancelled():\n self.cancel()\n return\n\n try:\n result = base_future.result()\n if isinstance(result, Future):\n self._chain_to_another_future(result)\n else:\n self.set_result(result)\n return\n except BaseException:\n # note, that exception may come from self.result()\n # and from on_fulfilled(result) calls.\n ex, trace_back = sys.exc_info()[1:]\n self.set_exception_info(ex, trace_back)\n return",
"def has_finished(self):\n return hasattr(self, '_result') or hasattr(self, '_result_exc')",
"def __CheckResponse(self, response):\n\n status = response.status\n if (status == httplib.OK or status == httplib.CREATED\n or status == httplib.NO_CONTENT):\n return\n elif (status == httplib.UNAUTHORIZED):\n raise BadCredentialsException\n elif (status == httplib.SERVICE_UNAVAILABLE):\n raise ServerBusyException\n elif (status == httplib.BAD_REQUEST\n or status == httplib.UNPROCESSABLE_ENTITY):\n raise BadArgumentsException\n elif (status == httplib.NOT_FOUND):\n raise NotFoundException\n else:\n raise BadOperationException",
"def _callbackChooser(self, future):\n assert(self.done())\n try:\n self._resultFuture.result()\n except TimeoutError:\n for c in self._callbackTimeout:\n c()\n except CancelledError:\n for c in self._callbackCancelled:\n c()\n if self._callbackSuccess:\n for c in self._callbackSuccess:\n c()",
"def _wait_for_futures(self, futures_to_cb):\n for f in futures.as_completed(futures_to_cb):\n # Call the associated callback for this future passing the future itself as the only argument.\n # Other required arguments to the callback should already partially applied to the callback\n # at this point.\n futures_to_cb[f](f)",
"def _wait_for_results(self) -> RemoteCallableResult:\n if (\n self.subscriber is None or\n self.started is None or\n self.process is None\n ):\n raise dbt.exceptions.InternalException(\n '_wait_for_results() called before handle()'\n )\n\n try:\n msg = self.subscriber.dispatch_until_exit(\n started=self.started,\n timeout=self.timeout,\n )\n except dbt.exceptions.Exception as exc:\n raise dbt_error(exc)\n except Exception as exc:\n raise server_error(exc)\n if isinstance(msg, QueueErrorMessage):\n raise RPCException.from_error(msg.error)\n elif isinstance(msg, QueueTimeoutMessage):\n if not self._single_threaded:\n self.process.terminate()\n raise timeout_error(self.timeout)\n elif isinstance(msg, QueueResultMessage):\n return msg.result\n else:\n raise dbt.exceptions.InternalException(\n 'Invalid message type {} (result={})'.format(msg)\n )",
"def _process_results(self, *args, **kwargs): # noqa: E501\n # Lock before processing results to prevent conflicts\n if not self._acquire_pr_lock():\n return\n\n # Get the future instance\n future = self.future\n\n # Skip if no Future\n if not future:\n return\n\n # Skip processing results if forget\n if self.forget:\n # Clean up client\n self.client.close()\n return\n\n try:\n # Get results using the client\n result = self.client.gather(future)\n except Exception as e:\n # Tell scheduler to stop sending updates about this key\n self.client.set_metadata(self.key, False)\n # Clean up client\n self.client.close()\n result = e\n log.warning(\n 'Exception encountered when retrieving results: \"{}\"'.format(str(e))\n )\n\n # Tell scheduler to stop sending updates about this key\n self.client.set_metadata(self.key, False)\n\n # Handle custom process results function\n if self.process_results_function:\n # Get the process_results_function in TethysJob and call it with the result retrived\n try:\n result = self.process_results_function(result)\n except Exception as e:\n log.exception(\"Process Results Function Error\")\n self._status = \"ERR\"\n result = str(e)\n\n # Serialize the result\n try:\n self.result = result\n except Exception:\n log.exception(\"Results Serialization Error\")\n self._status = \"ERR\"\n else:\n self._status = \"COM\" if self._status != \"ERR\" else \"ERR\"\n\n # Erase the key to avoid problem with dask recycle key\n self.key = \"\"\n\n # save the results or status in the database\n self.save()\n\n # Clean up client\n self.client.close()\n\n if client_fire_forget:\n client_fire_forget.close()\n\n self._release_pr_lock()",
"def check_response_errors(self, resp):\n return True",
"def _wait_for_futures(\n self, futs: List[futures.Future], name: str = \"futures\", timeout: Optional[float] = None\n ) -> bool:\n waited = futures.wait(futs, timeout=timeout)\n value = True\n\n # Log futures still running after timeout\n if waited.not_done:\n LOG.info(\n \"Waiting for %s timed out before completion [Experiment ID: %s].\",\n name,\n self.experiment_id,\n )\n value = False\n\n # Check for futures that were cancelled or errored\n excepts = \"\"\n for fut in waited.done:\n ex = fut.exception()\n if ex:\n excepts += \"\\n\".join(traceback.format_exception(type(ex), ex, ex.__traceback__))\n value = False\n elif fut.cancelled():\n LOG.debug(\n \"%s was cancelled before completion [Experiment ID: %s]\",\n name,\n self.experiment_id,\n )\n value = False\n elif not fut.result()[1]:\n # The job/analysis did not succeed, and the failure reflects in the second\n # returned value of _add_job_data/_run_analysis_callback. See details in Issue #866.\n value = False\n if excepts:\n LOG.error(\n \"%s raised exceptions [Experiment ID: %s]:%s\", name, self.experiment_id, excepts\n )\n\n return value",
"def _result_already_returned(self):\n return self.deferred.called",
"async def _finish_async_handler(self, future, event):\n try:\n await future\n except Exception:\n self._handle_exception(future=future, csbot_event=event)",
"async def check_retrieve(self) -> None:\n async with self.lock:\n for upcoming_event in self.upcoming_events.values():\n if not isinstance(upcoming_event, RecurringEvent):\n continue\n\n if not upcoming_event.time_to_notify():\n continue\n\n if isinstance(upcoming_event, RecurringEvent):\n try:\n await upcoming_event.retrieve_content()\n except NoMoreItems:\n continue",
"def test_later_failure_result(self):\n d = Deferred()\n dr = EventualResult(d, None)\n result_list = []\n done = append_in_thread(result_list, dr.wait, 100)\n time.sleep(0.1)\n d.errback(RuntimeError())\n done.wait(100)\n self.assertEqual(\n (result_list[0], result_list[1].__class__), (False, RuntimeError))",
"def test_lookup_added_callbacks_work_when_cancelled(self):\n # Reset event_loop so we start in a clean state.\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n self.event_loop = asyncio.get_event_loop()\n lookup = Lookup(FindValue, self.target, self.node, self.event_loop)\n lookup._handle_response = mock.MagicMock()\n lookup._cancel_pending_requests()\n for k, v in lookup.pending_requests.items():\n v.set_result('foo')\n self.event_loop.run_until_complete(v)\n self.assertEqual(lookup._handle_response.call_count, 0)",
"async def test_error_in_callback(self):\n validation_future = self.loop.create_future()\n\n async def test_handler(request: bytes, context: aio.ServicerContext):\n self.assertEqual(_REQUEST, request)\n\n def exception_raiser(unused_context):\n raise RuntimeError(\"A test RuntimeError\")\n\n context.add_done_callback(exception_raiser)\n validation_future.set_result(inject_callbacks(context))\n return _RESPONSE\n\n await self._register_method_handler(\n grpc.unary_unary_rpc_method_handler(test_handler)\n )\n\n response = await self._channel.unary_unary(_TEST_METHOD)(_REQUEST)\n self.assertEqual(_RESPONSE, response)\n\n # Following callbacks won't be invoked, if one of the callback crashed.\n validation = await validation_future\n with self.assertRaises(asyncio.TimeoutError):\n await validation\n\n # Invoke RPC one more time to ensure the toxic callback won't break the\n # server.\n with self.assertRaises(aio.AioRpcError) as exception_context:\n await self._channel.unary_unary(_FAKE_METHOD)(_REQUEST)\n rpc_error = exception_context.exception\n self.assertEqual(grpc.StatusCode.UNIMPLEMENTED, rpc_error.code())",
"async def execute(self):\n raise Exception()",
"def test_check_complete(self):\n self.cmd._update_ralph()\n self.cmd._cleanup()\n\n # Objects add/modification\n for flavor_id, flavor in OPENSTACK_FLAVOR.items():\n ralph_flavor = CloudFlavor.objects.get(flavor_id=flavor_id)\n self.assertEqual(ralph_flavor.name, flavor['name'])\n self.assertEqual(ralph_flavor.cloudprovider, self.cloud_provider)\n\n for project_id, project in OPENSTACK_DATA.items():\n ralph_project = CloudProject.objects.get(project_id=project_id)\n self.assertEqual(project['name'], ralph_project.name)\n self.assertEqual(self.cloud_provider, ralph_project.cloudprovider)\n for host_id, host in OPENSTACK_DATA[project_id]['servers'].items():\n ralph_host = CloudHost.objects.get(host_id=host_id)\n ips = ralph_host.ip_addresses\n self.assertEqual(ralph_host.hostname, host['hostname'])\n self.assertIn(host['tag'], ralph_host.tags.names())\n self.assertEqual(set(host['ips']), set(ips))\n\n # projects removal\n for project_id in ['project_id2', 'project_id3']:\n self.assertRaises(\n ObjectDoesNotExist,\n CloudProject.objects.get,\n project_id=project_id,\n )\n self.assertRaises(\n ObjectDoesNotExist,\n CloudFlavor.objects.get,\n flavor_id='flavor_id2',\n )\n self.assertRaises(\n ObjectDoesNotExist,\n CloudHost.objects.get,\n host_id='host_id1',\n )"
] | [
"0.63616014",
"0.62636966",
"0.62554723",
"0.58126837",
"0.5771574",
"0.57408834",
"0.5710376",
"0.5639147",
"0.5591878",
"0.55765843",
"0.55355513",
"0.55083793",
"0.5503552",
"0.55019253",
"0.5415703",
"0.5412277",
"0.5355434",
"0.535387",
"0.53446335",
"0.5322889",
"0.5311772",
"0.5282767",
"0.5254316",
"0.5227553",
"0.51883745",
"0.5168478",
"0.51627016",
"0.5160985",
"0.5159164",
"0.5113033"
] | 0.7792941 | 0 |
Download a file from the SweetTea API. If the destination path has no file extension, it is assumed to be a directory. | def download(self, api_route, dest_path, payload=None, file_type_header=None, extract_archives=True):
resp = None
payload = payload or {}
file_type_header = file_type_header or self.default_file_type_header
try:
# Fetch the file from the API.
resp = self.api.get(api_route, payload=payload, stream=True)
except KeyboardInterrupt:
exit()
# Extract further info about the downloaded file.
file_ext, file_is_archive = self._analyze_response_file(resp, file_type_header)
# Calculate the final path to save downloaded file at.
save_to, extract_to = self._calc_final_dest(
dest_path,
file_ext,
file_is_archive,
extract_archives,
payload.get('name') or self.default_name
)
# Stream file to save path.
self._stream_to_path(save_to, resp.response_obj, int(resp.headers.get('Content-Length')))
# If no archive extraction needed, just return the path at which the file was saved.
if not extract_to:
return save_to
# Extract archive to final destination.
extract_zip(save_to, extract_to)
return extract_to | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def download(self, url, destination):\n fileDownloader = utils.HttpFileDownloader(url, destination)\n fileDownloader.download()",
"def filedownload(source, destination):\n\n # Initiate the download\n urllib.request.urlretrieve(source, destination)",
"def download_file(self, source, dest=None):\n if dest is None or self.download_root not in dest:\n dest = self.download_root + source\n\n # dest is a directory if ending with '/' or '.', otherwise it's a file\n if dest.endswith(\".\"):\n dest += \"/\"\n\n blob_dest = dest\n if dest.endswith(\"/\"):\n blob_dest = dest + os.path.basename(source)\n\n print(f\" Downloading {source} to {blob_dest}\")\n os.makedirs(os.path.dirname(blob_dest), exist_ok=True)\n bc = self.client.get_blob_client(blob=source)\n with open(blob_dest, \"wb\") as file:\n data = bc.download_blob()\n file.write(data.readall())\n return blob_dest",
"def download(self, url: str, dest: PathLike, force: bool = False):",
"def download_file(src_url, dst_path):\n logger.info(f'Downloading file from: {src_url}')\n with src_url.open(mode='r') as in_file:\n with open(dst_path, 'wb') as out_file:\n out_file.write(in_file.read())\n logger.info(f'Downloaded file path on disk: {dst_path}')\n return dst_path",
"def _download_from_url(self, url):\n target_file_name = self.dir + \"/\" + url.split('/')[-1].split('?')[0]\n urllib.urlretrieve (url, target_file_name)",
"def _download_file(file_url: str, file_path: str) -> str:\n if os.path.exists(file_path):\n return file_path\n op_desc = f\"Downloading {os.path.basename(file_path)}\"\n try:\n with requests.Session() as req_sess:\n req_res = req_sess.get(file_url, stream=True)\n total_length = int(req_res.headers.get(\"Content-Length\"))\n with tqdm.wrapattr(req_res.raw, \"read\", total=total_length, desc=op_desc) as raw:\n with open(file_path , \"wb\") as file:\n shutil.copyfileobj(raw,file)\n return file_path\n except Exception as network_error:\n if os.path.exists(file_path):\n os.remove(file_path)\n raise network_error",
"def download(url, path):\n response = requests.get(url)\n\n if response.ok:\n print(\"response is ok file is downloading ... \")\n # start to download file from url.\n with open(path, \"wb\") as f:\n f.write(response.content)\n else:\n print(\"Error!\", response.status_code)\n return False\n\n print(\"File downloaded succusfully.\")\n return True",
"def download_file(dwn_url, dwn_folder):\n # Prepare path\n _, dwn_fil = split(dwn_url)\n dwn_dir = join(dwn_folder, dwn_fil)\n\n # download_tile = requests.get(dwn_url)\n open(dwn_dir, 'wb').write(requests.get(dwn_url).content)\n\n # Message for successful download\n status_msg = dwn_fil + ' succsesfully downloaded'\n\n return status_msg, dwn_fil",
"def download(filename, work_directory, source_url, overwrite=False):\n\n if not gfile.Exists(work_directory):\n gfile.MakeDirs(work_directory)\n\n filepath = os.path.join(work_directory, filename)\n\n if overwrite or not gfile.Exists(filepath):\n _filename, _ = urlretrieve_with_retry(source_url + filename)\n #print('_filename:', _filename)\n gfile.Copy(_filename, filepath, overwrite=overwrite)\n with gfile.GFile(filepath) as f:\n size = f.size()\n print('Successfully downloaded', filename, size, 'bytes.')\n\n return filepath",
"def download_url(url, destination_filename=None, progress_updater=None, \n force_download=False, verbose=True):\n \n if progress_updater is not None and isinstance(progress_updater,bool):\n if not progress_updater:\n progress_updater = None\n else:\n progress_updater = DownloadProgressBar()\n \n url_no_sas = url.split('?')[0]\n \n if destination_filename is None:\n target_folder = get_temp_folder()\n url_without_sas = url.split('?', 1)[0]\n \n # This does not guarantee uniqueness, hence \"semi-best-effort\"\n url_as_filename = re.sub(r'\\W+', '', url_without_sas)\n n_folder_chars = len(ai4e_utils_temp_dir)\n if len(url_as_filename) + n_folder_chars > max_path_len:\n print('Warning: truncating filename target to {} characters'.format(max_path_len))\n url_as_filename = url_as_filename[-1*(max_path_len-n_folder_chars):]\n destination_filename = \\\n os.path.join(target_folder,url_as_filename)\n \n if (not force_download) and (os.path.isfile(destination_filename)):\n if verbose:\n print('Bypassing download of already-downloaded file {}'.format(os.path.basename(url_no_sas)))\n else:\n if verbose:\n print('Downloading file {} to {}'.format(os.path.basename(url_no_sas),destination_filename),end='')\n urllib.request.urlretrieve(url, destination_filename, progress_updater) \n assert(os.path.isfile(destination_filename))\n nBytes = os.path.getsize(destination_filename)\n if verbose:\n print('...done, {} bytes.'.format(nBytes))\n \n return destination_filename",
"def download_file(dwn_url, dwn_folder):\n download_tile = requests.get(dwn_url)\n\n # Save the content as file\n _, dwn_fil = split(dwn_url)\n dwn_dir = join(dwn_folder, dwn_fil)\n open(dwn_dir, \"wb\").write(download_tile.content)\n # Message for successful download\n status_msg = f\"{dwn_fil} succsesfully downloaded\"\n\n return status_msg, dwn_fil",
"def download_file_from_url(url, PATH, file_name):\n with requests.get(url) as r:\n with open(PATH+'/'+file_name, 'wb') as f:\n f.write(r.content)",
"def download_file(self, source_file_name, destination_file_name, **keyword_args):\n blob = self.bucket.blob(source_file_name)\n blob.download_to_filename(destination_file_name, **keyword_args)\n print(f\"Download file {source_file_name} and save as {destination_file_name}\")",
"def download(path):\n return send_from_directory(UPLOAD_DIRECTORY, path, as_attachment=True)",
"def download(path):\n return send_from_directory(UPLOAD_DIRECTORY, path, as_attachment=True)",
"def download(url, to):\n filename = url.rstrip('/').split('/')[-1] + '.zip'\n r = requests.get(url, stream=True)\n\n outpath = os.path.join(to, filename)\n\n with open(outpath, 'wb') as fd:\n for chunk in r.iter_content(1024 * 1024):\n fd.write(chunk)\n\n return outpath",
"def download_file(url, target_path):\n\n r = requests.get(url, stream=True)\n\n with open(target_path, 'wb') as f:\n for chunk in r.iter_content(chunk_size=1024):\n if chunk:\n f.write(chunk)",
"def download_file(url, download_path):\n\n # Extract the filename from the URL\n parsed = urlparse(url)\n filename = basename(parsed.path)\n\n # Ensure the output directory exists\n if not os.path.exists(download_path):\n os.makedirs(download_path)\n\n # Get a temporary file path for the compressed file download\n downloaded_file = os.path.join(tempfile.gettempdir(), filename)\n\n # Download the file\n urlretrieve(url, downloaded_file)\n\n # Move the file to the destination folder\n destination_path = os.path.join(download_path, filename)\n os.rename(downloaded_file, destination_path)",
"def __download_file(file_path, url, extension=''):\r\n auth = (DaemonServer._user['_email'], DaemonServer._user['_token'])\r\n res = requests.get(DaemonServer._base_url + url, auth=auth, stream=True)\r\n with open(file_path + extension, 'wb') as dfile:\r\n for chunk in res.iter_content(chunk_size=1024):\r\n if chunk:\r\n dfile.write(chunk)",
"def download_file(url, destination_dir='./', desc=None, force=False):\n # Convert path to pathlib object if not already\n destination_dir = Path(destination_dir)\n # Get filename from url\n fname = url.split('/')[-1]\n # Construct path to file in local machine\n local_filepath = Path(destination_dir) / fname\n\n if local_filepath.is_file() and not force:\n logger.info(\n \"File(s) already downloaded. Use force=True to download again.\")\n return local_filepath\n else:\n # Safely create nested directory - https://stackoverflow.com/a/273227\n destination_dir.mkdir(parents=True, exist_ok=True)\n\n if desc is None:\n desc = f\"Downloading {fname}\"\n\n # Download large file with requests - https://stackoverflow.com/a/16696317\n with requests.get(url, stream=True) as r:\n r.raise_for_status()\n total_size_in_bytes = int(r.headers.get('content-length', 0))\n block_size = 1024\n # Progress bar for downloading file - https://stackoverflow.com/a/37573701\n pbar = tqdm(total=total_size_in_bytes,\n unit='iB',\n unit_scale=True,\n desc=desc)\n with open(local_filepath, 'wb') as f:\n for data in r.iter_content(block_size):\n pbar.update(len(data))\n f.write(data)\n pbar.close()\n\n # TODO Add SHA256 or MD5 comparison\n\n return local_filepath",
"def download(path):\n\treturn send_from_directory(\"results\", path, as_attachment=True)",
"def download_file(self, url, filename):\n r = requests.get(url, stream=True)\n r.raise_for_status()\n\n with open(filename, 'wb') as f:\n for chunk in r.iter_content():\n if chunk:\n f.write(chunk)\n f.flush()",
"def __download_file(self, filename):\r\n \r\n respons = requests.get(self.__url + filename, stream=True)\r\n save_filename = os.path.join(self.__folder, os.path.basename(filename))\r\n with open(save_filename, 'wb') as output_file:\r\n for chunk in respons.iter_content(chunk_size=128):\r\n output_file.write(chunk)",
"def download_url(url, path=None, name=None):\n r = requests.get(url, allow_redirects=True)\n if path:\n paths = []\n paths.append(path)\n make_dir_from_list(paths)\n open(os.path.join(paths[0], name), 'wb').write(r.content)\n return r.content.decode('utf-8')",
"def download(self, download_path):\n return",
"def download_file(url, local_path):\n try:\n local_filename = normalizeFilenameToCommonDateFormat(url.split('/')[-1])\n \n destination_dir = local_path #os.path.join(local_path, os.path.splitext(os.path.basename(local_filename))[0])\n \n #if not os.path.exists(destination_dir):\n # os.makedirs(destination_dir)\n \n destination_file = os.path.join(destination_dir, local_filename)\n \n if not os.path.exists(destination_file):\n # NOTE the stream=True parameter \n r = requests.get(url, stream=True)\n with open(destination_file, 'wb') as f:\n for chunk in r.iter_content(chunk_size=1024): \n if chunk: # filter out keep-alive new chunks\n f.write(chunk)\n #f.flush() commented by recommendation from J.F.Sebastian\n # Sleep so that we aren't rude\n sleep(1)\n else:\n return destination_file + ' already '\n \n return destination_file\n except ValueError as err:\n return \"Skipping %s, not \" % (url.split('/')[-1])",
"def http_download(url, target_path):\n try:\n resp = urllib2.urlopen(url)\n except urllib2.URLError, e:\n if not hasattr(e, 'code'):\n raise\n resp = e\n if resp.code != 200:\n raise IOError(\"Request url(%s) expect 200 but got %d\" %(url, resp.code))\n\n with open(target_path, 'wb') as f:\n shutil.copyfileobj(resp, f)\n return target_path",
"def download():\n try:\n cli.run(\n [URL, '--output', TEMP_DIR],\n )\n except SystemExit:\n return None",
"def download_file(download_url, save_path):\n url = \"https://www.encodeproject.org/\" + download_url\n urllib.request.urlretrieve(url, save_path)"
] | [
"0.74462014",
"0.717291",
"0.70779574",
"0.707318",
"0.70136386",
"0.68301034",
"0.68121547",
"0.6757707",
"0.67238814",
"0.6722531",
"0.6690708",
"0.6689046",
"0.66746974",
"0.666172",
"0.66608715",
"0.66608715",
"0.6646723",
"0.6643791",
"0.6632525",
"0.66285616",
"0.6610641",
"0.6602498",
"0.66001177",
"0.6591039",
"0.6589773",
"0.65710753",
"0.6564026",
"0.65464115",
"0.6531154",
"0.65286505"
] | 0.7325945 | 1 |
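The download() method in the record above streams an API response to disk and, when the payload is an archive, extracts it to a final destination. Below is a minimal sketch of the same stream-then-extract pattern, assuming the widely used requests library and a plain URL in place of the SweetTea client's internal self.api wrapper (which the record does not show); download_file and its parameters are hypothetical names, not part of the SweetTea API.

# Minimal sketch of the stream-then-extract pattern used by download() above.
# Assumes the requests library and a plain URL; the SweetTea client wrapper,
# file-type headers, and name handling are deliberately left out.
import os
import zipfile

import requests


def download_file(url, dest_path, extract_archives=True, chunk_size=8192):
    resp = requests.get(url, stream=True)
    resp.raise_for_status()

    # Stream the response body to the destination path.
    os.makedirs(os.path.dirname(dest_path) or ".", exist_ok=True)
    with open(dest_path, "wb") as fh:
        for chunk in resp.iter_content(chunk_size=chunk_size):
            if chunk:
                fh.write(chunk)

    # If the payload is a zip archive, extract it next to the download.
    if extract_archives and zipfile.is_zipfile(dest_path):
        extract_to = os.path.splitext(dest_path)[0]
        with zipfile.ZipFile(dest_path) as zf:
            zf.extractall(extract_to)
        return extract_to

    return dest_path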
This is used to count the total records of the product queue lines. | def _compute_queue_line_record(self):
for product_queue in self:
queue_lines = product_queue.product_data_queue_lines
product_queue.queue_line_total_records = len(queue_lines)
product_queue.queue_line_draft_records = len(
queue_lines.filtered(lambda x:x.state == 'draft'))
product_queue.queue_line_fail_records = len(
queue_lines.filtered(lambda x:x.state == 'failed'))
product_queue.queue_line_done_records = len(
queue_lines.filtered(lambda x:x.state == 'done'))
product_queue.queue_line_cancel_records = len(
queue_lines.filtered(lambda x:x.state == 'cancel')) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def num_items(self):\n num_items = 0\n for line in self.lines.all():\n num_items += line.quantity\n return num_items",
"def _calc_line_quantity(self, cr, uid, line, context=None):\n return line.product_qty",
"def _numQueuedTotal(self):\n queueSize = len(self.__queue) + len(self.__clientQueue)\n return queueSize",
"def __len__(self):\n return sum(item['qty'] for item in self.cart.values())",
"def __len__(self):\n return sum(item['qty'] for item in self.basket.values()) # counts all the values of the key qty",
"def __len__(self):\n return sum(item['quantity'] for item in self.cart.values())",
"def __len__(self):\n \n return sum(item['quantity'] for item in self.cart.values())",
"def count(self):\n with self.pdq:\n (count,)=self.pdq.cursor().execute('select count(*) from pdq').next()\n return count",
"def __len__(self):\n return sum(item[\"quantity\"] for item in self.carro.values())",
"def count(cls, resq):\n first = MultipleBackend.classes[0]\n return first.count(resq)",
"def GetTotalQueueCount(handler, query):\n # pylint: disable=unused-argument\n\n json_config = {}\n json_config['count'] = 0\n\n with active_tivos_lock:\n for tivoIP in active_tivos:\n with active_tivos[tivoIP]['lock']:\n json_config['count'] += len(active_tivos[tivoIP]['queue'])\n\n handler.send_json(json.dumps(json_config))",
"def __len__(self):\n return self.total",
"def _compute_log_count(self):\n log_line_obj = self.env['common.log.lines.ept']\n model_id = log_line_obj.get_model_id('amazon.vcs.tax.report.ept')\n records = log_line_obj.search_read([('model_id', '=', model_id), ('res_id', '=', self.id)],\n [\"id\"])\n self.log_count = len(records)",
"def product_count(self) -> int:\n return self._product_count",
"def total_qty(self):\n return sum(self.quantities)",
"def get_total_line_counts(self):\n return get_total_line_counts(self.files.all())",
"def Count(self) -> int:",
"def Count(self) -> int:",
"def Count(self) -> int:",
"def Count(self) -> int:",
"def count_items(self):\n count = 0\n for o in self.order_lst:\n count += o.count()\n \n return count",
"def count(self):\n\n raise NotImplementedError",
"def _grand_total(self):\n count = 0\n for product in self.products:\n count += product.price\n return count",
"def get_product_count(self):\n return self.products.count()",
"def qty(self):\n return self._qty",
"def qty(self):\n return self._qty",
"def qty(self):\n return self._qty",
"def question_count_total(self, obj):\n return obj.questions.count()",
"def __len__(self):\n return int(self.total)",
"def total_record_count(self) -> int:\n return pulumi.get(self, \"total_record_count\")"
] | [
"0.6728332",
"0.6686901",
"0.6675879",
"0.661672",
"0.6524404",
"0.65208846",
"0.6467402",
"0.6437851",
"0.628259",
"0.6237059",
"0.6163172",
"0.6153888",
"0.6095286",
"0.6084754",
"0.60840166",
"0.6058307",
"0.60508925",
"0.60508925",
"0.60508925",
"0.60508925",
"0.6024711",
"0.6016144",
"0.5980995",
"0.5973837",
"0.5970982",
"0.5970982",
"0.5970982",
"0.5957274",
"0.5957223",
"0.59539914"
] | 0.74769735 | 0 |
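The _compute_queue_line_record() method in the record above tallies queue lines per state with repeated filtered() calls over an Odoo recordset. A framework-free sketch of the same tally follows; plain dicts stand in for the Odoo records, and count_queue_lines is a hypothetical helper name.

# Framework-free sketch of the per-state tally in _compute_queue_line_record().
# Plain dicts stand in for Odoo queue-line records.
from collections import Counter


def count_queue_lines(queue_lines):
    counts = Counter(line["state"] for line in queue_lines)
    return {
        "total": len(queue_lines),
        "draft": counts.get("draft", 0),
        "failed": counts.get("failed", 0),
        "done": counts.get("done", 0),
        "cancel": counts.get("cancel", 0),
    }


# Example: three lines in mixed states.
print(count_queue_lines([{"state": "draft"}, {"state": "done"}, {"state": "done"}]))
# {'total': 3, 'draft': 1, 'failed': 0, 'done': 2, 'cancel': 0}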
Computes the queue state from the different states of its queue lines. | def _compute_queue_state(self):
for record in self:
if record.queue_line_total_records == record.queue_line_done_records + record.queue_line_cancel_records:
record.state = "completed"
elif record.queue_line_draft_records == record.queue_line_total_records:
record.state = "draft"
elif record.queue_line_total_records == record.queue_line_fail_records:
record.state = "failed"
else:
record.state = "partially_completed" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def calculate_state(self):\n\t\tif self.state_type == 'Queues':\n\t\t\t#self.queue_state =\\\n\t\t\t#[0. if movement.AttValue('QLen(Current, Last)') is None else movement.AttValue('QLen(Current, Last)') for movement in self.lanes_movement]\n\n\t\t\tself.queue_state =\\\n\t\t\t[0. if queue.AttValue('QLen(Current, Last)') is None else queue.AttValue('QLen(Current, Last)') for queue in self.queues_counters]\n\n\t\t\tstate = np.array(self.queue_state)[np.newaxis,:]\n\n\t\tif self.state_type == \"QueuesSig\":\n\n\t\t\tself.queue_state =\\\n\t\t\t[0. if queue.AttValue('QLen(Current, Last)') is None else queue.AttValue('QLen(Current, Last)') for queue in self.queues_counters]\n\n\t\t\tstate = np.array(self.queue_state+[self.next_action_key])[np.newaxis,:]\n\t\n\t\treturn(state)",
"def state_prepare(q: List[QRegStorage], i: int):\n\n RX(0.1)(q[i])\n RZ(0.4)(q[i + 1])\n CX(q[i], q[i + 1])\n RY(0.8)(q[i])\n RZ(1.2)(q[i])",
"def update_state(self):\n\n # Start off assuming no space in the queues and no pointer to a\n # shortest queue.\n self.min_queue = None\n self.has_space_in_a_server_queue = False\n self.queue_size = 0\n self.online_server_count = 0\n\n # Loop through all the servers.\n for server in self.server_list:\n\n # If server is online....\n if server.online is True:\n\n # Increment count of online servers\n self.online_server_count += 1\n\n # If any server has space...\n if len(server.queue) < server.max_queue_size:\n\n # 'Has Space' is True and remains true.\n if self.has_space_in_a_server_queue is False:\n self.has_space_in_a_server_queue = True\n\n # First non-full server we come to.\n if self.min_queue is None:\n self.min_queue = server\n\n # If we already had a non-full queue in hand,\n # compare it to the present one.\n elif len(server.queue) < len(self.min_queue.queue):\n self.min_queue = server\n\n # Increment the count of the parallel server block.\n self.queue_size += len(server.queue)",
"def _compute_state(self):\n # TODO: duplicated code with stock_picking.py\n for production in self:\n if not production.move_raw_ids:\n production.state = 'draft'\n elif all(move.state == 'draft' for move in production.move_raw_ids):\n production.state = 'draft'\n elif all(move.state == 'cancel' for move in production.move_raw_ids):\n production.state = 'cancel'\n elif all(move.state in ('cancel', 'done') for move in production.move_raw_ids):\n production.state = 'done'\n elif production.workorder_ids and all(wo_state in ('done', 'cancel') for wo_state in production.workorder_ids.mapped('state')):\n production.state = 'to_close'\n elif not production.workorder_ids and production.qty_producing >= production.product_qty:\n production.state = 'to_close'\n elif any(wo_state in ('progress', 'done') for wo_state in production.workorder_ids.mapped('state')):\n production.state = 'progress'\n elif not float_is_zero(production.qty_producing, precision_rounding=production.product_uom_id.rounding):\n production.state = 'progress'\n elif any(not float_is_zero(move.quantity_done, precision_rounding=move.product_uom.rounding or move.product_id.uom_id.rounding) for move in production.move_raw_ids):\n production.state = 'progress'\n else:\n production.state = 'confirmed'\n\n # Compute reservation state\n # State where the reservation does not matter.\n production.reservation_state = False\n # Compute reservation state according to its component's moves.\n if production.state not in ('draft', 'done', 'cancel'):\n relevant_move_state = production.move_raw_ids._get_relevant_state_among_moves()\n if relevant_move_state == 'partially_available':\n if production.bom_id.operation_ids and production.bom_id.ready_to_produce == 'asap':\n production.reservation_state = production._get_ready_to_produce_state()\n else:\n production.reservation_state = 'confirmed'\n elif relevant_move_state != 'draft':\n production.reservation_state = relevant_move_state",
"def _getState(self, board):\r\n mySide = board.mySide(self.id)\r\n oppSide = board.oppSide(self.id)\r\n myMancala = board.stonesInMyMancala(self.id)\r\n oppMancala = board.stonesInOppMancala(self.id)\r\n \r\n state = [] # size should be inputSize - 1\r\n state.append(float(myMancala))\r\n# for i in range(self.rowSize):\r\n# state.append(mySide[i])\r\n for my in mySide:\r\n state.append(float(my))\r\n state.append(float(oppMancala))\r\n# for i in range(self.rowSize):\r\n# state.append(oppSide[i])\r\n for op in oppSide:\r\n state.append(float(op))\r\n return state",
"def StateMachine(self):\n if self.mode is ALL:\n self.which_state()\n\n if self.current_state == FB:\n # print(\"FORWARD/BACKWARD\")\n self.FB()\n elif self.current_state == LAT:\n # print(\"LATERAL\")\n self.LAT()\n elif self.current_state == ROT:\n # print(\"ROTATION\")\n self.ROT()\n elif self.current_state == COMBI:\n # print(\"COMBINED\")\n self.COMBI()\n\n return self.return_bezier_params()",
"def calculateState (self):\r\n newState = 0\r\n # print (\"Inside state function the states DNs are: \\n\")\r\n # print (\"Before starting \\n\")\r\n self.stateDanglingNodes()\r\n #for i in range(len(self.metaSpikes)):\r\n # if self.metaSpikes[i].typeSpike == 1:\r\n # print (\"Meta atom number is: \" + str(self.atomNumber) + \"\\n\")\r\n \r\n insideMetState = []\r\n # To calculate the state we need to update every atom the metaatom consistrs off then see\r\n # the states of every dangling node in the metaspikes\r\n for i in range(len(self.metaSpikes)):\r\n if self.metaSpikes[i].typeSpike == 1:\r\n #print (\"Inside type 1 \\n\")\r\n #print (\"Number of type 1 nodes: \" + str(len(self.metaSpikes[i].danglingNodeList)) + \"\\n\")\r\n for j in range(len(self.metaSpikes[i].danglingNodeList)):\r\n insideMetState.append(self.metaSpikes[i].danglingNodeList[j].state)\r\n if self.metaSpikes[i].danglingNodeList[j].state == 1:\r\n # print (\"Adding one \\n\" )\r\n newState += 1\r\n else:\r\n # print (\"Subracting one \\n\")\r\n newState -= 1\r\n else:\r\n \r\n # print (\"Inside type 2 \\n\")\r\n # print (\"Number od type 1 tales: \" + str(len(self.metaSpikes[i].danglingTailList)) + \"\\n\")\r\n for j in range(len(self.metaSpikes[i].danglingTailList)):\r\n #print (\"Size of tail: \" + str(len(self.metaSpikes[i].danglingTailList[j].nodeList)) + \"\\n\")\r\n for k in range(len(self.metaSpikes[i].danglingTailList[j].nodeList)):\r\n insideMetState.append(self.metaSpikes[i].danglingTailList[j].nodeList[k].state)\r\n if self.metaSpikes[i].danglingTailList[j].nodeList[k].state == 1:\r\n newState += 1\r\n else:\r\n newState -= 1 \r\n \r\n # print (\"The state of analysed nodes: \\n\" + str(insideMetState) + \"\\n\")\r\n # print (\"The length of analysed nodes: \\n\" + str(len(insideMetState)) + \"\\n\")\r\n # print (\"The new state is: \" + str(newState) + \"\\n\") \r\n self.state = newState",
"def __call__(self, inputs, states):\n \"\"\"Now we have multiple states, state->states\"\"\"\n sigmoid = tf.sigmoid\n # Parameters of gates are concatenated into one multiply for efficiency.\n # states: size = time_lag\n if self._state_is_tuple:\n hs = ()\n for state in states:\n c, h = state # c and h: tensor_size = (batch_size, hidden_size)\n hs += (h,) # hs : size = time_lag, i.e. time_lag * (batch_size, hidden_size)\n else:\n hs = ()\n for state in states:\n c, h = array_ops.split(value=state,\n num_or_size_splits=2,\n axis=1)\n hs += (h,)\n \n meta_variable_size = 4 * self.output_size\n concat = Symmetric_MPS_wavefn(inputs,\n hs,\n meta_variable_size,\n self._num_orders,\n self._virtual_dim,\n True)\n # i = input_gate, j = new_input, f = forget_gate, o = output_gate\n i, j, f, o = array_ops.split(value=concat,\n num_or_size_splits=4,\n axis=1)\n\n new_c = (c * sigmoid(f + self._forget_bias) + sigmoid(i) * self._activation(j))\n new_h = self._activation(new_c) * sigmoid(o)\n\n if self._state_is_tuple:\n new_state = LSTMStateTuple(new_c, new_h)\n else:\n new_state = array_ops.concat([new_c, new_h], 1)\n return new_h, new_state",
"def get_new_game_state(self, game_state, line, vector, current_color): \n\n #Determine if the move is parallel to the line\n parallel = False\n \n if len(line) > 1:\n if (line[0][0]+vector[0], line[0][1]+vector[1]) == line[1]:\n parallel = True\n if (line[-1][0]+vector[0], line[-1][1]+vector[1]) == line[-2]:\n parallel = True\n\n\n if parallel:\n\n #Find the rear marble in the motion\n start = line[0] if sum_tuples(line[0], vector) == line[1] else line[-1]\n end = line[-1] if start==line[0] else line[0]\n\n off_end = sum_tuples(end, vector)\n if coord_in_board(off_end) and game_state[off_end] == current_color: return None\n\n counting_others = False\n self_count = 0\n other_count = 0\n current = start\n chain = [2]\n #Put the marbles in chain until an empty space or the edge is reached\n while coord_in_board(current) and game_state[current]!=2:\n\n current_marble = game_state[current]\n if current_marble == current_color:\n if counting_others: \n return None\n else:\n self_count+=1\n else:\n other_count+=1\n counting_others=True\n \n if self_count>3 or other_count > 3 or other_count>=self_count: return None\n\n chain.append(current_marble)\n current = (current[0] + vector[0], current[1]+vector[1])\n\n #Check if ball is being pushed off\n if not counting_others and not coord_in_board(current): \n return None\n \n #Lay down the chain onto the new game state\n new_game_state = game_state.copy()\n current = start\n for marble in chain:\n x,y = current\n if ((1<=x<=9) and (1<=y<=9) and (y-4 <= x <= y+4)):\n new_game_state[current] = marble\n current = current[0]+vector[0], current[1]+vector[1]\n\n return new_game_state\n\n else: #Perpendicular moves\n\n for coord in line:\n move_coord = coord[0]+vector[0], coord[1]+vector[1]\n \n x,y = move_coord\n in_board = ((1<=x<=9) and (1<=y<=9) and (y-4 <= x <= y+4))\n if in_board and game_state[move_coord] != 2:\n return None\n elif not in_board:\n return None\n\n new_game_state = game_state.copy()\n for coord in line:\n new_game_state[coord] = 2\n move_coord = coord[0]+vector[0], coord[1]+vector[1]\n x,y = coord\n if (1<=x<=9) and (1<=y<=9) and (y-4 <= x <= y+4):\n new_game_state[move_coord] = current_color\n\n return new_game_state",
"def next_states(self, state):\n import copy\n\n ans = []\n current_array = state.board.array\n space_pos = state.board.space\n\n up_pos = [space_pos[0] - 1, space_pos[1]]\n down_pos = [space_pos[0] + 1, space_pos[1]]\n left_pos = [space_pos[0], space_pos[1] - 1]\n right_pos = [space_pos[0], space_pos[1] + 1]\n\n # down position\n if self.__is_valid(down_pos):\n down_array = [copy.copy(row) for row in current_array]\n down_board = Board(array=down_array, space=space_pos.copy())\n down_board.swap(down_pos)\n ans.append(State(board=down_board, came_from=state, move='U'))\n\n # up position\n if self.__is_valid(up_pos):\n up_array = [copy.copy(row) for row in current_array]\n up_board = Board(array=up_array, space=space_pos.copy())\n up_board.swap(up_pos)\n ans.append(State(board=up_board, came_from=state, move='D'))\n\n # right position\n if self.__is_valid(right_pos):\n right_array = [copy.copy(row) for row in current_array]\n right_board = Board(array=right_array, space=space_pos.copy())\n right_board.swap(right_pos)\n ans.append(State(board=right_board, came_from=state, move='L'))\n\n # left position\n if self.__is_valid(left_pos):\n left_array = [copy.copy(row) for row in current_array]\n left_board = Board(array=left_array, space=space_pos.copy())\n left_board.swap(left_pos)\n ans.append(State(board=left_board, came_from=state, move='R'))\n\n return ans",
"def rl():\n q_table = build_q_table(N_STATES, ACTIONS)\n for episode in range(MAX_EPISODES):\n step_counter = 0\n # initial\n S = 0\n is_terminated = False\n update_env(S, episode, step_counter)\n while not is_terminated:\n A = choose_action(S, q_table)\n S_, R = get_env_feedback(S, A)\n q_predict = q_table.ix[S, A]\n if S_ != 'terminated':\n q_target = R + LAMBDA * q_table.iloc[S_,:].max() # iloc: chose the specific columns based on integer\n else:\n q_target = R\n is_terminated = True\n q_table.ix[S, A] += ALPHA * (q_target - q_predict)\n # next_state <- old_state\n S = S_\n update_env(S, episode, step_counter + 1)\n step_counter += 1\n return q_table",
"def reward_min_queue_squared(state):\n try:\n queue_lagqueue = state.feature_map(\n filter_by=('queue', 'lag[queue]'),\n split=True\n )\n except AttributeError:\n queue_lagqueue = state\n\n ret = {}\n for tls_id, q_lq in queue_lagqueue.items():\n queue, lagqueue = q_lq\n\n ret[tls_id] = -(np.dot(queue, queue) - np.dot(lagqueue, lagqueue))\n return ret",
"def _compute_queue_line_record(self):\n for product_queue in self:\n queue_lines = product_queue.product_data_queue_lines\n product_queue.queue_line_total_records = len(queue_lines)\n product_queue.queue_line_draft_records = len(\n queue_lines.filtered(lambda x:x.state == 'draft'))\n product_queue.queue_line_fail_records = len(\n queue_lines.filtered(lambda x:x.state == 'failed'))\n product_queue.queue_line_done_records = len(\n queue_lines.filtered(lambda x:x.state == 'done'))\n product_queue.queue_line_cancel_records = len(\n queue_lines.filtered(lambda x:x.state == 'cancel'))",
"def solve(self, state, times):",
"def _update_state(self, currentPhase, phasetime, time):\n # compute new state without registered action\n tToNearGreenPhase = self._get_toNearGreenPhase(currentPhase, phasetime, self.extended)\n\n if self.numbus > 0:\n # last available checkout for this intersection\n if self.numbus > 1:\n # bunch, use current time as last checkout\n last_available_checkout_time = time\n elif self.last_checkout_bus is None:\n # no checked out bus, assume perfect headway\n last_available_checkout_time = time - self.CONFIG['target_headway']\n else:\n last_available_checkout_time = self.last_checkout_bus.check_out_time\n # check in time of the last bus checked in\n last_check_in_time = self.bus_list[-1].check_in_time\n check_in_hdy = self.bus_list[-1].check_in_headway\n new_state = [last_available_checkout_time, last_check_in_time, check_in_hdy, self.numbus, self.allnumvel,\n tToNearGreenPhase]\n else:\n if self.last_checkout_bus:\n last_available_checkout_time = self.last_checkout_bus.check_out_time\n check_in_hdy = self.last_checkout_bus.check_in_headway\n last_check_in_time = self.last_checkout_bus.check_in_time\n new_state = [last_available_checkout_time, last_check_in_time, check_in_hdy, 0, self.allnumvel, tToNearGreenPhase]\n else:\n new_state = [0, 0, 0, 0, self.allnumvel, tToNearGreenPhase]\n\n self.state = new_state\n return",
"def state(params1):\n variational_circuit(params1)\n return qml.state()",
"def __init__(self, policy , nstates , initial_stock , incomingOrdersQueue, outgoingOrdersQueue, incomingDeliveriesQueue, outgoingDeliveriesQueue):\r\n self.currentStock = initial_stock\r\n self.currentOrders = 0\r\n self.costsIncurred = 0\r\n \r\n self.incomingOrdersQueue = incomingOrdersQueue\r\n self.outgoingOrdersQueue = outgoingOrdersQueue\r\n self.incomingDeliveriesQueue = incomingDeliveriesQueue\r\n self.outgoingDeliveriesQueue = outgoingDeliveriesQueue\r\n \r\n self.lastOrderQuantity = 0\r\n\r\n\r\n self.policy = policy\r\n self.nstates = nstates\r\n self.states = []\r\n\r\n\r\n return",
"def test_input_stream_state_statewp():\n state_t1 = StateTask1(Direction.EAST, 0, 0)\n state_t2 = StateTask2([1, 10], [0, 0])\n\n instructions = tuple(read_instructions(input_stream()))\n assert state_t1.manhatam_distance == 0\n\n assert instructions[0] == Instruction(Direction.FWD, 10)\n state_t1.apply(instructions[0])\n state_t2.apply(instructions[0])\n assert state_t1.north == 0 and state_t1.east == 10\n assert state_t2.waypoint == [1, 10]\n assert state_t2.position == [10, 100]\n\n assert instructions[1] == Instruction(Direction.NORTH, 3)\n state_t1.apply(instructions[1])\n state_t2.apply(instructions[1])\n assert state_t1.north == 3 and state_t1.east == 10\n assert state_t2.waypoint == [4, 10]\n assert state_t2.position == [10, 100]\n\n assert instructions[2] == Instruction(Direction.FWD, 7)\n state_t1.apply(instructions[2])\n state_t2.apply(instructions[2])\n assert state_t1.north == 3 and state_t1.east == 17\n assert state_t2.waypoint == [4, 10]\n assert state_t2.position == [38, 170]\n\n assert instructions[3] == Instruction(Turn.RIGHT, 90)\n state_t1.apply(instructions[3])\n state_t2.apply(instructions[3])\n assert state_t1.north == 3 and state_t1.east == 17\n assert state_t2.waypoint == [-10, 4]\n assert state_t2.position == [38, 170]\n\n assert instructions[4] == Instruction(Direction.FWD, 11)\n state_t1.apply(instructions[4])\n state_t2.apply(instructions[4])\n assert state_t1.north == -8 and state_t1.east == 17\n assert state_t2.waypoint == [-10, 4]\n assert state_t2.position == [-72, 214]",
"def _getStates(self):\n feature_states = []\n # for i, sim in enumerate(self.sims):\n # state = sim.getState()\n\n # long_id = self._make_id(state.scanId, state.location.viewpointId)\n # if self.features:\n # feature = self.features[long_id] # Get feature for\n # feature_states.append((feature, state))\n # else:\n # feature_states.append((None, state))\n for i in range(self.batch_size):\n while not self.qout[i].empty():\n self.qout[i].get()\n while not self.qtraj[i].empty():\n self.qtraj[i].get()\n\n self.qin[i].put(('state',None))\n \n for i in range(self.batch_size):\n state = self.qout[i].get()\n # print(state)\n long_id = self._make_id(state.scanId, state.location.viewpointId)\n if self.features:\n feature = self.features[long_id] # Get feature for\n feature_states.append((feature, state))\n else:\n feature_states.append((None, state))\n\n return feature_states",
"def _getStates(self):\n feature_states = []\n # for i, sim in enumerate(self.sims):\n # state = sim.getState()\n\n # long_id = self._make_id(state.scanId, state.location.viewpointId)\n # if self.features:\n # feature = self.features[long_id] # Get feature for\n # feature_states.append((feature, state))\n # else:\n # feature_states.append((None, state))\n for i in range(self.batch_size):\n while not self.qout[i].empty():\n self.qout[i].get()\n while not self.qtraj[i].empty():\n self.qtraj[i].get()\n\n self.qin[i].put(('state',None))\n \n for i in range(self.batch_size):\n state = self.qout[i].get()\n # print(state)\n long_id = self._make_id(state.scanId, state.location.viewpointId)\n if self.features:\n feature = self.features[long_id] # Get feature for\n feature_states.append((feature, state))\n else:\n feature_states.append((None, state))\n\n return feature_states",
"def fromState(state):",
"def calc_q_values(self, state, Q, flag):\n\n if flag == 0:\n self.train_data = np.concatenate([st.state for st in state])\n return Q.predict_on_batch(self.train_data)\n elif flag == 1:\n return Q.predict_on_batch(np.concatenate([st.next_state for st in state]))",
"def _state_convert(self, raw_state):\n variables_dict = dict()\n variables_dict[\"s_t\"] = np.hstack((0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.0, 0.0, 1.0))\n variables_dict[\"v_t\"] = np.hstack((0.0, 0.0, 0.0, 0.0, 0.0, 0.0))\n variables_dict[\"add_s_t\"] = np.hstack((0.8, 0.8))\n variables_dict[\"add_v_t\"] = np.hstack((0.0, 0.0))\n variables_dict[\"flag_t\"] = 0.0\n variables_dict[\"add_dist_min\"] = np.hstack((1000.0, 1000.0, 1000.0, 1000.0))\n variables_dict[\"dist_min\"] = np.hstack((1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0))\n variables_dict[\"ego_lane\"] = raw_state[16]\n variables_dict[\"lane_ids\"] = raw_state[18]\n variables_dict[\"ego_lane\"] = variables_dict[\"lane_ids\"].index(variables_dict[\"ego_lane\"])\n if variables_dict[\"ego_lane\"] == 0 or variables_dict[\"ego_lane\"] == 2:\n variables_dict[\"s_t\"][variables_dict[\"ego_lane\"]] = 1.0\n variables_dict[\"s_t\"][variables_dict[\"ego_lane\"] + 3] = 1.0\n variables_dict[\"flag_t\"] = 1 if variables_dict[\"ego_lane\"] == 0 else -1\n\n variables_dict[\"ego_raw_speed\"] = raw_state[3]\n variables_dict[\"filter_speed\"] = (variables_dict[\"ego_raw_speed\"]\n if variables_dict[\"ego_raw_speed\"] >= 10.0 else 10.0)\n variables_dict[\"s_t\"][6] = variables_dict[\"ego_raw_speed\"] / SPEED_RANGE\n objects = raw_state[-1]\n # print(\"ego_speed\",ego_raw_speed,\"ego_lane\",ego_lane)\n if objects[0] is not None:\n # for i in range(len(objects)):\n for i, _object in enumerate(objects):\n lane_id = objects[i][0]\n dist = abs(objects[i][1]) * np.sign(objects[i][1])\n speed = objects[i][2]\n pre_post = np.sign(dist)\n flag = 0 if pre_post == 1.0 else 1\n\n if abs(dist) < VIEW_RANGE:\n for j in range(3):\n adjacent_lane = variables_dict[\"ego_lane\"] - 1 + j\n dist_index = j + flag * 3\n if (lane_id == adjacent_lane and abs(dist) < variables_dict[\"dist_min\"][dist_index]):\n self.min_dist(\n variables_dict[\"v_t\"],\n variables_dict[\"s_t\"],\n dist_index,\n speed,\n dist,\n variables_dict[\"filter_speed\"],\n )\n variables_dict[\"dist_min\"][dist_index] = abs(dist)\n\n if abs(dist) < variables_dict[\"add_dist_min\"][flag]:\n if (variables_dict[\"ego_lane\"] == 0 and lane_id == variables_dict[\"ego_lane\"] + 2\n or variables_dict[\"ego_lane\"] == len(variables_dict[\"lane_ids\"]) - 1\n and lane_id == variables_dict[\"ego_lane\"] - 2):\n self.min_dist(\n variables_dict[\"add_v_t\"],\n variables_dict[\"add_s_t\"],\n flag,\n speed,\n dist,\n variables_dict[\"filter_speed\"],\n )\n\n state = np.hstack((\n variables_dict[\"s_t\"],\n variables_dict[\"v_t\"],\n variables_dict[\"add_s_t\"],\n variables_dict[\"add_v_t\"],\n variables_dict[\"flag_t\"],\n ))\n return state",
"def call(self, states):\n # TODO: implement this ~\n l1 = tf.nn.relu(self.Q_1(states))\n l2 = tf.nn.relu(self.Q_2(l1))\n qVals = self.Q_3(l2)\n return qVals\n # return tf.argmax(qVals, 1)",
"def state(self):\n lines = self.state_lines()\n for line in lines:\n if set(line) == {State.X_WON}:\n return State.X_WON\n if set(line) == {State.O_WON}:\n return State.O_WON\n if not any(map(lambda line: State.IN_PROGRESS in line, lines)):\n return State.DRAW\n return State.IN_PROGRESS",
"def calculate_next_board_state(self):\n new_board_state = np.zeros_like(self.board_state)\n\n for x in range(self.board_size[0]):\n for y in range(self.board_size[0]):\n new_board_state[x][y] = self.next_state_of_cell(x,y)\n \n self.set_state(new_board_state)",
"def next_step(self):\n\n c = 1\n dt = 0.001\n dx = 1 / 20**2\n\n # copy current state first\n next_state = np.copy(self.state)\n\n # iterate over matrix\n for i in range(self.width - 1):\n for j in range(self.height - 1):\n\n if not self.shape == \"circle\" or self.circle[i, j] == 1:\n\n # left bottom corner\n if i == 0 and j == 0:\n next_state[i, j] = ((c * dt)/ dx)** 2\\\n * (self.state[i + 1, j] + 0\\\n + 0 + self.state[i, j + 1]\\\n - 4 * self.state[i, j])\\\n + 2 * self.state[i, j] - self.prev_state[i, j]\n # right top corner\n elif i == 0 and j == self.height - 1:\n next_state[i, j] = ((c * dt)/ dx)** 2\\\n * (0 + self.state[i + 1, j]\\\n + self.state[i, j - 1] + 0\\\n - 4 * self.state[i, j])\\\n + 2 * self.state[i, j] - self.prev_state[i , j]\n # right bottom corner\n elif i == self.width - 1 and j == 0:\n next_state[i, j] = ((c * dt)/ dx)** 2\\\n * (self.state[i, j - 1] + 0\\\n + 0 + self.state[i - 1, j]\\\n - 4 * self.state[i, j])\\\n + 2 * self.state[i, j] - self.prev_state[i , j]\n # left bottom corner\n elif i == self.width - 1 and j == self.height - 1:\n next_state[i, j] = ((c * dt)/ dx)** 2\\\n * (self.state[i, j - 1] + self.state[i - 1, j]\\\n + 0 + 0\\\n - 4 * self.state[i, j])\\\n + 2 * self.state[i, j] - self.prev_state[i , j]\n elif i == 0: # j is not 0\n next_state[i, j] = ((c * dt)/ dx)** 2\\\n * (self.state[i + 1, j] + 0\\\n + self.state[i, j - 1] + self.state[i, j + 1]\\\n - 4 * self.state[i, j])\\\n + 2 * self.state[i, j] - self.prev_state[i , j]\n elif j == 0:\n next_state[i, j] = ((c * dt)/ dx)** 2\\\n * (self.state[i + 1, j] + self.state[i - 1, j]\\\n + 0 + self.state[i, j + 1]\\\n - 4 * self.state[i, j])\\\n + 2 * self.state[i, j] - self.prev_state[i , j]\n elif i == self.width - 1:\n next_state[i, j] = ((c * dt)/ dx)** 2\\\n * (0 + self.state[i - 1, j]\\\n + self.state[i, j - 1] + self.state[i, j + 1]\\\n - 4 * self.state[i, j])\\\n + 2 * self.state[i, j] - self.prev_state[i , j]\n elif j == self.height - 1:\n next_state[i, j] = ((c * dt)/ dx)** 2\\\n * (self.state[i + 1, j] + self.state[i - 1, j]\\\n + self.state[i, j - 1] + 0\\\n - 4 * self.state[i, j])\\\n + 2 * self.state[i, j] - self.prev_state[i , j]\n else:\n next_state[i, j] = ((c * dt)/ dx)** 2\\\n * (self.state[i + 1, j] + self.state[i - 1, j]\\\n + self.state[i, j - 1] + self.state[i, j + 1]\\\n - 4 * self.state[i, j])\\\n + 2 * self.state[i, j] - self.prev_state[i , j]\n\n self.prev_state = np.copy(self.state)\n self.state = np.copy(next_state)\n\n self.timestep += 1",
"def solution(self) -> State:",
"def assignState(self):\n\t\tblack = ['r', 'n', 'b','q','k','b','n','r']\n\t\twhite = ['R','N','B','Q','K','B','N','R']\n\n\t\tfor i in range(8):\n\t\t\tself.squares[8*i + 0].state = black[i]\n\t\t\tself.squares[8*i + 1].state = 'p'\n\t\t\tself.squares[8*i + 2].state = '.'\n\t\t\tself.squares[8*i + 3].state = '.'\n\t\t\tself.squares[8*i + 4].state = '.'\n\t\t\tself.squares[8*i + 5].state = '.'\n\t\t\tself.squares[8*i + 6].state = 'P'\n\t\t\tself.squares[8*i + 7].state = white[i]\n\n\t\tfor square in self.squares:\n\t\t\tself.boardMatrix.append(square.state)",
"def qtc2state(self, qtc):\n \n state_rep = []\n for idx, element in enumerate(qtc):\n# val_qtc = validateQtcSequences(element)\n d = element.shape[1]\n mult = 3**np.arange(d-1, -1, -1)\n state_num = np.append(\n 0,\n ((element + 1)*np.tile(mult, (element.shape[0], 1))).sum(axis=1) + 1\n )\n state_num = np.append(state_num, 82)\n state_char = ''\n for n in state_num:\n state_char += chr(int(n)+32)\n state_rep.append(state_num.tolist())\n \n return state_rep"
] | [
"0.73550963",
"0.61763006",
"0.6168835",
"0.5914047",
"0.5880033",
"0.572045",
"0.571621",
"0.57087564",
"0.57029647",
"0.5688683",
"0.568061",
"0.56745815",
"0.56616175",
"0.56164336",
"0.56039417",
"0.5602903",
"0.559187",
"0.5574628",
"0.55613846",
"0.55613846",
"0.55602586",
"0.55533963",
"0.5529962",
"0.5510614",
"0.5497926",
"0.54968226",
"0.5494008",
"0.5451716",
"0.54276776",
"0.54163647"
] | 0.75306606 | 0 |
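The _compute_queue_state() method in the record above reduces those per-line counts to a single queue status. A standalone sketch of the same decision table is shown below; queue_state is a hypothetical function name, and the count arguments mirror the fields used in the record.

# Standalone sketch of the decision table in _compute_queue_state() above.
def queue_state(total, draft, done, cancel, failed):
    if total == done + cancel:
        return "completed"
    if draft == total:
        return "draft"
    if failed == total:
        return "failed"
    return "partially_completed"


# Example: 4 lines with 2 done, 1 cancelled and 1 failed stay partially completed.
print(queue_state(total=4, draft=0, done=2, cancel=1, failed=1))  # partially_completed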
This method is used to create a product queue as per the split requirement of the queue. It is used to process the queue manually. | def shopify_create_product_queue(self, instance, created_by='import'):
#Added created_by field which is used to identify the queue is created from which process import or webhook : Dipak Gogiya
product_queue_vals = {
'shopify_instance_id':instance and instance.id or False,
'state':'draft',
'created_by': created_by
}
product_queue_data_id = self.create(product_queue_vals)
return product_queue_data_id | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _create_queue(self):\n # Instantiate\n queue = pbs.queue(verbose=not self.quiet)\n\n if self.q == 'ember':\n # Submitting to Utah ember cluster\n ppn = 12\n cpus = ppn if self.cpus is None else min(self.cpus, ppn)\n walltime = self.walltime if int(self.walltime.split(':')[0]) < 72 else '72:00:00'\n queue.create(label=self.label, nodes=self.nodes, qos=self.qos, umask=self.umask,\n walltime=walltime, ppn=ppn, cpus=cpus, partition='ember', alloc='sdss')\n elif self.q is not None:\n # All other self.q values expected for Portsmouth cluster,\n # sciama. In this case, the number of nodes is queue\n # dependent, and qos is not set\n if self.q == 'sciama1.q':\n ppn = 12\n elif self.q == 'sciama3.q':\n ppn = 20\n else:\n ppn = 16\n cpus = ppn if self.cpus is None else min(self.cpus, ppn)\n queue.create(label=self.label, nodes=self.nodes, umask=self.umask,\n walltime=self.walltime, queue=self.q, ppn=ppn, cpus=cpus)\n else:\n # self.q can be None when submitting to both the Portsmouth\n # and Utah clusters. In this case, the default queue\n # destination and ppn is correct. qos is also set, but this\n # should only be used when submitting to Utah.\n ppn = 16\n cpus = ppn if self.cpus is None else min(self.cpus, ppn)\n queue.create(label=self.label, nodes=self.nodes, qos=self.qos, umask=self.umask,\n walltime=self.walltime, ppn=ppn, cpus=cpus)\n\n return queue",
"def new_queue() -> Queue:\n return multiprocessing.Queue()",
"def _queue_create(self, **kwargs):\n name = self.generate_random_name()\n return self.clients(\"zaqar\").queue(name, **kwargs)",
"def create(self, vals):\n sequence_id = self.env.ref('shopify_ept.seq_product_queue_data').ids\n if sequence_id:\n record_name = self.env['ir.sequence'].browse(sequence_id).next_by_id()\n else:\n record_name = '/'\n vals.update({'name':record_name or ''})\n return super(ShopifyProductDataqueue, self).create(vals)",
"def shopify_create_product_data_queue_line(self, result, instance, product_queue_data_id):\n product_data_queue_line_obj = self.env[\"shopify.product.data.queue.line.ept\"]\n product_queue_line_vals = {}\n #doesn't need to convert the response into dictionary while response is getting from webhook [Add Changes] Dipak Gogiya\n if type(result) is not dict:\n result = result.to_dict()\n data = json.dumps(result)\n product_queue_line_vals.update({'product_data_id':result.get('id'),\n 'shopify_instance_id':instance and instance.id or False,\n 'synced_product_data':data,\n 'name': result.get('title'),\n 'product_data_queue_id':product_queue_data_id and product_queue_data_id.id or False,\n 'state':'draft',\n })\n product_data_queue_line_obj.create(product_queue_line_vals)\n return True",
"def create_queue(self):\n queue_name = self.generate_name()\n try:\n queue = self.sqs.create_queue(QueueName=queue_name)\n except Exception as e:\n raise RuntimeError('SQS could create queue: %s' % e)\n self.queue_name, self.queue = queue_name, queue",
"def creator(data, q):\n print('Creating data and putting it on the queue')\n for item in data:\n q.put(item)",
"def test_create_qos_queue(self):\r\n resource = 'qos_queue'\r\n cmd = qos.CreateQoSQueue(\r\n test_cli20.MyApp(sys.stdout), None)\r\n myid = 'myid'\r\n name = 'my_queue'\r\n default = False\r\n args = ['--default', default, name]\r\n position_names = ['name', 'default']\r\n position_values = [name, default]\r\n self._test_create_resource(resource, cmd, name, myid, args,\r\n position_names, position_values)",
"def process_product_queue_line_data(self):\n shopify_product_template_obj = self.env['shopify.product.template.ept']\n comman_log_obj = self.env[\"common.log.book.ept\"]\n shopify_tmpl_id = False\n\n product_queue_dict = {}\n queue_id = self.product_data_queue_id if len(self.product_data_queue_id) == 1 else False\n if queue_id:\n if queue_id.common_log_book_id:\n log_book_id=queue_id.common_log_book_id\n else:\n log_book_id=comman_log_obj.create({'type': 'import',\n 'module':'shopify_ept',\n 'shopify_instance_id':queue_id.shopify_instance_id.id,\n 'active':True})\n commit_count = 0\n for product_queue_line in self:\n commit_count += 1\n shopify_product_template_obj.shopify_sync_products(product_queue_line,shopify_tmpl_id,\n product_queue_line.shopify_instance_id,log_book_id)\n if commit_count == 10:\n self._cr.commit()\n commit_count = 0\n queue_id.common_log_book_id = log_book_id\n # draft_or_failed_queue_line = self.filtered(lambda line: line.state in ['draft', 'failed'])\n # if draft_or_failed_queue_line:\n # queue_id.write({'state': \"partially_completed\"})\n # else:\n # queue_id.write({'state': \"completed\"})\n if queue_id.common_log_book_id and not queue_id.common_log_book_id.log_lines:\n queue_id.common_log_book_id.unlink()\n return True",
"def add_a_queue(self, size):\n \tself.queues.append(ContextModalityQueue(size))",
"def pre_qos_queue_create(self, resource_dict):\n pass",
"def runQueueEnqueue(self):\n raise NotImplementedError",
"def queue_my_new(self, is_buy, qty, price): \n \n self.my_last_uid -= 1 \n message = self.OrdTuple(ordtype=\"new\",\n uid=self.my_last_uid,\n is_buy=is_buy,\n qty=qty,\n price=price, \n timestamp=self._arrival_time()) \n self.my_queue.append(message) \n return self.my_last_uid",
"def _create(self):\n with self.pdq:\n c=self.pdq.cursor() \n c.execute('CREATE TABLE pdq (item blob,priority int)')\n c.execute('CREATE INDEX priority_index ON pdq (priority)')",
"def post_qos_queue_create(self, resource_dict):\n pass",
"def _buildPriorityQueue(self, queueType):\n jobReleases = {}\n\n for job in self.taskSet.jobs:\n r = job.releaseTime\n\n if r not in jobReleases:\n jobReleases[r] = [job]\n else:\n jobReleases[r].append(job)\n\n self.priorityQueue = queueType(jobReleases)",
"def queue_my_modif(self, uid, new_qty):\n \n message = self.OrdTuple(ordtype=\"modif\",\n uid=uid,\n is_buy=np.nan,\n qty=new_qty,\n price=np.nan, \n timestamp=self._arrival_time()) \n self.my_queue.append(message)",
"def create_product_queue_from_webhook(self, product_data, instance):\n product_data_queue = self.search([\n (\"created_by\", \"=\", \"webhook\"), (\"state\", \"=\", \"draft\"),\n (\"shopify_instance_id\", \"=\", instance.id)], limit=1)\n\n if product_data_queue:\n _logger.info(\"Product %s added into Queue %s.\" % (product_data.get(\"id\"), product_data_queue.name))\n else:\n product_data_queue = self.shopify_create_product_queue(instance, \"webhook\")\n _logger.info(\"Imported product {0} of {1} via Webhook Successfully.\".format(\n product_data.get(\"id\"), instance.name))\n\n self.shopify_create_product_data_queue_line(product_data, instance, product_data_queue)\n\n if len(product_data_queue.product_data_queue_lines) >= 50:\n product_data_queue.product_data_queue_lines.process_product_queue_line_data()\n _logger.info(\"Processed product {0} of {1} via Webhook Successfully.\".format(\n product_data.get(\"id\"), instance.name))\n\n return True",
"def populatereadyqueue():\n readyQueue.put(Process(\"P1\", time(0, 0, 1), time(0, 0, 4)))\n readyQueue.put(Process(\"P2\", time(0, 0, 2), time(0, 0, 6)))\n readyQueue.put(Process(\"P3\", time(0, 0, 3), time(0, 0, 2)))",
"def sync_products(self, product_data_queue_lines, woo_instance, common_log_book_id,\n skip_existing_products=False, order_queue_line=False,\n is_process_from_queue=True):\n common_log_line_obj = self.env[\"common.log.lines.ept\"]\n queue_counter = 0\n model_id = common_log_line_obj.get_model_id(self._name)\n if order_queue_line:\n self.env[\"woo.process.import.export\"].sync_woo_attributes(woo_instance)\n\n for product_data_queue_line in product_data_queue_lines:\n if is_process_from_queue:\n if queue_counter == 10:\n if not order_queue_line:\n product_queue_id = product_data_queue_line and \\\n product_data_queue_line.queue_id or False\n if product_queue_id:\n product_queue_id.is_process_queue = True\n self._cr.commit()\n queue_counter = 0\n queue_counter += 1\n line_failed = False # For not making done the queue line, which is already failed.\n template_updated = False\n if is_process_from_queue:\n data, product_queue_id, sync_category_and_tags = self.prepare_product_response(\n order_queue_line, product_data_queue_line)\n else:\n data = product_data_queue_lines[0]\n product_queue_id = False\n sync_category_and_tags = False\n product_data_queue_line = self.env[\"woo.product.data.queue.line.ept\"]\n woo_product_template_id = data.get(\"id\")\n woo_template = self.with_context(active_test=False).search(\n [(\"woo_tmpl_id\", \"=\", woo_product_template_id),\n (\"woo_instance_id\", \"=\", woo_instance.id)], limit=1)\n template_info = self.prepare_template_vals(woo_instance, data)\n template_title = data.get(\"name\")\n _logger.info(\n \"Process started for Product- %s||%s||Queue %s.\" % (woo_product_template_id,\n template_title,\n product_queue_id if order_queue_line else product_data_queue_line.queue_id.name))\n if data[\"variations\"]:\n new_woo_template = self.variation_product_sync(woo_instance, data,\n common_log_book_id,\n product_data_queue_line,\n order_queue_line,\n woo_template, product_queue_id,\n sync_category_and_tags,\n template_info,\n skip_existing_products)\n if new_woo_template:\n woo_template = new_woo_template\n if data[\"type\"] == \"simple\" or data[\"type\"] == \"bundle\":\n new_woo_template = self.simple_product_sync(woo_instance, data, common_log_book_id,\n product_queue_id, template_info,\n product_data_queue_line,\n template_updated,\n skip_existing_products=skip_existing_products,\n order_queue_line=order_queue_line)\n if not new_woo_template:\n continue\n elif not isinstance(new_woo_template, bool):\n woo_template = new_woo_template\n if not order_queue_line:\n if woo_template and not line_failed:\n product_data_queue_line.write({\"state\":\"done\",\n \"last_process_date\":datetime.now()})\n else:\n message = \"Misconfiguration at Woocommerce store for product named - '%s'.\\n \" \\\n \"- It seems this might be a variation product, but variations are \" \\\n \"not defined at store.\" % (template_title)\n common_log_line_obj.woo_create_product_log_line(message, model_id,\n product_data_queue_line if not order_queue_line\n else order_queue_line,\n common_log_book_id)\n _logger.info(\n \"Process Failed of Product {0}||Queue {1}||Reason is {2}\".format(\n woo_product_template_id, product_queue_id, message))\n product_data_queue_line.write(\n {\"state\":\"failed\", \"last_process_date\":datetime.now()})\n # Below two-line add by Haresh on date 6/1/2020 to manage the which queue is running in the background\n product_data_queue_line.queue_id.is_process_queue = False\n _logger.info(\n \"Process done for Product-{0}||{1}||Queue {2}.\".format(woo_product_template_id,\n 
template_title,\n product_queue_id if order_queue_line else\n product_data_queue_line.queue_id.name))\n return True",
"def auto_import_product_queue_line_data(self):\n # change by bhavesh jadav 03/12/2019 for process only one queue data at a time\n query = \"\"\"select product_data_queue_id from shopify_product_data_queue_line_ept where state='draft' ORDER BY create_date ASC limit 1\"\"\"\n self._cr.execute(query)\n product_data_queue_id = self._cr.fetchone()\n product_data_queue_line_ids = self.env['shopify.product.data.queue.ept'].browse(product_data_queue_id).product_data_queue_lines\n product_data_queue_line_ids.process_product_queue_line_data()",
"def ztest_sql_queue(self):\n \n sql_queue = SQLQueue()\n \n #insertion\n for i in range(10):\n item = NMSQueueItem(5,\"data %s\" % (i))\n item.set_uuid()\n sql_queue.put(item.dictify())\n \n size = sql_queue.size()\n \n while size != 0:\n the_dict = sql_queue.pop()\n item = NMSQueueItem.create_from_dict(the_dict)\n print(\"size = %d, item = %s\\n\" % (size, item))\n size = sql_queue.size()\n \n print(\"size = %s\" % size )",
"def shopify_create_product_data_queue(self, instance, template_ids=''):\n instance.connect_in_shopify()\n only_alphabets = []\n if template_ids:\n # Below one line is used to find only character values from template ids.\n only_alphabets = re.findall(\"[a-zA-Z]+\", template_ids)\n if len(template_ids.split(',')) <= 50:\n # template_ids is a list of all template ids which response did not given by\n # shopify.\n template_ids = list(set(re.findall(re.compile(r\"(\\d+)\"),template_ids)))\n results = shopify.Product().find(ids=','.join(template_ids))\n if results:\n _logger.info('Length of Shopify Products %s import from instance name: %s' % (\n len(results), instance.name))\n template_ids = [template_id.strip() for template_id in template_ids]\n # Below process to identify which id response did not give by Shopify.\n [template_ids.remove(str(result.id)) for result in results if str(result.id) in template_ids]\n else:\n raise Warning(_('Please enter the product template ids 50 or less'))\n else:\n if not instance.shopify_last_date_product_import:\n results = shopify.Product().find(status='active', limit=250)\n if len(results) >= 250:\n results = self.shopify_list_all_products(results)\n #results = self.get_product(results)\n else:\n # updated_at_min =datetime.strptime(pytz.utc.localize(instance.shopify_last_date_product_import).astimezone(\n # pytz.timezone(instance.shopify_store_time_zone[12:] or 'UTC')).strftime(\n # '%Y-%m-%d %H:%M:%S'), \"%Y-%m-%d %H:%M:%S\")\n results = shopify.Product().find(status='active',\n updated_at_min=instance.shopify_last_date_product_import,limit=250) # Change by bhavesh jadav 13/12/2019 limit=250\n if len(results) >= 250:\n results=self.shopify_list_all_products(results)\n if results:\n instance.shopify_last_date_product_import = datetime.now()\n without_gift_card_products = []\n for result in results:\n if result.to_dict().get('variants')[0].get('fulfillment_service') != 'gift_card':\n without_gift_card_products.append(result)\n results = without_gift_card_products\n if not results:\n _logger.info(\n 'No Products found to be imported from Shopify.')\n return False\n _logger.info('Total synced products - {}'.format(len(results)))\n count = 0\n one_time_create = True\n product_queue_list = []\n for result in results:\n if one_time_create:\n product_queue_id = self.shopify_create_product_queue(instance)\n product_queue_list.append(product_queue_id.id)\n _logger.info('Shopify Product Queue created. Queue name is {}'.format(\n product_queue_id.name))\n one_time_create = False\n if template_ids or only_alphabets:\n product_queue_id.message_post(body=\"%s products are not imported\" %(','.join(template_ids+only_alphabets)))\n self.shopify_create_product_data_queue_line(result, instance, product_queue_id)\n count = count + 1\n if count == 100:\n count = 0\n one_time_create = True\n return product_queue_list",
"def queue_maker(queue, bucket_name):\n scraper = key_scraper.KaleidoscopeKeyScraper(\n bucket_name=bucket_name,\n queue=queue,\n )\n scraper.add_keys_to_queue()\n\n return None",
"def register(self, queue, project=None, flavor=None):\n\n # NOTE(gengchc): if exist, get queue's pool.flavor:\n # if queue's pool.flavor is different, first delete it and add it.\n # Otherwise, if the flavor in the meteredata of the queue is\n # modified, the catalog will be inconsistent.\n if self._catalogue_ctrl.exists(project, queue):\n catalogue = self._catalogue_ctrl.get(project, queue)\n oldpoolids = catalogue['pool']\n oldpool = self._pools_ctrl.get(oldpoolids)\n oldflavor = oldpool['flavor']\n msgtmpl = _(u'register queue to pool: old flavor: %(oldflavor)s '\n ', new flavor: %(flavor)s')\n LOG.info(msgtmpl,\n {'oldflavor': oldflavor, 'flavor': flavor})\n if oldpool['flavor'] != flavor:\n self._catalogue_ctrl.delete(project, queue)\n\n if not self._catalogue_ctrl.exists(project, queue):\n if flavor is not None:\n flavor = self._flavor_ctrl.get(flavor, project=project)\n pools = self._pools_ctrl.get_pools_by_flavor(\n flavor=flavor,\n detailed=True)\n pool = select.weighted(pools)\n pool = pool and pool['name'] or None\n msgtmpl = _(u'register queue to pool: new flavor:%(flavor)s')\n LOG.info(msgtmpl,\n {'flavor': flavor.get('name', None)})\n else:\n # NOTE(flaper87): Get pools assigned to the default\n # group `None`. We should consider adding a `default_group`\n # option in the future.\n pools = self._pools_ctrl.get_pools_by_flavor(detailed=True)\n pool = select.weighted(pools)\n pool = pool and pool['name'] or None\n\n if not pool:\n # NOTE(flaper87): We used to raise NoPoolFound in this\n # case but we've decided to support automatic pool\n # creation. Note that we're now returning and the queue\n # is not being registered in the catalogue. This is done\n # on purpose since no pool exists and the \"dummy\" pool\n # doesn't exist in the storage\n if self.lookup(queue, project) is not None:\n return\n raise errors.NoPoolFound()\n msgtmpl = _(u'register queue to pool: new flavor: None')\n LOG.info(msgtmpl)\n\n msgtmpl = _(u'register queue: project:%(project)s'\n ' queue:%(queue)s pool:%(pool)s')\n LOG.info(msgtmpl,\n {'project': project,\n 'queue': queue,\n 'pool': pool})\n self._catalogue_ctrl.insert(project, queue, pool)",
"def queue (self):\n\n with self.__t.steps():\n import exception\n from lib import schema\n from lib import common\n from lib import process\n from lib import data\n from sqlalchemy import and_\n import json\n from collections import OrderedDict\n\n with schema.select(\"process_queue\", schema.table.process_queue.status==None) as select:\n for queued in select.limit(1000).all():\n blocked = False\n if len(queued.depend) > 0:\n for depend_id in queued.depend:\n depend = schema.select_one(\"series\", schema.table.series.id==depend_id)\n match_tags = json.dumps(OrderedDict(sorted(data.decode_tags(depend.tags).items())))\n if depend and schema.select_one(\"process_queue\", schema.table.process_queue.tags==match_tags):\n blocked = True\n break # queued dependencies\n if not blocked:\n queued.status = \"queued\"\n schema.save(queued)\n run.apply_async([queued.tags]) #queue process\n self.__t.ok()\n self.apply_async(queue=\"control\", countdown=30) #queue next",
"def __init__(self):\n self.queue = []\n self.queue.append(Queue())\n self.queue.append(Queue())\n self.tag = 0 # using to record which queue contain the data",
"def __init__(self, size):\n self.size = size\n self.queue = []",
"def __init__(self,size=10):\n \n self.inbound = Queue() #an internal queue to manage the class properly in a thread safe manner.\n self.index = Value('i',0) #index of next item to be added.\n self.manager = Manager()\n \n self.buffer = self.manager.list() #the buffer we will store things in.\n self.size = size #the maximum size of the buffer\n self.newitem = Queue() #a blocking event to control the pop method\n t = threading.Thread(target=self.worker) #the worker that will run when items are added.\n t.start() #start the worker\n self.newitemindex = 0 #index of items to pop",
"def generate_queue(self,pool):\n\t\tqueue = []\n\t\tfor ele in self.elements:\n\t\t\tif ele.pool == pool and ele.status == 'pending':\n\t\t\t\tele.abs_path = \"/%s/%s/%s/%s\" % (\n\t\t\t\t\tself.base_dir,\n\t\t\t\t\tself.parent_dir,\n\t\t\t\t\tself.project,\n\t\t\t\t\tele.filename\n\t\t\t\t\t)\n\t\t\t\tqueue.append(ele)\n\t\treturn queue"
] | [
"0.67427164",
"0.65279806",
"0.6462158",
"0.64437056",
"0.63512075",
"0.62755984",
"0.6275351",
"0.6238054",
"0.6213989",
"0.6196298",
"0.6086965",
"0.6037728",
"0.59987074",
"0.59665084",
"0.59183425",
"0.58791554",
"0.5871936",
"0.5869609",
"0.5861172",
"0.5843961",
"0.58432025",
"0.582693",
"0.5801137",
"0.5790284",
"0.5785147",
"0.5778124",
"0.57656735",
"0.5755274",
"0.57380235",
"0.5708616"
] | 0.73897463 | 0 |
This method is used to create a product data queue line. | def shopify_create_product_data_queue_line(self, result, instance, product_queue_data_id):
product_data_queue_line_obj = self.env["shopify.product.data.queue.line.ept"]
product_queue_line_vals = {}
        # No need to convert the response into a dictionary when the response comes from a webhook [Add Changes] Dipak Gogiya
if type(result) is not dict:
result = result.to_dict()
data = json.dumps(result)
product_queue_line_vals.update({'product_data_id':result.get('id'),
'shopify_instance_id':instance and instance.id or False,
'synced_product_data':data,
'name': result.get('title'),
'product_data_queue_id':product_queue_data_id and product_queue_data_id.id or False,
'state':'draft',
})
product_data_queue_line_obj.create(product_queue_line_vals)
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def process_product_queue_line_data(self):\n shopify_product_template_obj = self.env['shopify.product.template.ept']\n comman_log_obj = self.env[\"common.log.book.ept\"]\n shopify_tmpl_id = False\n\n product_queue_dict = {}\n queue_id = self.product_data_queue_id if len(self.product_data_queue_id) == 1 else False\n if queue_id:\n if queue_id.common_log_book_id:\n log_book_id=queue_id.common_log_book_id\n else:\n log_book_id=comman_log_obj.create({'type': 'import',\n 'module':'shopify_ept',\n 'shopify_instance_id':queue_id.shopify_instance_id.id,\n 'active':True})\n commit_count = 0\n for product_queue_line in self:\n commit_count += 1\n shopify_product_template_obj.shopify_sync_products(product_queue_line,shopify_tmpl_id,\n product_queue_line.shopify_instance_id,log_book_id)\n if commit_count == 10:\n self._cr.commit()\n commit_count = 0\n queue_id.common_log_book_id = log_book_id\n # draft_or_failed_queue_line = self.filtered(lambda line: line.state in ['draft', 'failed'])\n # if draft_or_failed_queue_line:\n # queue_id.write({'state': \"partially_completed\"})\n # else:\n # queue_id.write({'state': \"completed\"})\n if queue_id.common_log_book_id and not queue_id.common_log_book_id.log_lines:\n queue_id.common_log_book_id.unlink()\n return True",
"def create(self, vals):\n sequence_id = self.env.ref('shopify_ept.seq_product_queue_data').ids\n if sequence_id:\n record_name = self.env['ir.sequence'].browse(sequence_id).next_by_id()\n else:\n record_name = '/'\n vals.update({'name':record_name or ''})\n return super(ShopifyProductDataqueue, self).create(vals)",
"def shopify_create_product_queue(self, instance, created_by='import'):\n #Added created_by field which is used to identify the queue is created from which process import or webhook : Dipak Gogiya\n product_queue_vals = {\n 'shopify_instance_id':instance and instance.id or False,\n 'state':'draft',\n 'created_by': created_by\n }\n product_queue_data_id = self.create(product_queue_vals)\n\n return product_queue_data_id",
"def creator(data, q):\n print('Creating data and putting it on the queue')\n for item in data:\n q.put(item)",
"def auto_import_product_queue_line_data(self):\n # change by bhavesh jadav 03/12/2019 for process only one queue data at a time\n query = \"\"\"select product_data_queue_id from shopify_product_data_queue_line_ept where state='draft' ORDER BY create_date ASC limit 1\"\"\"\n self._cr.execute(query)\n product_data_queue_id = self._cr.fetchone()\n product_data_queue_line_ids = self.env['shopify.product.data.queue.ept'].browse(product_data_queue_id).product_data_queue_lines\n product_data_queue_line_ids.process_product_queue_line_data()",
"def _compute_queue_line_record(self):\n for product_queue in self:\n queue_lines = product_queue.product_data_queue_lines\n product_queue.queue_line_total_records = len(queue_lines)\n product_queue.queue_line_draft_records = len(\n queue_lines.filtered(lambda x:x.state == 'draft'))\n product_queue.queue_line_fail_records = len(\n queue_lines.filtered(lambda x:x.state == 'failed'))\n product_queue.queue_line_done_records = len(\n queue_lines.filtered(lambda x:x.state == 'done'))\n product_queue.queue_line_cancel_records = len(\n queue_lines.filtered(lambda x:x.state == 'cancel'))",
"def _queue_create(self, **kwargs):\n name = self.generate_random_name()\n return self.clients(\"zaqar\").queue(name, **kwargs)",
"def __init__(self):\n self.data = Queue()",
"def sync_products(self, product_data_queue_lines, woo_instance, common_log_book_id,\n skip_existing_products=False, order_queue_line=False,\n is_process_from_queue=True):\n common_log_line_obj = self.env[\"common.log.lines.ept\"]\n queue_counter = 0\n model_id = common_log_line_obj.get_model_id(self._name)\n if order_queue_line:\n self.env[\"woo.process.import.export\"].sync_woo_attributes(woo_instance)\n\n for product_data_queue_line in product_data_queue_lines:\n if is_process_from_queue:\n if queue_counter == 10:\n if not order_queue_line:\n product_queue_id = product_data_queue_line and \\\n product_data_queue_line.queue_id or False\n if product_queue_id:\n product_queue_id.is_process_queue = True\n self._cr.commit()\n queue_counter = 0\n queue_counter += 1\n line_failed = False # For not making done the queue line, which is already failed.\n template_updated = False\n if is_process_from_queue:\n data, product_queue_id, sync_category_and_tags = self.prepare_product_response(\n order_queue_line, product_data_queue_line)\n else:\n data = product_data_queue_lines[0]\n product_queue_id = False\n sync_category_and_tags = False\n product_data_queue_line = self.env[\"woo.product.data.queue.line.ept\"]\n woo_product_template_id = data.get(\"id\")\n woo_template = self.with_context(active_test=False).search(\n [(\"woo_tmpl_id\", \"=\", woo_product_template_id),\n (\"woo_instance_id\", \"=\", woo_instance.id)], limit=1)\n template_info = self.prepare_template_vals(woo_instance, data)\n template_title = data.get(\"name\")\n _logger.info(\n \"Process started for Product- %s||%s||Queue %s.\" % (woo_product_template_id,\n template_title,\n product_queue_id if order_queue_line else product_data_queue_line.queue_id.name))\n if data[\"variations\"]:\n new_woo_template = self.variation_product_sync(woo_instance, data,\n common_log_book_id,\n product_data_queue_line,\n order_queue_line,\n woo_template, product_queue_id,\n sync_category_and_tags,\n template_info,\n skip_existing_products)\n if new_woo_template:\n woo_template = new_woo_template\n if data[\"type\"] == \"simple\" or data[\"type\"] == \"bundle\":\n new_woo_template = self.simple_product_sync(woo_instance, data, common_log_book_id,\n product_queue_id, template_info,\n product_data_queue_line,\n template_updated,\n skip_existing_products=skip_existing_products,\n order_queue_line=order_queue_line)\n if not new_woo_template:\n continue\n elif not isinstance(new_woo_template, bool):\n woo_template = new_woo_template\n if not order_queue_line:\n if woo_template and not line_failed:\n product_data_queue_line.write({\"state\":\"done\",\n \"last_process_date\":datetime.now()})\n else:\n message = \"Misconfiguration at Woocommerce store for product named - '%s'.\\n \" \\\n \"- It seems this might be a variation product, but variations are \" \\\n \"not defined at store.\" % (template_title)\n common_log_line_obj.woo_create_product_log_line(message, model_id,\n product_data_queue_line if not order_queue_line\n else order_queue_line,\n common_log_book_id)\n _logger.info(\n \"Process Failed of Product {0}||Queue {1}||Reason is {2}\".format(\n woo_product_template_id, product_queue_id, message))\n product_data_queue_line.write(\n {\"state\":\"failed\", \"last_process_date\":datetime.now()})\n # Below two-line add by Haresh on date 6/1/2020 to manage the which queue is running in the background\n product_data_queue_line.queue_id.is_process_queue = False\n _logger.info(\n \"Process done for Product-{0}||{1}||Queue {2}.\".format(woo_product_template_id,\n 
template_title,\n product_queue_id if order_queue_line else\n product_data_queue_line.queue_id.name))\n return True",
"def assign_data_product(self, input_resource_id='', data_product_id='', create_stream=False):\n # Verify that both ids are valid\n input_resource_obj = self.clients.resource_registry.read(input_resource_id)\n if not input_resource_obj:\n raise BadRequest(\"Source resource %s does not exist\" % input_resource_id)\n data_product_obj = self.clients.resource_registry.read(data_product_id)\n if not data_product_obj:\n raise BadRequest(\"Data Product resource %s does not exist\" % data_product_id)\n\n #find the data producer resource associated with the source resource that is creating the data product\n producer_ids, _ = self.clients.resource_registry.find_objects(input_resource_id, PRED.hasDataProducer, RT.DataProducer, id_only=True)\n if producer_ids is None:\n raise NotFound(\"No Data Producers associated with source resource ID \" + str(input_resource_id))\n #find the 'head' producer\n self.primary_producer = None\n for producer_id in producer_ids:\n producer_obj = self.clients.resource_registry.read(producer_id)\n if not producer_obj:\n raise NotFound(\"Data Producer %s does not exist\" % producer_id)\n if producer_obj.is_primary:\n self.primary_producer = producer_id\n\n if self.primary_producer is None:\n raise NotFound(\"No primary Data Producer associated with source resource ID \" + str(input_resource_id))\n\n #create data producer resource for this data product\n data_producer_obj = IonObject(RT.DataProducer,name=data_product_obj.name, description=data_product_obj.description)\n data_producer_id, rev = self.clients.resource_registry.create(data_producer_obj)\n\n\n # Associate the Product with the Producer\n self.clients.resource_registry.create_association(data_product_id, PRED.hasDataProducer, data_producer_id)\n # Associate the Producer with the main Producer\n self.clients.resource_registry.create_association(data_producer_id, PRED.hasParent, self.primary_producer)\n # Associate the input resource with the child data Producer\n self.clients.resource_registry.create_association(input_resource_id, PRED.hasDataProducer, data_producer_id)\n\n #Create the stream if requested\n log.debug(\"assign_data_product: create_stream %s\" % create_stream)\n if create_stream:\n stream_id = self.clients.pubsub_management.create_stream(name=data_product_obj.name, description=data_product_obj.description)\n log.debug(\"assign_data_product: create stream stream_id %s\" % stream_id)\n # Associate the Stream with the main Data Product\n self.clients.resource_registry.create_association(data_product_id, PRED.hasStream, stream_id)\n\n return",
"def prepare_product_response(self, order_queue_line, product_data_queue_line):\n sync_category_and_tags = False\n if order_queue_line:\n data = product_data_queue_line\n product_queue_id = \"from Order\"\n sync_category_and_tags = True\n else:\n product_queue_id = product_data_queue_line.queue_id.id\n if product_data_queue_line.queue_id.created_by == \"webhook\":\n sync_category_and_tags = True\n data = json.loads(product_data_queue_line.woo_synced_data)\n return data, product_queue_id, sync_category_and_tags",
"def create_pol(self, order, product):\n order.write({\n 'order_line': [(0, 0, {\n 'product_id': product.id,\n 'product_qty': 10.0,\n 'product_uom': product.uom_id.id,\n 'price_unit': product.price,\n 'name': product.name_template,\n 'sequence': len(order.order_line) + 1,\n 'date_planned': time.strftime('%Y-%m-%d')\n })]})",
"def _create(self):\n with self.pdq:\n c=self.pdq.cursor() \n c.execute('CREATE TABLE pdq (item blob,priority int)')\n c.execute('CREATE INDEX priority_index ON pdq (priority)')",
"def create_product_queue_from_webhook(self, product_data, instance):\n product_data_queue = self.search([\n (\"created_by\", \"=\", \"webhook\"), (\"state\", \"=\", \"draft\"),\n (\"shopify_instance_id\", \"=\", instance.id)], limit=1)\n\n if product_data_queue:\n _logger.info(\"Product %s added into Queue %s.\" % (product_data.get(\"id\"), product_data_queue.name))\n else:\n product_data_queue = self.shopify_create_product_queue(instance, \"webhook\")\n _logger.info(\"Imported product {0} of {1} via Webhook Successfully.\".format(\n product_data.get(\"id\"), instance.name))\n\n self.shopify_create_product_data_queue_line(product_data, instance, product_data_queue)\n\n if len(product_data_queue.product_data_queue_lines) >= 50:\n product_data_queue.product_data_queue_lines.process_product_queue_line_data()\n _logger.info(\"Processed product {0} of {1} via Webhook Successfully.\".format(\n product_data.get(\"id\"), instance.name))\n\n return True",
"def creator(self, q, data, num_sub_proc):\n for d in data:\n idx = d[0]\n q.put((idx, d[1]))\n\n for i in range(0, num_sub_proc):\n q.put('DONE')",
"def __init__(self, queueLength):\r\n self.queueLength = queueLength\r\n self.data = []\r\n return",
"def post_qos_queue_create(self, resource_dict):\n pass",
"def test_create_qos_queue(self):\r\n resource = 'qos_queue'\r\n cmd = qos.CreateQoSQueue(\r\n test_cli20.MyApp(sys.stdout), None)\r\n myid = 'myid'\r\n name = 'my_queue'\r\n default = False\r\n args = ['--default', default, name]\r\n position_names = ['name', 'default']\r\n position_values = [name, default]\r\n self._test_create_resource(resource, cmd, name, myid, args,\r\n position_names, position_values)",
"def create_data_producer(name='', description=''):\n pass",
"def AToPack(self, product_id, description, quantity, ship_id): \n command = amazon_pb2.ACommands()\n command.simspeed = 50000\n pack = command.topack.add()\n pack.whnum = 0\n pack.shipid = ship_id;\n pid = pack.things.add()\n pid.id = product_id\n pid.description = description\n pid.count = quantity\n self.send(command, self.sock)",
"def queue_my_new(self, is_buy, qty, price): \n \n self.my_last_uid -= 1 \n message = self.OrdTuple(ordtype=\"new\",\n uid=self.my_last_uid,\n is_buy=is_buy,\n qty=qty,\n price=price, \n timestamp=self._arrival_time()) \n self.my_queue.append(message) \n return self.my_last_uid",
"def __init__(self):\n self.queue = []\n self.queue.append(Queue())\n self.queue.append(Queue())\n self.tag = 0 # using to record which queue contain the data",
"def ztest_sql_queue(self):\n \n sql_queue = SQLQueue()\n \n #insertion\n for i in range(10):\n item = NMSQueueItem(5,\"data %s\" % (i))\n item.set_uuid()\n sql_queue.put(item.dictify())\n \n size = sql_queue.size()\n \n while size != 0:\n the_dict = sql_queue.pop()\n item = NMSQueueItem.create_from_dict(the_dict)\n print(\"size = %d, item = %s\\n\" % (size, item))\n size = sql_queue.size()\n \n print(\"size = %s\" % size )",
"def pre_qos_queue_create(self, resource_dict):\n pass",
"def add(self, data):\n wasquiet = True if (self.tail == self.curr) else False\n\n # Assert the queue is clean\n qtail = self.base + \".\" + str(self.tail)\n print \"creating %s\" % qtail\n assert not os.path.exists(qtail)\n qt = open(qtail, \"w\")\n qt.write(data)\n qt.close()\n\n # Where does the next item go\n self.tail += 1\n self._settail(self.tail)\n\n return wasquiet",
"def _create_queue(self):\n # Instantiate\n queue = pbs.queue(verbose=not self.quiet)\n\n if self.q == 'ember':\n # Submitting to Utah ember cluster\n ppn = 12\n cpus = ppn if self.cpus is None else min(self.cpus, ppn)\n walltime = self.walltime if int(self.walltime.split(':')[0]) < 72 else '72:00:00'\n queue.create(label=self.label, nodes=self.nodes, qos=self.qos, umask=self.umask,\n walltime=walltime, ppn=ppn, cpus=cpus, partition='ember', alloc='sdss')\n elif self.q is not None:\n # All other self.q values expected for Portsmouth cluster,\n # sciama. In this case, the number of nodes is queue\n # dependent, and qos is not set\n if self.q == 'sciama1.q':\n ppn = 12\n elif self.q == 'sciama3.q':\n ppn = 20\n else:\n ppn = 16\n cpus = ppn if self.cpus is None else min(self.cpus, ppn)\n queue.create(label=self.label, nodes=self.nodes, umask=self.umask,\n walltime=self.walltime, queue=self.q, ppn=ppn, cpus=cpus)\n else:\n # self.q can be None when submitting to both the Portsmouth\n # and Utah clusters. In this case, the default queue\n # destination and ppn is correct. qos is also set, but this\n # should only be used when submitting to Utah.\n ppn = 16\n cpus = ppn if self.cpus is None else min(self.cpus, ppn)\n queue.create(label=self.label, nodes=self.nodes, qos=self.qos, umask=self.umask,\n walltime=self.walltime, ppn=ppn, cpus=cpus)\n\n return queue",
"def create_queue(data_size,\n num_steps,\n capacity=128,\n dtype=tf.int32):\n\n # Feeds for inputs.\n input_data = tf.placeholder(\n dtype, shape=[data_size, num_steps], name=\"input_data\")\n\n targets = tf.placeholder(\n dtype, shape=[data_size, num_steps], name=\"targets\")\n\n queue = tf.FIFOQueue(\n capacity=capacity,\n # min_after_dequeue=min_after_dequeue,\n dtypes=[dtype, dtype],\n shapes=[[num_steps]] * 2)\n\n enqueue_op = queue.enqueue_many(\n [input_data, targets])\n\n placeholders = {\n \"input_data\": input_data,\n \"targets\": targets\n }\n\n return queue, enqueue_op, placeholders",
"def createOrder(self, item, units, quantity, delivery_time):\n self.order = Order(item, units, quantity, delivery_time)",
"def new_queue() -> Queue:\n return multiprocessing.Queue()",
"def test_the_queue_enqueue(the_queue):\n the_queue.enqueue(2)\n assert the_queue._new_dll.head.data == the_queue._new_dll.tail.data == 2"
] | [
"0.6933381",
"0.69173896",
"0.6856358",
"0.68060774",
"0.675189",
"0.62019813",
"0.6040637",
"0.5978801",
"0.59644425",
"0.59393656",
"0.59366614",
"0.59186864",
"0.5887491",
"0.5809255",
"0.58090496",
"0.5801302",
"0.5761373",
"0.57500213",
"0.57416767",
"0.56782055",
"0.5667402",
"0.56624305",
"0.56494355",
"0.5626479",
"0.559573",
"0.5564811",
"0.5533603",
"0.54791445",
"0.5477628",
"0.54725826"
] | 0.74084526 | 0 |
Build a PDF version of sa_summary. | def sa_summary_pdf(sa_id):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def produce_summary_pdf(model_name, img_path, hyperparams, model_arch, train_stats):\n # datetime object containing current date and time\n now = datetime.now()\n dt_string = now.strftime(\"%d/%m/%Y %H:%M:%S\")\n\n pdf = FPDF()\n pdf.set_title(\"training_summary_{}_{}\".format(model_name.lower(), dt_string))\n pdf.add_page()\n pdf.set_xy(0, 10)\n pdf.set_font(\"Helvetica\", \"BI\", 16)\n pdf.set_text_color(25, 33, 78)\n pdf.set_draw_color(25, 33, 78)\n pdf.cell(20)\n pdf.cell(\n 200,\n 10,\n \"Model Training Summary: {}\".format(model_name.upper()),\n 0,\n 2,\n )\n pdf.set_font(\"Helvetica\", \"B\", 12)\n pdf.cell(\n 200,\n 5,\n dt_string,\n 0,\n 2,\n )\n\n # Model Configuration Section\n pdf.cell(150, 10, \"Model Configuration:\", 0, 2)\n pdf.cell(30, 10, \"Parameter\", 1, 0)\n pdf.cell(140, 10, \"Value\", 1, 2)\n pdf.set_text_color(255, 96, 80)\n pdf.set_font(\"Helvetica\", \"\", 12)\n pdf.cell(-30)\n attributes = [\n \"model_dir\",\n \"log_dir\",\n \"check_dir\",\n \"current_epoch\",\n \"overwrite\",\n \"exp_name\",\n ]\n for i, val in enumerate(hyperparams):\n if val not in attributes:\n pdf.cell(30, 10, \"%s\" % (val), 1, 0)\n pdf.cell(140, 10, \"%s\" % (hyperparams[val]), 1, 2)\n pdf.cell(-30)\n pdf.cell(90, 3, \"\", 0, 2)\n\n # Model Performance Section\n pdf.set_text_color(25, 33, 78)\n pdf.set_font(\"Helvetica\", \"B\", 12)\n pdf.cell(150, 10, \"Model Performance Stats:\", 0, 2)\n pdf.set_font(\"Helvetica\", \"\", 12)\n\n loss = train_stats[\"test_loss\"]\n acc = train_stats[\"test_acc\"]\n\n pdf.set_text_color(255, 96, 80)\n pdf.cell(35, 6, \"Best Loss:\", 0, 0)\n pdf.cell(\n 45, 6, \"{:.3f} (Epoch {})\".format(min(loss), loss.index(min(loss)) + 1), 0, 0\n )\n pdf.cell(60, 6, \"Training Duration:\", 0, 0)\n pdf.cell(30, 6, \"{:.3f} (s)\".format(train_stats[\"total_dur\"]), 0, 2)\n pdf.cell(-140)\n pdf.cell(35, 6, f\"Best Accuracy:\", 0, 0)\n pdf.cell(45, 6, \"{:.3f} (Epoch {})\".format(max(acc), acc.index(max(acc)) + 1), 0, 0)\n pdf.cell(60, 6, \"Average Epoch Duration:\", 0, 0)\n pdf.cell(\n 30,\n 6,\n \"{:.3f} (s)\".format(train_stats[\"total_dur\"] / hyperparams[\"current_epoch\"]),\n 0,\n 2,\n )\n pdf.cell(-140)\n pdf.cell(90, 3, \"\", 0, 2)\n\n # Loss Curve Section\n pdf.set_text_color(25, 33, 78)\n pdf.set_font(\"Helvetica\", \"B\", 12)\n pdf.cell(150, 10, \"Model Loss Curve:\", 0, 2)\n pdf.image(img_path, x=None, y=None, w=160, h=0, type=\"PNG\", link=\"\")\n\n # Second Page of Report\n pdf.add_page()\n pdf.set_xy(0, 0)\n pdf.cell(20, 20)\n\n # Model Arch Section\n pdf.cell(150, 20, \"Model Configuration:\", 0, 2)\n pdf.set_font(\"Helvetica\", \"\", 12)\n if model_arch is None:\n model_arch = \"No model configuration was provided\"\n pdf.set_text_color(255, 96, 80)\n pdf.multi_cell(180, 8, str(model_arch))\n\n # Third Page of Report\n pdf.add_page()\n pdf.set_xy(0, 0)\n pdf.cell(20, 20, \" \")\n\n # Training Loss Section\n pdf.set_text_color(25, 33, 78)\n pdf.set_font(\"Helvetica\", \"B\", 12)\n pdf.cell(150, 20, \"Detailed Loss Output:\", 0, 2)\n pdf.cell(40, 8, \"Epoch\", 1, 0, \"C\")\n pdf.cell(30, 8, \"Train Loss\", 1, 0, \"C\")\n pdf.cell(30, 8, \"Test Loss\", 1, 0, \"C\")\n pdf.cell(30, 8, \"Train Acc\", 1, 0, \"C\")\n pdf.cell(30, 8, \"Test Acc\", 1, 2, \"C\")\n pdf.set_text_color(255, 96, 80)\n pdf.set_font(\"Helvetica\", \"\", 12)\n pdf.cell(-130)\n for i in range(0, len(train_stats[\"train_loss\"])):\n pdf.cell(40, 8, \"{}\".format((i + 1)), 1, 0, \"C\")\n pdf.cell(30, 8, \"{:.3f}\".format((train_stats[\"train_loss\"][i])), 1, 0, \"C\")\n pdf.cell(30, 8, 
\"{:.3f}\".format((train_stats[\"test_loss\"][i])), 1, 0, \"C\")\n pdf.cell(30, 8, \"{:.3f}\".format((train_stats[\"train_acc\"][i])), 1, 0, \"C\")\n pdf.cell(30, 8, \"{:.3f}\".format((train_stats[\"test_acc\"][i])), 1, 2, \"C\")\n pdf.cell(-130)\n pdf.cell(90, 3, \"\", 0, 2)\n\n pdf.output(\n os.path.join(\n os.path.dirname(img_path),\n \"training_summary_{}.pdf\".format(model_name.lower()),\n ),\n \"F\",\n )",
"def pdf_gen(report, summary=None):\n with open(\"report_content.yaml\", \"r\") as stream:\n docs = yaml.safe_load(stream)\n\n style = g_stylesheet.get(\"styles\")\n elems = [] # elements array used to build pdf structure\n pdf = SimpleDocTemplate(\n f\"{report.replay_id}_report.pdf\",\n pagesize=letter,\n leftMargin=0.75 * inch,\n rightMargin=0.75 * inch,\n topMargin=0.75 * inch,\n bottomMargin=0.75 * inch,\n )\n\n # title and subtitle and cluster info table\n elems.append(Paragraph(docs[\"title\"], style[\"Title\"]))\n elems.append(\n Paragraph(sub_yaml_vars(report, docs[\"subtitle\"]), style[\"Heading4\"])\n )\n cluster_info = pd.DataFrame.from_dict(report.cluster_details, orient=\"index\")\n elems.append(\n Table(\n df_to_np(report.cluster_details.keys(), cluster_info.transpose()),\n hAlign=\"LEFT\",\n style=g_stylesheet.get(\"table_style\"),\n )\n )\n # replay summary\n if summary is not None:\n elems.append(Paragraph(f\"Replay Summary\", style[\"Heading4\"]))\n elems.append(\n ListFlowable(\n [ListItem(Paragraph(x, style[\"Normal\"])) for x in summary],\n bulletType=\"bullet\",\n )\n )\n elems.append(Spacer(0, 5))\n\n elems.append(Paragraph(docs[\"report_paragraph\"], style[\"Normal\"]))\n\n # glossary section\n elems.append(Paragraph(docs[\"glossary_header\"], style[\"Heading4\"]))\n elems.append(Paragraph(docs[\"glossary_paragraph\"], style[\"Normal\"]))\n elems.append(\n ListFlowable(\n [ListItem(Paragraph(x, style[\"Normal\"])) for x in docs[\"glossary\"]],\n bulletType=\"bullet\",\n )\n )\n elems.append(Spacer(0, 5))\n\n # access data section\n elems.append(Paragraph(docs[\"data_header\"], style[\"Heading4\"]))\n elems.append(\n Paragraph(sub_yaml_vars(report, docs[\"data_paragraph\"]), style[\"Normal\"])\n )\n elems.append(\n ListFlowable(\n [ListItem(Paragraph(x, style[\"Normal\"])) for x in docs[\"raw_data\"]],\n bulletType=\"bullet\",\n )\n )\n elems.append(Spacer(0, 5))\n elems.append(\n Paragraph(\n sub_yaml_vars(report, docs[\"agg_data_paragraph\"]), style[\"Normal\"]\n )\n )\n\n # notes section\n elems.append(Paragraph(docs[\"notes_header\"], style[\"Heading4\"]))\n elems.append(Paragraph(docs[\"notes_paragraph\"], style[\"Normal\"]))\n elems.append(\n ListFlowable(\n [ListItem(Paragraph(x, style[\"Normal\"])) for x in docs[\"notes\"]],\n bulletType=\"bullet\",\n )\n )\n\n elems.append(PageBreak()) # page 2: cluster details\n\n # query breakdown\n build_pdf_tables(elems, docs[\"query_breakdown\"], report)\n elems.append(Spacer(0, 5))\n\n # histogram and description\n image_path = hist_gen(\n x_data=report.feature_graph[\"sec_start\"],\n y_data=report.feature_graph[\"count\"],\n title=docs[\"graph\"].get(\"title\"),\n x_label=\"Average Elapsed Time (s)\",\n )\n\n desc = Paragraph(docs[\"graph\"].get(\"paragraph\"), style[\"Normal\"])\n data = [[Image(image_path, width=300, height=200, hAlign=\"LEFT\"), desc]]\n elems.append(\n Table(data, style=TableStyle([(\"VALIGN\", (0, 0), (-1, -1), \"MIDDLE\")]))\n )\n elems.append(Spacer(0, 5))\n\n # cluster metrics table\n build_pdf_tables(elems, docs[\"cluster_metrics\"], report)\n\n elems.append(PageBreak()) # page 3+ measure tables\n\n build_pdf_tables(\n elems, docs[\"measure_tables\"], report\n ) # build 5 measure tables all at once\n\n # build pdf\n pdf.build(\n elems,\n onFirstPage=partial(first_page, report=report),\n onLaterPages=partial(later_pages, report=report),\n )\n os.remove(image_path)\n\n return pdf.filename",
"def add_summary_header(self):\n self.fontSize(22, bold=True)\n self.PDF.setFillColor(\"black\")\n self.PDF.drawString(75, 260, \"Summary\")\n self.fontSize(FONT_XXS)\n self.PDF.setFillColor(HexColor(\"#9CA3AF\"))\n # self.PDF.drawString(\n # 185,\n # 260,\n # f\"{self.invoice.subscription.start_date} - {self.invoice.subscription.end_date}\",\n # )\n self.PDF.setFillColor(\"black\")\n self.fontSize(FONT_XS)\n self.PDF.setFillColor(HexColor(\"#9CA3AF\"))\n self.PDF.drawString(75, 290, \"Services\")\n self.PDF.drawString(475, 290, \"Amount\")\n self.PDF.setFillColor(\"black\")\n self.draw_line(305)",
"def buildPDF(self):\n\n # TODO: get this working\n # TODO: make this configurable via a dialog\n os.chdir(self.file_path.parent)\n proc = subprocess.Popen(\n [\"make\", \"latexpdf\"],\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT\n )\n proc.wait()\n for line in proc.stdout:\n print(\"stdout: \" + line.rstrip())",
"def build_pdf(branch):\n os.chdir(os.path.join(gitdname,'statsmodels','docs'))\n sphinx_dir = os.path.join(virtual_dir,'bin')\n retcode = subprocess.call(\" \".join(['make','latexpdf',\n 'SPHINXBUILD='+sphinx_dir+'/sphinx-build']), shell=True)\n if retcode != 0:\n os.chdir(old_cwd)\n msg = \"\"\"Could not build the pdf docs for branch %s\"\"\" % branch\n raise Exception(msg)\n os.chdir(dname)",
"def printSummary(self):\n pass",
"def legacy_reporter(self):\n logging.info('Creating database-friendly summary report')\n header = '{}\\n'.format(','.join(self.legacy_headers))\n # Create a string to store all the results\n data = str()\n for sample in self.metadata:\n # Add the value of the appropriate attribute to the results string\n data += GenObject.returnattr(sample, 'name')\n # SampleName\n data += GenObject.returnattr(sample.run, 'SamplePlate')\n # Genus\n data += GenObject.returnattr(sample.general, 'closestrefseqgenus')\n # SequencingDate\n data += GenObject.returnattr(sample.run, 'Date')\n # Analyst\n data += GenObject.returnattr(sample.run, 'InvestigatorName')\n # Legacy ConFindr clean/contaminated call\n data += 'ND,'\n # N50\n n50 = GenObject.returnattr(sample.quast, 'N50',\n number=True)\n if n50 != '-,':\n data += n50\n else:\n data += '0,'\n # NumContigs\n data += GenObject.returnattr(sample.quast, 'num_contigs',\n number=True)\n # TotalLength\n data += GenObject.returnattr(sample.quast, 'Total_length',\n number=True)\n # MeanInsertSize\n data += GenObject.returnattr(sample.quast, 'mean_insert',\n number=True)\n # InsertSizeSTD\n data += GenObject.returnattr(sample.quast, 'std_insert',\n number=True)\n # AverageCoverageDepth\n data += GenObject.returnattr(sample.qualimap, 'MeanCoveragedata',\n number=True)\n # CoverageDepthSTD\n data += GenObject.returnattr(sample.qualimap, 'StdCoveragedata',\n number=True)\n # PercentGC\n data += GenObject.returnattr(sample.quast, 'GC',\n number=True)\n # MASH_ReferenceGenome\n data += GenObject.returnattr(sample.mash, 'closestrefseq')\n # MASH_NumMatchingHashes\n data += GenObject.returnattr(sample.mash, 'nummatches')\n # 16S_result\n data += GenObject.returnattr(sample.sixteens_full, 'sixteens_match')\n # 16S PercentID\n data += GenObject.returnattr(sample.sixteens_full, 'percent_id')\n # rMLST_Result\n try:\n # If the number of matches to the closest reference profile is 53, return the profile number\n if sample.rmlst.matches == 53:\n if type(sample.rmlst.sequencetype) is list:\n rmlst_seq_type = ';'.join(sorted(sample.rmlst.sequencetype)).rstrip(';') + ','\n else:\n rmlst_seq_type = GenObject.returnattr(sample.rmlst, 'sequencetype')\n rmlst_seq_type = rmlst_seq_type if rmlst_seq_type != 'ND,' else 'new,'\n data += rmlst_seq_type\n else:\n # Otherwise the profile is set to new\n data += 'new,'\n except AttributeError:\n data += 'new,'\n # MLST_Result\n try:\n if sample.mlst.matches == 7:\n if type(sample.mlst.sequencetype) is list:\n mlst_seq_type = ';'.join(sorted(sample.mlst.sequencetype)).rstrip(';') + ','\n else:\n mlst_seq_type = GenObject.returnattr(sample.mlst, 'sequencetype')\n mlst_seq_type = mlst_seq_type if mlst_seq_type != 'ND,' else 'new,'\n data += mlst_seq_type\n else:\n data += 'new,'\n except AttributeError:\n data += 'new,'\n # MLST_gene_X_alleles\n try:\n # Create a set of all the genes present in the results (gene name split from allele)\n gene_set = {gene.split('_')[0] for gene in sample.mlst.combined_metadata_results}\n for gene in sorted(gene_set):\n allele_list = list()\n # Determine all the alleles that are present for each gene\n for allele in sample.mlst.combined_metadata_results:\n if gene in allele:\n allele_list.append(allele.replace(' ', '_'))\n # If there is more than one allele in the sample, add both to the string separated by a ';'\n if len(allele_list) > 1:\n data += '{},'.format(';'.join(allele_list))\n # Otherwise add the only allele\n else:\n data += allele_list[0] + ','\n # If there are fewer than seven matching alleles, add a 
ND for each missing result\n if len(gene_set) < 7:\n data += (7 - len(gene_set)) * 'ND,'\n except AttributeError:\n # data += '-,-,-,-,-,-,-,'\n data += 'ND,ND,ND,ND,ND,ND,ND,'\n # CoreGenesPresent\n data += GenObject.returnattr(sample.gdcs, 'coreresults')\n # E_coli_Serotype\n try:\n # If no O-type was found, set the output to be O-untypeable\n if ';'.join(sample.ectyper.o_type) == '-':\n otype = 'O-untypeable'\n else:\n otype = sample.ectyper.o_type\n # Same as above for the H-type\n if ';'.join(sample.ectyper.h_type) == '-':\n htype = 'H-untypeable'\n\n else:\n htype = sample.ectyper.h_type\n serotype = '{otype}:{htype},'.format(otype=otype,\n htype=htype)\n # Add the serotype to the data string unless neither O-type not H-type were found; add ND instead\n data += serotype if serotype != 'O-untypeable:H-untypeable,' else 'ND,'\n except AttributeError:\n data += 'ND,'\n # SISTR_serovar_antigen\n data += GenObject.returnattr(sample.sistr, 'serovar_antigen').rstrip(';')\n # SISTR_serovar_cgMLST\n data += GenObject.returnattr(sample.sistr, 'serovar_cgmlst')\n # SISTR_serogroup\n data += GenObject.returnattr(sample.sistr, 'serogroup')\n # SISTR_h1\n data += GenObject.returnattr(sample.sistr, 'h1').rstrip(';')\n # SISTR_h2\n data += GenObject.returnattr(sample.sistr, 'h2').rstrip(';')\n # SISTR_serovar\n data += GenObject.returnattr(sample.sistr, 'serovar')\n # GeneSeekr_Profile\n try:\n if sample.genesippr.report_output:\n data += ';'.join(sample.genesippr.report_output) + ','\n else:\n data += 'ND,'\n except AttributeError:\n data += 'ND,'\n # Vtyper_Profile\n data += GenObject.returnattr(sample.verotoxin, 'verotoxin_subtypes_set')\n # AMR_Profile and resistant/sensitive status\n if sample.resfinder_assembled.pipelineresults:\n # Profile\n for resistance, resistance_set in sorted(sample.resfinder_assembled.pipelineresults.items()):\n data += '{res}({r_set});'.format(res=resistance.replace(',', ';'),\n r_set=';'.join(sorted(list(resistance_set))))\n data += ','\n # Resistant/Sensitive\n data += 'Resistant,'\n else:\n # Profile\n data += 'ND,'\n # Resistant/Sensitive\n data += 'Sensitive,'\n # Plasmid Result'\n if sample.mobrecon.pipelineresults:\n for plasmid, details in sorted(sample.mobrecon.pipelineresults.items()):\n data += '{plasmid}({details});'.format(plasmid=plasmid,\n details=details)\n data += ','\n else:\n data += 'ND,'\n # TotalPredictedGenes\n data += GenObject.returnattr(sample.prodigal, 'predictedgenestotal',\n number=True)\n # PredictedGenesOver3000bp\n data += GenObject.returnattr(sample.prodigal, 'predictedgenesover3000bp',\n number=True)\n # PredictedGenesOver1000bp\n data += GenObject.returnattr(sample.prodigal, 'predictedgenesover1000bp',\n number=True)\n # PredictedGenesOver500bp\n data += GenObject.returnattr(sample.prodigal, 'predictedgenesover500bp',\n number=True)\n # PredictedGenesUnder500bp\n data += GenObject.returnattr(sample.prodigal, 'predictedgenesunder500bp',\n number=True)\n # NumClustersPF\n data += GenObject.returnattr(sample.run, 'NumberofClustersPF')\n # Percentage of reads mapping to PhiX control\n data += GenObject.returnattr(sample.run, 'phix_aligned')\n # Error rate calculated from PhiX control\n data += GenObject.returnattr(sample.run, 'error_rate')\n # LengthForwardRead\n data += GenObject.returnattr(sample.run, 'forwardlength',\n number=True)\n # LengthReverseRead\n data += GenObject.returnattr(sample.run, 'reverselength',\n number=True)\n # Real time strain\n data += GenObject.returnattr(sample.run, 'Description')\n # Flowcell\n data += 
GenObject.returnattr(sample.run, 'flowcell')\n # MachineName\n data += GenObject.returnattr(sample.run, 'instrument')\n # PipelineVersion\n data += self.commit + ','\n # AssemblyDate\n data += datetime.now().strftime('%Y-%m-%d') + ','\n # SamplePurity\n data += GenObject.returnattr(sample.confindr, 'num_contaminated_snvs')\n # cgMLST\n try:\n if type(sample.cgmlst.sequencetype) is list:\n if sample.cgmlst.sequencetype:\n cgmlst_seq_type = ';'.join(sorted(sample.cgmlst.sequencetype)).rstrip(';') + ','\n else:\n cgmlst_seq_type = 'ND,'\n else:\n cgmlst_seq_type = GenObject.returnattr(sample.cgmlst, 'sequencetype')\n # cgmlst_seq_type = cgmlst_seq_type if cgmlst_seq_type != 'ND,' else 'new,'\n data += cgmlst_seq_type\n except AttributeError:\n data += 'ND,'\n # Name of the database used in the analyses\n data += os.path.split(self.reffilepath)[-1] + ','\n # Database download date\n data += self.download_date\n # Append a new line to the end of the results for this sample\n data += '\\n'\n # Replace any NA values with ND\n cleandata = data.replace('NA', 'ND')\n with open(os.path.join(self.reportpath, 'legacy_combinedMetadata.csv'), 'w') as metadatareport:\n metadatareport.write(header)\n metadatareport.write(cleandata)",
"def create_pdf(f,s1,s2='',s3=''):\n # does not need reportlab!\n if s1 == 'White Ballot': s1 = '\"'+'_'*10+'\"'\n cod = zlib.compress('BT /F1 16 Tf ET\\r\\nBT 300 270 Td (%s) Tj ET\\r\\nBT /F1 48 Tf ET\\r\\nBT 5 180 Td (%16s) Tj ET\\r\\nBT /F1 12 Tf ET\\r\\nBT 10 50 Td (%s) Tj ET'%(s3,s1,s2))\n open(f,'w').write(create_pdf.__doc__ + '/Length %d>>\\nstream\\n'%len(cod) + cod + 'endstream endobj\\ntrailer<</Root 4 0 R>>')",
"def makePdf(sources):\n pdf = PdfPages(\"sample_features.pdf\")\n classnames = []\n classname_dict = {}\n x = 2 # number of subplot columns\n y = 3 # number of subplot rows\n for source in sources:\n lc = source.lcs[0]\n\n if lc.classname not in classnames:\n classnames.append(lc.classname)\n classname_dict[lc.classname] = [lc]\n else:\n classname_dict[lc.classname].append(lc)\n\n if len(classname_dict[lc.classname]) < 3:\n\n label = lc.classname + \"; ID: \" + lc.id\n # all_times histogram:\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_title(label)\n ax.axis('off')\n\n ax1 = fig.add_subplot(321)\n ax2 = fig.add_subplot(322)\n ax2.axis('off')\n ax3 = fig.add_subplot(323)\n ax4 = fig.add_subplot(324)\n ax4.axis('off')\n ax5 = fig.add_subplot(325)\n ax6 = fig.add_subplot(326)\n ax6.axis('off')\n\n hist, bins, other = ax1.hist(lc.all_times, 50, normed=True)\n ax1.text(np.max(bins) * 0.1, np.max(hist) * 0.8,\n r'Histogram (normed) of all $\\Delta$Ts')\n\n ax2.text(0.0, 0.9, (r'$\\bullet$med time to next obs: ' +\n str(np.round(lc.cads_med, 4))))\n ax2.text(0.0, 0.75, (r'$\\bullet$avg time to next obs: ' +\n str(np.round(lc.avgt, 4))))\n ax2.text(0.0, 0.6, (r'$\\bullet$std dev of time to next obs: ' +\n str(np.round(lc.cads_std, 4))))\n ax2.text(0.0, 0.45, (r'$\\bullet$med of all $\\Delta$Ts: ' +\n str(np.round(lc.all_times_med, 4))))\n ax2.text(0.0, 0.3, (r'$\\bullet$avg of all $\\Delta$Ts: ' +\n str(np.round(lc.all_times_avg, 4))))\n ax2.text(0.0, 0.15, (r'$\\bullet$std dev of all $\\Delta$Ts: ' +\n str(np.round(lc.all_times_std, 4))))\n\n hist, bins, other = ax3.hist(lc.cads, 50)\n ax3.text(np.max(bins) * 0.1, np.max(hist) * 0.8,\n r'Hist of time to next obs')\n\n ax6.text(\n 0.0, 0.9, r'$\\bullet$Number of epochs: ' + str(lc.n_epochs))\n ax6.text(0.0, 0.75, (r'$\\bullet$Time b/w first & last obs (days): ' +\n str(np.round(lc.total_time, 2))))\n ax6.text(0.0, 0.6, (r'$\\bullet$Average error in mag: ' +\n str(np.round(lc.avg_err, 4))))\n ax6.text(0.0, 0.45, (r'$\\bullet$Median error in mag: ' +\n str(np.round(lc.med_err, 4))))\n ax6.text(0.0, 0.3, (r'$\\bullet$Std dev of error: ' +\n str(np.round(lc.std_err, 4))))\n ax6.text(0.0, 0.15, '')\n\n ax5.scatter(lc.epochs, lc.mags)\n\n ax4.text(0.0, 0.9, (r'$\\bullet$Avg double to single step ratio: ' +\n str(np.round(lc.avg_double_to_single_step, 3))))\n ax4.text(0.0, 0.75, (r'$\\bullet$Med double to single step: ' +\n str(np.round(lc.med_double_to_single_step, 3))))\n ax4.text(0.0, 0.6, (r'$\\bullet$Std dev of double to single step: ' +\n str(np.round(lc.std_double_to_single_step, 3))))\n ax4.text(\n 0.0, 0.45,\n (r'$\\bullet$1st peak to 2nd peak (in all $\\Delta$Ts): ' +\n str(np.round(lc.all_times_nhist_peak_1_to_2, 3))))\n ax4.text(\n 0.0, 0.3,\n (r'$\\bullet$2ndt peak to 3rd peak (in all $\\Delta$Ts): ' +\n str(np.round(lc.all_times_nhist_peak_2_to_3, 3))))\n ax4.text(\n 0.0, 0.15,\n (r'$\\bullet$1st peak to 3rd peak (in all $\\Delta$Ts): ' +\n str(np.round(lc.all_times_nhist_peak_1_to_3, 3))))\n\n pdf.savefig(fig)\n\n pdf.close()\n\n pdf = PdfPages('feature_plots.pdf')\n\n fig = plt.figure()\n\n ax1 = fig.add_subplot(221)\n ax2 = fig.add_subplot(222)\n ax3 = fig.add_subplot(223)\n ax4 = fig.add_subplot(224)\n\n plt.subplots_adjust(wspace=0.4, hspace=0.4)\n\n classnamenum = 0\n\n colors = ['red', 'yellow', 'green', 'blue', 'gray', 'orange', 'cyan',\n 'magenta']\n for classname, lcs in list(classname_dict.items()):\n classnamenum += 1\n print(classname, len(lcs), 'light curves.')\n attr1 = []\n attr2 = []\n attr3 = []\n attr4 = 
[]\n attr5 = []\n attr6 = []\n attr7 = []\n attr8 = []\n for lc in lcs:\n attr1.append(lc.n_epochs)\n attr2.append(lc.avgt)\n attr3.append(lc.cads_std)\n attr4.append(lc.total_time)\n attr5.append(lc.all_times_hist_peak_val)\n attr6.append(lc.cad_probs[5000])\n attr7.append(lc.all_times_nhist_peak_1_to_3)\n attr8.append(lc.all_times_nhist_peak_val)\n\n ax2.scatter(attr1, attr2, color=colors[classnamenum], label=classname)\n ax1.scatter(attr3, attr4, color=colors[classnamenum], label=classname)\n ax2.set_xlabel('N Epochs')\n ax2.set_ylabel('Avg time to next obs')\n ax1.set_xlabel('Standard dev. of time to next obs')\n ax1.set_ylabel('Time b/w first and last obs')\n\n ax3.scatter(attr5, attr6, color=colors[classnamenum], label=classname)\n ax4.scatter(attr7, attr8, color=colors[classnamenum], label=classname)\n ax3.set_xlabel(r'All $\\Delta$T hist peak val')\n ax3.set_ylabel('Prob time to next obs <= 5000 min')\n ax4.set_xlabel(r'$\\Delta$Ts normed hist peak 1 to peak 3')\n ax4.set_ylabel(r'Peak val of all $\\Delta$Ts normed hist')\n\n #ax1.legend(bbox_to_anchor=(1.1, 1.1),prop={'size':6})\n ax2.legend(bbox_to_anchor=(1.1, 1.1), prop={'size': 6})\n #ax3.legend(loc='upper right',prop={'size':6})\n #ax4.legend(loc='upper right',prop={'size':6})\n\n pdf.savefig(fig)\n\n pdf.close()\n return 0",
"def _print_summary(results):\n if not len(results) > 0:\n print 'No results to show in summary.'\n return\n\n table = {}\n for res in results:\n for k, v in res.iteritems():\n table.setdefault(k, []).append(v)\n print tabulate(table, headers='keys', tablefmt=\"simple\")",
"def metadata_reporter(self):\n logging.info('Creating summary report')\n header = '{}\\n'.format(','.join(self.headers))\n # Create a string to store all the results\n data = str()\n for sample in self.metadata:\n # Add the value of the appropriate attribute to the results string\n data += GenObject.returnattr(sample, 'name')\n # SampleName\n data += GenObject.returnattr(sample.run, 'SamplePlate')\n # Genus\n data += GenObject.returnattr(sample.general, 'closestrefseqgenus')\n # SamplePurity\n data += GenObject.returnattr(sample.confindr, 'num_contaminated_snvs')\n # N50\n n50 = GenObject.returnattr(sample.quast, 'N50',\n number=True)\n if n50 != '-,':\n data += n50\n else:\n data += '0,'\n # NumContigs\n data += GenObject.returnattr(sample.quast, 'num_contigs',\n number=True)\n # TotalLength\n data += GenObject.returnattr(sample.quast, 'Total_length',\n number=True)\n # MeanInsertSize\n data += GenObject.returnattr(sample.quast, 'mean_insert',\n number=True)\n # InsertSizeSTD\n data += GenObject.returnattr(sample.quast, 'std_insert',\n number=True)\n # AverageCoverageDepth\n data += GenObject.returnattr(sample.qualimap, 'MeanCoveragedata',\n number=True)\n # CoverageDepthSTD\n data += GenObject.returnattr(sample.qualimap, 'StdCoveragedata',\n number=True)\n # PercentGC\n data += GenObject.returnattr(sample.quast, 'GC',\n number=True)\n # MASH_ReferenceGenome\n data += GenObject.returnattr(sample.mash, 'closestrefseq')\n # MASH_NumMatchingHashes\n data += GenObject.returnattr(sample.mash, 'nummatches')\n # 16S_result\n data += GenObject.returnattr(sample.sixteens_full, 'sixteens_match')\n # 16S PercentID\n data += GenObject.returnattr(sample.sixteens_full, 'percent_id')\n # CoreGenesPresent\n data += GenObject.returnattr(sample.gdcs, 'coreresults')\n # rMLST_Result\n try:\n # If the number of matches to the closest reference profile is 53, return the profile number\n if sample.rmlst.matches == 53:\n if type(sample.rmlst.sequencetype) is list:\n rmlst_seq_type = ';'.join(sorted(sample.rmlst.sequencetype)).rstrip(';') + ','\n else:\n rmlst_seq_type = GenObject.returnattr(sample.rmlst, 'sequencetype')\n rmlst_seq_type = rmlst_seq_type if rmlst_seq_type != 'ND,' else 'new,'\n data += rmlst_seq_type\n else:\n # Otherwise the profile is set to new\n data += 'new,'\n except AttributeError:\n data += 'new,'\n # MLST_Result\n try:\n if sample.mlst.matches == 7:\n if type(sample.mlst.sequencetype) is list:\n mlst_seq_type = ';'.join(sorted(sample.mlst.sequencetype)).rstrip(';') + ','\n else:\n mlst_seq_type = GenObject.returnattr(sample.mlst, 'sequencetype')\n mlst_seq_type = mlst_seq_type if mlst_seq_type != 'ND,' else 'new,'\n data += mlst_seq_type\n else:\n data += 'new,'\n except AttributeError:\n data += 'new,'\n # MLST_gene_X_alleles\n try:\n # Create a set of all the genes present in the results (gene name split from allele)\n gene_set = {gene.split('_')[0] for gene in sample.mlst.combined_metadata_results}\n for gene in sorted(gene_set):\n allele_list = list()\n # Determine all the alleles that are present for each gene\n for allele in sample.mlst.combined_metadata_results:\n if gene in allele:\n allele_list.append(allele.replace(' ', '_'))\n # If there is more than one allele in the sample, add both to the string separated by a ';'\n if len(allele_list) > 1:\n data += '{},'.format(';'.join(allele_list))\n # Otherwise add the only allele\n else:\n data += allele_list[0] + ','\n # If there are fewer than seven matching alleles, add a ND for each missing result\n if len(gene_set) < 7:\n data 
+= (7 - len(gene_set)) * 'ND,'\n except AttributeError:\n # data += '-,-,-,-,-,-,-,'\n data += 'ND,ND,ND,ND,ND,ND,ND,'\n # E_coli_Serotype\n try:\n # If no O-type was found, set the output to be O-untypeable\n if ';'.join(sample.ectyper.o_type) == '-':\n otype = 'O-untypeable'\n else:\n otype = sample.ectyper.o_type\n # Same as above for the H-type\n if ';'.join(sample.ectyper.h_type) == '-':\n htype = 'H-untypeable'\n\n else:\n htype = sample.ectyper.h_type\n serotype = '{otype}:{htype},'.format(otype=otype,\n htype=htype)\n # Add the serotype to the data string unless neither O-type not H-type were found; add ND instead\n data += serotype if serotype != 'O-untypeable:H-untypeable,' else 'ND,'\n except AttributeError:\n data += 'ND,'\n # SISTR_serovar_antigen\n data += GenObject.returnattr(sample.sistr, 'serovar_antigen').rstrip(';')\n # SISTR_serovar_cgMLST\n data += GenObject.returnattr(sample.sistr, 'serovar_cgmlst')\n # SISTR_serogroup\n data += GenObject.returnattr(sample.sistr, 'serogroup')\n # SISTR_h1\n data += GenObject.returnattr(sample.sistr, 'h1').rstrip(';')\n # SISTR_h2\n data += GenObject.returnattr(sample.sistr, 'h2').rstrip(';')\n # SISTR_serovar\n data += GenObject.returnattr(sample.sistr, 'serovar')\n # GeneSeekr_Profile\n try:\n if sample.genesippr.report_output:\n data += ';'.join(sample.genesippr.report_output) + ','\n else:\n data += 'ND,'\n except AttributeError:\n data += 'ND,'\n # Vtyper_Profile\n data += GenObject.returnattr(sample.verotoxin, 'verotoxin_subtypes_set')\n # AMR_Profile and resistant/sensitive status\n if sample.resfinder_assembled.pipelineresults:\n # Profile\n for resistance, resistance_set in sorted(sample.resfinder_assembled.pipelineresults.items()):\n data += '{res}({r_set});'.format(res=resistance.replace(',', ';'),\n r_set=';'.join(sorted(list(resistance_set))))\n data += ','\n # Resistant/Sensitive\n data += 'Resistant,'\n else:\n # Profile\n data += 'ND,'\n # Resistant/Sensitive\n data += 'Sensitive,'\n # Plasmid Result'\n if sample.mobrecon.pipelineresults:\n for plasmid, details in sorted(sample.mobrecon.pipelineresults.items()):\n data += '{plasmid}({details});'.format(plasmid=plasmid,\n details=details)\n data += ','\n else:\n data += 'ND,'\n # TotalPredictedGenes\n data += GenObject.returnattr(sample.prodigal, 'predictedgenestotal',\n number=True)\n # PredictedGenesOver3000bp\n data += GenObject.returnattr(sample.prodigal, 'predictedgenesover3000bp',\n number=True)\n # PredictedGenesOver1000bp\n data += GenObject.returnattr(sample.prodigal, 'predictedgenesover1000bp',\n number=True)\n # PredictedGenesOver500bp\n data += GenObject.returnattr(sample.prodigal, 'predictedgenesover500bp',\n number=True)\n # PredictedGenesUnder500bp\n data += GenObject.returnattr(sample.prodigal, 'predictedgenesunder500bp',\n number=True)\n # AssemblyDate\n data += datetime.now().strftime('%Y-%m-%d') + ','\n # PipelineVersion\n data += self.commit + ','\n # Name of the database used in the analyses\n data += os.path.split(self.reffilepath)[-1] + ','\n # Database download date\n data += self.download_date\n # Append a new line to the end of the results for this sample\n data += '\\n'\n # Replace any NA values with ND\n cleandata = data.replace('NA', 'ND')\n with open(os.path.join(self.reportpath, 'combinedMetadata.csv'), 'w') as metadatareport:\n metadatareport.write(header)\n metadatareport.write(cleandata)",
"def create_standard_analysis_report(output_path, json_results, run_id):\n # Create the PDF object\n pdf = FPDF()\n pdf.add_page()\n pdf.set_font(\"Courier\", size=8)\n\n # Define the output path for the report PDF\n report_name = run_id + '_standard_analysis.pdf'\n report_path = os.path.join(output_path, report_name)\n\n # Create the header metadata from the metadata in the JSON results\n # and write out to PDF\n header_metadata_str = grab_header_metadata(json_results, run_id)\n line_height = 3\n pdf.write(line_height, header_metadata_str)\n\n # Add graphs to PDF object\n pdf = add_benchmark_graphs(pdf, output_path)\n\n # Output final PDF file from PDF object\n pdf.output(report_path)",
"def pdf_summary(self, request, object_id, extra_context=None):\n obj = self.get_object(request, unquote(object_id))\n title = \"PDFs\"\n\n cmd = ['fexsend', '-l', '-v']\n run = subprocess.Popen(' '.join(cmd), shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n fex_tokens = run.communicate()[0]\n\n token_str = self.__find_between(fex_tokens, '<pre>', '</pre>')\n tokens = [token for token in token_str.split('<--') if 'dkey' in token]\n\n fex_file_list = []\n for token in tokens:\n pdf = self.__find_between(token, '>', '<')\n dkey = self.__find_between(token, 'dkey=', '&')\n expiry = self.__find_between(token, '[', ']')\n size = self.__find_between(token, ' ', ' [').strip()\n size = '< 1 MB' if size == '0 MB' else size\n if not pdf.endswith(\"_pfp.pdf\"): # exclude non-pfp's\n continue\n\n try:\n timestamp = datetime.strptime(pdf.split('_')[-2], '%Y-%m-%dT%H%M')\n except ValueError:\n timestamp = datetime.strptime(pdf.split('_')[-2], '%Y-%m-%dT%H%M%S')\n\n fex_file_list.append([\n pdf,\n settings.FEX_SVR_HTTP + '/fop/' + dkey + '/' + pdf,\n size,\n expiry,\n timestamp,\n ])\n\n fex_file_list = sorted(fex_file_list, key=lambda x: x[4], reverse=True)\n\n context = {\n 'title': title,\n 'current': obj,\n 'fex_file_list': fex_file_list,\n }\n return TemplateResponse(request, self.pdf_summary_template,\n context, current_app=self.admin_site.name)",
"def print_quick_report():\r\n print('function not yet written')\r\n # print a summary of the report as a structured pandas dataframe\r\n #Summary will include only date title and sentiment\r",
"def inscription_summary(request, pk):\n candidat = get_object_or_404(Candidate, pk=pk)\n buff = io.BytesIO()\n pdf = InscriptionSummaryPDF(buff)\n pdf.produce(candidat)\n filename = slugify('{0}_{1}'.format(candidat.last_name, candidat.first_name)) + '.pdf'\n buff.seek(0)\n return FileResponse(buff, as_attachment=True, filename=filename)",
"def make_pdf(self):\n source = self.get_page_source()\n if not source:\n self.errors.append('no_source')\n if not self.errors:\n self.generate_pdf_file(source)",
"def create_pdf(self):\n\n my_datetime = datetime.now()\n self.pdf_name = (\n self.pdf_name + \"_\" + my_datetime.strftime(\"%H%M_%d%m%Y\") + \".pdf\"\n )\n fig_width = aW * self.column_ratio[0]\n\n clm_width_meta = (aW * self.column_ratio[1]) / len(self.fields)\n\n c = canvas.Canvas(os.path.join(self.pdf_folder, self.pdf_name), pagesize=A4)\n\n for qc_run_id, fig_file in sorted(self._files.items()):\n (param_values, feature_values) = get_param_values(\n qc_run_id, self.db_name, return_meta_add_on=True\n )\n\n comment = self.subject + \"<br/>\"\n # c.saveState()\n title = \"Dataset \" + qc_run_id\n\n # Prepare header\n header = Paragraph(title, title_style)\n h_w, h_h = header.wrap(aW, aH)\n\n # Prepare image\n img = ImageReader(fig_file)\n im_width, im_height = img.getSize()\n aspect = im_height / float(im_width)\n fig_height = fig_width * aspect\n\n # Prepare metadata section\n\n meta_table = Table(\n param_values,\n colWidths=[clm_width_meta] * len(self.fields),\n hAlign=\"CENTER\",\n rowHeights=0.22 * inch,\n )\n meta_table.setStyle(\n TableStyle(\n [\n (\"FONT\", (0, 0), (-1, 0), \"Helvetica-Bold\"),\n (\"FONT\", (0, 1), (-1, -1), \"Helvetica\"),\n (\"LINEBELOW\", (0, 0), (1, 0), 0.08, colors.black),\n (\"SIZE\", (0, 0), (-1, -1), 8),\n (\"VALIGN\", (0, 0), (-1, -1), \"BOTTOM\"),\n # ('ALIGN', (0, 0), (-1, 0), 'CENTER'),\n (\"ALIGN\", (0, 0), (0, -1), \"LEFT\"),\n (\"ALIGN\", (1, 1), (1, -1), \"LEFT\"),\n (\"INNERGRID\", (0, 0), (-1, -1), 0.08, colors.beige),\n # ('BOX', (0,0), (-1,-1), 0.25, colors.grey),\n ]\n )\n )\n\n meta_width, meta_height = meta_table.wrap(aW - im_width, aH / 2)\n\n # Prepare comments header\n comments_header = Paragraph(\"Comments:\", title_style)\n avail_height = aH - fig_height - v_padding\n comm_h_width, comm_h_height = comments_header.wrap(\n im_width, avail_height # aW - meta_width,\n )\n # Prepare comments\n my_datetime = datetime.now()\n ts = \"Printed on \" + my_datetime.strftime(\"%c\")\n\n try:\n data_specific_comment = self.comments[int(qc_run_id)]\n comment += data_specific_comment + \"<br/>\"\n comment += self.comments[\"general\"] + \"<br/>\"\n\n comment += self.smalltalk + \"<br/>\"\n except Exception:\n logger.warning(\n \"Unable to summarize result of \" + \"dataset {}\".format(qc_run_id)\n )\n comment_ts = comment + ts\n comment_ts = textwrap.fill(comment_ts, 70)\n comment_ts = comment_ts.replace(\"\\n\", \"<br/>\")\n\n comments_p = Paragraph(comment_ts, body_style)\n\n avail_height = aH - fig_height - v_padding - comm_h_height\n\n comm_width, comm_height = comments_p.wrap(im_width, avail_height) # aW,\n\n line_widths = comments_p.getActualLineWidths0()\n number_of_lines = len(line_widths)\n if number_of_lines > 1:\n pass\n if number_of_lines == 1:\n min(line_widths)\n comm_width, comm_height = comments_p.wrap(im_width, avail_height)\n\n # Prepare features\n feat_table = Table(\n feature_values,\n colWidths=[clm_width_meta] * len(self.fields),\n hAlign=\"CENTER\",\n rowHeights=0.22 * inch,\n )\n feat_table.setStyle(\n TableStyle(\n [\n (\"FONT\", (0, 0), (-1, 0), \"Helvetica-Bold\"),\n (\"FONT\", (0, 1), (-1, -1), \"Helvetica\"),\n (\"LINEBELOW\", (0, 0), (1, 0), 0.08, colors.black),\n (\"SIZE\", (0, 0), (-1, -1), 8),\n (\"VALIGN\", (0, 0), (-1, -1), \"BOTTOM\"),\n # ('ALIGN', (0, 0), (-1, 0), 'CENTER'),\n (\"ALIGN\", (0, 0), (0, -1), \"LEFT\"),\n (\"ALIGN\", (1, 1), (1, -1), \"LEFT\"),\n (\"INNERGRID\", (0, 0), (-1, -1), 0.08, colors.beige),\n # ('BOX', (0,0), (-1,-1), 0.25, colors.grey),\n ]\n )\n )\n avail_height = aH - meta_height # 
fig_height - v_padding - comm_h_height\n avail_height -= comm_height\n feat_width, feat_height = feat_table.wrap(aW - im_width, avail_height)\n\n # Draw everyting on canvas\n\n header.drawOn(c, left_margin, aH - top_margin)\n\n c.drawImage(\n img,\n left_margin,\n aH - top_margin - fig_height - v_padding,\n width=fig_width * 1.1,\n height=fig_height * 1.1,\n mask=\"auto\",\n )\n\n meta_table.drawOn(\n c,\n left_margin + fig_width + h_padding,\n aH - meta_height - top_margin / 2, # - v_padding\n )\n\n comments_header.drawOn(\n c,\n left_margin,\n aH\n - top_margin\n - comm_h_height\n - fig_height\n - 2 * v_padding, # - add_on_height\n )\n\n comments_p.drawOn(\n c,\n left_margin,\n aH\n - top_margin\n - comm_h_height\n - comm_height\n - fig_height\n - 2 * v_padding\n - comm_h_height, # - add_on_height\n )\n\n feat_table.drawOn(\n c,\n left_margin + fig_width + h_padding,\n aH - meta_height - top_margin / 2 - feat_height - v_padding,\n # top_margin - fig_height - 2*v_padding - feat_height\n )\n\n # new page\n c.showPage()\n c.saveState()\n\n c.save()",
"def mk_summary(dlas, prefix, outfil, specpath=None, htmlfil=None):\n #\n if htmlfil is None:\n htmlfil = 'tmp.html'\n\n # # Constructing\n # QSO, RA/DEC\n cqso = Column(dlas.qso, name='QSO')\n ra = dlas.coord.ra.degree\n dec = dlas.coord.dec.degree\n jname = []\n for abs_sys in dlas._abs_sys:\n jname.append(survey_name(prefix, abs_sys))\n\n cjname = Column(jname, name='Name')\n cra = Column(ra, name='RA', unit=u.degree)\n cdec = Column(dec, name='DEC', unit=u.degree)\n czem = Column(dlas.zem, name='Z_QSO')\n\n # Begin the Table\n dla_table = QTable( [cjname, cqso, cra, cdec, czem] )\n\n # LLS properties\n czabs = Column(dlas.zabs, name='ZABS')\n cNHI = Column(dlas.NHI, name='logNHI')\n csigNHI = Column(dlas.sig_NHI, name='sig(logNHI)')\n\n # Add to Table\n dla_table.add_columns([czabs, cNHI, csigNHI])\n\n # Spectra files\n all_sfiles = []\n for jj,ills in enumerate(dlas._abs_sys):\n sub_spec = mk_1dspec(ills, name=cjname[jj], outpath=specpath)\n # Pad\n while len(sub_spec) < 5:\n sub_spec.append(str('NULL'))\n # Append\n all_sfiles.append(sub_spec)\n\n cspec = Column(np.array(all_sfiles), name='SPEC_FILES')\n dla_table.add_column( cspec )\n\n # Sort\n dla_table.sort('RA')\n\n # Write\n print('Writing {:s}'.format(outfil))\n xxf.table_to_fits(dla_table,outfil)\n print('Writing {:s}'.format(htmlfil))\n Table(dla_table).write(htmlfil)\n\n return dla_table",
"def show_summary(self, lang):\n return self.summary % self.vars",
"def create_summary_writer(self):\n col_names_lungs = ['Epoch', 'Step', 'Local_gen_total_loss', 'Global_gen_total_loss', 'Local_gen_gan_loss',\n 'Global_gen_gan_loss', 'Local_gen_l1_loss', 'Global_gen_l1_loss', 'Local_disc_loss',\n 'Global_disc_loss', 'All_gen_loss', 'All_disc_loss']\n self.summary_writer_lungs = pd.DataFrame(columns=col_names_lungs)\n\n col_names_organs = ['Epoch', 'Step', 'Gen_total_loss', 'Gen_gan_loss', 'Gen_l1_loss', 'Disc_loss']\n self.summary_writer_organs = pd.DataFrame(columns=col_names_organs)\n return self",
"def generate_document(stats: dict, semester: str):\n filename = 'report_' + str(date.today()) + '.html'\n with open('raw_html.html', 'r') as f:\n string = f.read()\n string = string.format(semester,\n stats['faculty_with_usage'],\n stats['full_time'],\n stats['total_full_time'],\n round((stats['full_time'] / stats['total_full_time']) * 100, 1),\n stats['part_time'],\n stats['total_part_time'],\n round((stats['part_time'] / stats['total_part_time']) * 100, 1),\n stats['staff'],\n stats['courses_with_usage'],\n stats['total_courses'],\n round((stats['courses_with_usage'] / stats['total_courses']) * 100, 1),\n stats['specifics']['assignments'],\n stats['specifics']['grade'],\n stats['specifics']['graded'],\n stats['specifics']['discussion'])\n with open(filename, 'w') as f:\n f.write(string)\n pdf = weasyprint.HTML(filename).write_pdf()\n open(\"report_\" + str(date.today()) + \".pdf\", 'wb').write(pdf)",
"def present_summary(services, methods, count, backup):\n print_heading(\"Summary\")\n if backup is not None:\n writer(f\"Backup: {backup}\")\n writer(f\"Showing {count[0]}/{len(services)} Services\")\n writer(f\"Showing {count[1]}/{len(methods)} Methods\\n\")",
"def create_test_summary(args, TEST_RESULTS):\n logging.error(\"Creating test summary report...\")\n\n try:\n test_summary = \"Performance Metrics of {APP} Application Tested from this PR\\n\".format(APP=args.bundle_id)\n test_summary += \"---------------------------------------------------------------\\n\"\n\n for element in TEST_RESULTS:\n if element != LAUNCHES:\n test_summary += \"> {KEY}: {VALUE}\".format(KEY=element, VALUE=TEST_RESULTS[element])\n if element == INSTALL_LAUNCH_DURATION:\n if int(TEST_RESULTS[INSTALL_LAUNCH_DURATION]) > args.duration_limit:\n test_summary += \"ms :x:\\n\"\n else:\n test_summary += \"ms :white_check_mark:\\n\"\n\n if element == INSTALL_MEMORY_USAGE:\n if int(TEST_RESULTS[INSTALL_MEMORY_USAGE]) > args.memory_limit:\n test_summary += \"MB :x:\\n\"\n else:\n test_summary += \"MB :white_check_mark:\\n\"\n\n if element == APP_SIZE:\n if int(TEST_RESULTS[APP_SIZE]) > args.size_limit:\n test_summary += \"MB :x:\\n\"\n else:\n test_summary += \"MB :white_check_mark:\\n\"\n test_summary += \"---------------------------------------------------------------\\n\"\n\n for element in TEST_RESULTS[LAUNCHES]:\n test_summary += \"> DEVICE: {DEVICE} | LAUNCH TYPE: {LAUNCH_TYPE} | \".format(DEVICE=element[DEVICE], LAUNCH_TYPE=element[LAUNCH_TYPE])\n test_summary += \"DURATION: {DURATION}ms \".format(DURATION=element[LAUNCH_DURATION])\n if int(element[LAUNCH_DURATION]) > args.duration_limit:\n test_summary += \" :x: | \"\n else:\n test_summary += \" :white_check_mark: | \"\n\n test_summary += \"MEMORY USAGE: {MEMORY_USAGE}MB \".format(MEMORY_USAGE=element[MEMORY_USAGE])\n if int(element[MEMORY_USAGE]) > args.memory_limit:\n test_summary += \" :x:\\n\"\n else:\n test_summary += \" :white_check_mark:\\n\"\n test_summary += \"----------------------------------------------------\\n\"\n\n except Exception as e:\n logging.error(\"Creating test summary failed with error '{ERROR}'\".format(ERROR=e))\n return None\n\n logging.info(test_summary)\n return test_summary",
"def generate_pdf(list,id):\n\n doc = SimpleDocTemplate(settings.STATIC_ROOT+\"/tests/\"+str(id)+\"/\"+str(id)+\".pdf\")\n\n Story = [Spacer(1,2*inch)]\n styles = stylesheet()\n global Title\n\n # Add 10 questions with boxes below\n for i in list:\n if not i[0] in \"skills-scan\" and not i[0] in \"csrfmiddlewaretoken\" and not i[0] in \"titre\" and not i[0] in \"custom\":\n tmp = int(i[0])+1\n bogustext = (str(tmp)+\". %s\" % i[1])\n p = Paragraph(bogustext, styles['default'])\n # Write the paragraph\n\n draw = Drawing()\n # rect(x1,y1,width,height)\n rec = Rect(0, 100, 450, 150)\n rec.fillColor = colors.white\n # draw the rect under each paragraph\n draw.add(rec)\n p.keepWithNext = True\n Story.append(p)\n Story.append(draw)\n Story.append(Spacer(1,-0.9 * inch))\n elif i[0] in \"titre\":\n Title = i[1]\n # build the document by inserting the whole story\n doc.build(Story, onFirstPage=myFirstPage, onLaterPages=myLaterPages)\n return str(id)+\".pdf\"",
"def pdf():\n env.file_ext = \".pdf\"\n local(\"pandoc {input_files} -o {output_file}{file_ext} -H {preamble_file} --template {template_file} --bibliography={bib_file} --csl={csl_file} -V fontsize=12pt -V papersize=a4paper -V documentclass:report -N --latex-engine=xelatex\".format(**env))",
"def summary(self, summary: str):\n return self.swag({\n 'summary': summary\n })",
"def make_summary(df, save_path):\n # first survey blocks\n style_list = df.name.unique()\n\n # summary data\n num_styles = len(style_list)\n num_units = df.guid.count()\n num_floors = len(df.floor.unique())\n num_phases = len(df.phase.unique())\n num_priority = len(df.priority.unique())\n num_swings = len(df.swing_drop.unique())\n num_groups = len(df.db.unique())\n group_totals = df[[\"db\", \"guid\"]].groupby(\"db\").count()\n group_totals.reset_index(inplace=True)\n loners = len(group_totals.loc[group_totals.guid == 1, \"db\"].tolist())\n assemblies = len(group_totals.loc[group_totals.guid != 1, \"db\"].tolist())\n\n summary = {\"Total_Styles\": num_styles,\n \"Total_Units\": num_units,\n \"Total_Floors\": num_floors,\n \"Total_Phases\": num_phases,\n \"Total_Priorities\": num_priority,\n \"Total_Drops\": num_swings,\n \"Total_Groups\": num_groups,\n \"Total_Loners\": loners,\n \"Total_Assemblies\": assemblies}\n summary_df = pd.DataFrame(summary, index=[0])\n summary_df = summary_df.T\n\n # print(assemblies)\n # print(loners)\n # print(num_units)\n # print(list(df))\n\n # first groups\n # print(df[[\"group\",\"group_order\"]])\n # print(list(df))\n\n # reports\n floor_counts = df[[\"floor\", \"guid\"]].groupby(\"floor\").count()\n floor_counts.reset_index(inplace=True)\n # print(floor_counts)\n\n elevations = df[[\"elevation\", \"guid\"]].groupby(\"elevation\").count()\n elevations.reset_index(inplace=True)\n # (list(df))\n\n names = df[[\"name\", \"guid\"]].groupby(\"name\").count()\n names.reset_index(inplace=True)\n # print(names)\n\n priorities = df[[\"priority\", \"guid\"]].groupby(\"priority\").count()\n priorities.reset_index(inplace=True)\n # print(priorities)\n\n swing = df[[\"swing_drop\", \"guid\"]].groupby(\"swing_drop\").count()\n swing.reset_index(inplace=True)\n # print(swing)\n\n phase = df[[\"phase\", \"guid\"]].groupby(\"phase\").count()\n phase.reset_index(inplace=True)\n # print(phase)\n\n group_rpt = df[[\"db\", \"guid\"]].groupby(\"db\").count()\n group_rpt.reset_index(inplace=True)\n # print(group_rpt)\n\n group_odr = df[[\"name\", \"group_order\", \"guid\"]].groupby([\"group_order\", \"name\"]).count()\n group_odr.reset_index(inplace=True)\n # print(group_odr)\n\n samples = df.loc[df.instance == 1, [\"survey_name\", \"priority\", \"phase\"]]\n # print(firsts)\n\n\n\n\n df[\"qmarks\"] = df.name.apply(find_qmark)\n qmarks = df.loc[df.qmarks == 1, [\"name\", \"guid\"]]\n qmarks.reset_index(drop=True, inplace=True)\n qmarks = qmarks.groupby(\"name\").count()\n\n # Create a Pandas Excel writer using XlsxWriter as the engine.\n writer = pd.ExcelWriter(S.save_path, engine='xlsxwriter')\n\n # Write each dataframe to a different worksheet.\n\n summary_df.to_excel(writer, sheet_name='Summary')\n df.to_excel(writer, sheet_name='Main')\n floor_counts.to_excel(writer, sheet_name='Floors')\n names.to_excel(writer, sheet_name='Names')\n elevations.to_excel(writer, sheet_name='Elevations')\n priorities.to_excel(writer, sheet_name='Priorities')\n swing.to_excel(writer, sheet_name='Drops')\n phase.to_excel(writer, sheet_name='Phases')\n group_rpt.to_excel(writer, sheet_name='Groups')\n group_odr.to_excel(writer, sheet_name='Group Order')\n samples.to_excel(writer, sheet_name='Samples Trimmed')\n # first_styles_survey.to_excel(writer, sheet_name='Primary Styles Detail')\n qmarks.to_excel(writer, sheet_name=\"Question Marks\")\n\n # Close the Pandas Excel writer and output the Excel file.\n writer.save()\n print(\"Summaries done!\")\n # os.startfile(save_path)\n return df",
"def summaryText(self):\n\n print('\\nReport Summary:\\n')\n for author in self.lowQuality.keys():\n if len(self.lowQuality[author]) > 0:\n print('Author: ' + author)\n print('---------------------')\n # do some sorting for readability\n files = []\n file2rating = {}\n for fileRating in self.lowQuality[author]:\n files.append(fileRating[1])\n file2rating[fileRating[1]] = fileRating[0]\n files.sort()\n for fileRating in files:\n print(file2rating[fileRating] + ' :: ' + fileRating)\n print('\\n\\n')",
"def _show_summary(self):\n print 'Summary:'\n print ' Reports downloaded successfully: %d' % self.counts\n print ' Reports not downloaded: %d\\n' % self.failed",
"def testSummaryDOCstr(self):\n pass"
] | [
"0.625413",
"0.62132585",
"0.6162406",
"0.60121036",
"0.5929205",
"0.58661926",
"0.57498765",
"0.57135904",
"0.5660982",
"0.5657245",
"0.56555516",
"0.5653621",
"0.5631681",
"0.5626717",
"0.5612419",
"0.55790055",
"0.5574666",
"0.5551953",
"0.5544516",
"0.5527586",
"0.5504218",
"0.55011153",
"0.5462297",
"0.54321426",
"0.54233867",
"0.5417298",
"0.5416177",
"0.5410641",
"0.5391833",
"0.5386297"
] | 0.78915954 | 0 |
Returns a file path for the anonymised c3d file to be saved to. Format: "{outputdir}/{subjectname}_{condition}_{trialno}.c3d" | def createFilePath(self, outputdir, condition, trialno):
    # Build the file name from the subject name stored in the C3D parameters
    filename = self.trialC3D['parameters']['SUBJECTS']['NAMES']['value']
    # Optionally append the condition label
    if condition is not None:
        filename = "{}_{}".format(filename, condition)
    # Append the trial number (when given) and the .c3d extension
    if trialno is not None:
        filename = "{}_{}.c3d".format(filename, trialno)
    else:
        filename = "{}.c3d".format(filename)
    # Prepend the output directory when one was supplied
    if outputdir is not None:
        filepath = os.path.join(outputdir, filename)
    else:
        filepath = filename
    return filepath | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def saveC3D(self, outputdir=None, condition=None, trialno=None):\n fpath = self.createFilePath(outputdir, condition, trialno)\n self.trialC3D.write(fpath)\n return",
"def _make_output_path(self, filename):\n return os.path.join(self._output_directory, filename)",
"def write_file(country, season, final, var):\n if var=='label':\n path='../results/kmeans/'\n elif var=='cluster':\n path='../results/sequence_analysis/'\n country_ = country.lower()\n season_ = season.replace('-','_')\n file_name=country_+\"_\"+season_\n newpath=path+file_name+'/'\n if not os.path.exists(newpath):\n os.makedirs(newpath)\n f = open(newpath+file_name+\".txt\",\"w\") \n f.write(final)\n f.close()",
"def _prepare_subject_output_path(output_root, subject_id):\n output_dir = output_root / subject_id\n output_dir.mkdir(parents=True, exist_ok=True)\n return output_dir / f\"{subject_id}_task-tapping_nirs.nwb\"",
"def _output_path(name):\n output = Path(\"../Analysis Results/\").joinpath(str(date.today()))\n output.mkdir(parents=True, exist_ok=True)\n return output.joinpath(f\"{name}.png\")",
"def output_path(self):\r\n return '%s/%s' % (os.path.abspath(os.path.dirname(__file__) + 'outputs'),\r\n self.identifier)",
"def make_path(self, basename):\n return os.path.join(self.output_folder, basename.format(self.sample_name))",
"def write_to_file(output, test_case_name, path):\n path_to_store = OutputWrite.make_test_dir(path, test_case_name)\n time_stamp = OutputWrite.get_time_stamp()\n try:\n LOG.debug('Changing the dir to {0}'.format(path_to_store))\n os.chdir(path_to_store)\n except Exception as _ex_:\n LOG.exception('Error :{0}'.format(_ex_))\n else:\n file_name = os.path.join(path_to_store, test_case_name +\n time_stamp)\n LOG.debug('The file name after joining = {0}'.format(file_name))\n try:\n LOG.debug('Writing Test case output to the file')\n with open(file_name, 'w') as file_obj:\n file_obj.write(output)\n except FileNotFoundError as _ex_:\n LOG.exception('Error : {0}'.format(_ex_))",
"def _get_station_filename():\n output_dir = os.path.join(output, state, station)\n if not os.path.isdir(output_dir):\n logger.debug(\"Creating directory %s\", output_dir)\n os.makedirs(output_dir)\n return os.path.join(output_dir, \"%s.%s\" % (c_time, format))",
"def _output_log_path(name):\n output = Path(\"../Raw Data/\").joinpath(str(date.today()))\n output.mkdir(parents=True, exist_ok=True)\n return output.joinpath(\"000_logging.hdf5\")",
"def get_out_file_path(self):\n dir_path = self._get_output_dir()\n self._check_make_dirs(dir_path)\n return join(dir_path, self.FOCUSED_IMAGE_NAME)",
"def output_file_name_maker(args):\n log.debug(\"Entering output_file_name_maker()\")\n path = os.getcwd() + '/out_files/'\n if not os.path.isdir(path):\n os.mkdir(path)\n\n if args.output is None:\n out_file_name = path + args.input[:-4] + '_' + args.type + '_' + args.layer\n else:\n out_file_name = path + args.output\n\n log.debug(\"Exiting output_file_name_maker()\")\n return out_file_name",
"def get_oc_path(cfg):\n return os.path.join(\n BASE_DATA_DIR,\n \"castp\",\n \"pH\" + str(cfg.pH),\n str(cfg.mut),\n \"oc\" + str(cfg.probe) + \".csv\")",
"def _get_output_filename(dataset_dir, split_name):\n return '%s/cifar100_%s.tfrecord' % (dataset_dir, split_name)",
"def output_path(self) -> str:\n if self._output_path is None:\n if not self._root_folder:\n self._root_folder = self._env.experiments_folder\n folder = os.path.join(self._root_folder, self.key)\n\n if not os.path.exists(folder):\n os.makedirs(folder)\n\n self._output_path = folder\n\n return self._output_path",
"def get_output_path():\n return os.getcwd() + \"/output/\"",
"def GetOutputFilename(fname):\n return os.path.join(outdir, fname)",
"def output_filename(phase, debug=False, append=''):\n suffix = ''\n if append:\n suffix = '_{0}'.format(append)\n\n if debug:\n filename = os.path.abspath(config.output_path + tst_map[phase])\n else:\n filename = os.path.abspath(config.output_path + csv_map[phase])\n\n return filename + suffix + '.csv'",
"def out_path(dataset, work_dir=consts.WORK_DIR):\r\n return join_path(dataset_path(dataset, work_dir), consts.OUTPUT_DIR)",
"def fmt_run_path(model_data_dir, case, ivc, dom):\n filename = \"wrfout_d\"+dom+\"_*\"\n\n prefix_casestudy = \"CaseStudy_\"\n dtuple = datetime.strptime(case, \"%Y-%m-%d_%H:%M\")\n stime = dtuple.strftime('%-m-%-d-%Y')\n sims = \"/\"+prefix_casestudy+stime+\"/\"+ivc+\"_\"+stime.replace(\"-\", \"_\")+\"/\"\n\n model_path = model_data_dir+sims+filename\n\n return model_path, stime",
"def WriteDREAM3DFile(path, dca, verbose=False):\n # Lets write out the whole Heirarchy to a .dream3d file\n if verbose:\n print(\"Creating DREAM3D Writer filter....\")\n writer = simpl.DataContainerWriter.New()\n writer.OutputFile = (path)\n if verbose:\n print(\"Writing to file:\", path)\n writer.setDataContainerArray(dca)\n writer.execute()\n if(writer.ErrorCondition != 0):\n print(\"Error %d writing DREAM.3D File to path %s\" % writer.ErrorCondition, path)\n return writer.ErrorCondition",
"def initialize_output(fn, output_dir, station_id, dataset):\n \n source_file =fn.split('/')[-1]\n output_file = output_dir + '/' + station_id + '_' + dataset + '_harvested_' + source_file + '.nc' # creating an output file name e.g. chera5.conv._10393.nc , try 01009 faster\n return output_file , source_file",
"def create_filename (self):\n\t\tassert self.__patient_name and self.__location_name, \"New filename could not be determined, one or more needed arguments is empty!\"\n\t\t_patient_name = self.__patient_name.split(' ')\n\t\t_patient_name.reverse()\n\t\t\n\t\treturn os.path.join(os.path.dirname(self.file._path), \"%s MR %s%s\" % (self.__location_name, ', '.join(_patient_name).upper(), self._file.extension))",
"def output_file_path(self):\n return self.__output_file_path",
"def get_output_filename(self, gzip=False):\n if self.mode == 'light':\n suffix = '-light'\n else:\n suffix = ''\n\n destination = self.destination\n extension = 'fits'\n if gzip:\n destination += '-compressed'\n extension += '.gz'\n return os.path.join(destination,\n 'iphas-dr2-{0}{1}.{2}'.format(\n self.get_partname(),\n suffix,\n extension))",
"def get_file_save_path(self):\n return self.out",
"def _filename(self, corotid):\n from datasource import DataSource\n self.corotid = corotid\n self.corot = DataSource(database='corot', user='sro', host='pina.aip.de')\n \n query = \"\"\"SELECT run_code, hlfccdid, win_id \n FROM corot \n WHERE corotid = %d;\"\"\" % self.corotid\n result = self.corot.query(query)\n \n par = {'run': result[0][0],\n 'half': result[0][1].rstrip('RL'), \n 'win': result[0][2]}\n filename = '/work2/jwe/CoRoT/%(run)s/data/%(run)s_%(half)s_%(win)04d.fits' % par\n logger.info('%d = %s' % (corotid,filename))\n return filename",
"def __add_output_file_location(self, filename):\n # Return the joined path of the output directory and the filename\n return os.path.join(self._output_file_location, filename)",
"def getOutputFile(fname):\n return os.path.join(Configurations.getOutputDir(), fname)",
"def create_e3d_file(self,path='./'):\n dt=0.606*self.model_parameters['dh']/np.max(self.velocity_model['vp']) # dt needs to satify the courant condition\n t=int(self.model_parameters['duration']/dt)\n \n # Check path exists, if not create one\n if not os.path.exists(path):\n os.makedirs(path)\n \n # Create e3d parameter file\n f=open('%s%s_e3dmodel.txt'%(path,self.model_name),'w')\n f.write(\"grid x=%s z=%s dh=%s b=2 q=1\\ntime dt=%0.5f t=%s\\n\"%(self.model_parameters['xmax'],self.model_parameters['zmax'],self.model_parameters['dh'],dt,t))\n f.write(\"block p=%s s=%s r=%s Q=20 Qf=50\\n\"%(self.velocity_model['vp'][0],self.velocity_model['vs'][0],self.velocity_model['rho'][0]))\n \n for i in range(1,len(self.velocity_model['vp'])-1):\n f.write(\"block p=%s s=%s r=%s z1=%s z2=%s Q=20 Qf=50\\n\"%(self.velocity_model['vp'][i],self.velocity_model['vs'][i],self.velocity_model['rho'][i],\n self.velocity_model['depth'][i],self.velocity_model['depth'][i+1]))\n \n f.write(\"block p=%s s=%s r=%s z1=%s z2=%s Q=20 Qf=50\\n\\n\"%(self.velocity_model['vp'][i+1],self.velocity_model['vs'][i+1],self.velocity_model['rho'][i+1],\n self.velocity_model['depth'][i+1],self.model_parameters['zmax'])) # extend to the based of the model \n \n f.write(\"visual movie=5\\n\\n\")\n\n if self.source['src_type']!=4:\n f.write(\"source type=%s x=%s z=%s freq=%s amp=%s\\n\\n\"%(self.source['src_type'],self.source['srcx'],self.source['srcz'],self.source['freq'],self.source['amp'])) \n else:\n f.write(\"source type=%s x=%s z=%s freq=%s amp=%s Mxx=%s Myy=%s Mzz=%s Mxy=%s Mxz=%s Myz=%s\\n\\n\"%(self.source['src_type'],self.source['srcx'],self.source['srcz'],self.source['freq'],self.source['amp'],self.source['mt'][0],self.source['mt'][1],self.source['mt'][2],self.source['mt'][3],self.source['mt'][4],self.source['mt'][5])) \n\n for r in range(len(self.receivers['recxs'])):\n f.write('sac x=%0.3f z=%0.3f file=%s\\n'%(self.receivers['recxs'][r],self.receivers['reczs'][r],self.model_name))\n\n f.write(\"visual sample=0.1 movie=1 scale=10000000000/n\")\n f.close()\n \n print('File created: %s%s_e3dmodel.txt'%(path,self.model_name))"
] | [
"0.79284745",
"0.65506065",
"0.6297584",
"0.61337495",
"0.6113784",
"0.6045709",
"0.6026302",
"0.6013044",
"0.60040236",
"0.59769714",
"0.5932844",
"0.5885488",
"0.58698124",
"0.5857384",
"0.5840238",
"0.58320004",
"0.57913876",
"0.5790887",
"0.5785458",
"0.57839334",
"0.5778003",
"0.5771047",
"0.5769266",
"0.5762494",
"0.5759071",
"0.5740906",
"0.57350254",
"0.57332647",
"0.5696127",
"0.56657934"
] | 0.8017994 | 0 |
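For illustration, a minimal standalone sketch of the path-building behaviour documented in the createFilePath example above. The build_c3d_path helper and the SUBJ01 subject name are hypothetical, introduced only to show the expected output format; the real method reads the subject name from the trial's C3D parameters.

import os

def build_c3d_path(subject, outputdir=None, condition=None, trialno=None):
    # Start from the (anonymised) subject name
    filename = subject
    # Optionally append the condition label
    if condition is not None:
        filename = "{}_{}".format(filename, condition)
    # Append the trial number (when given) and the .c3d extension
    if trialno is not None:
        filename = "{}_{}.c3d".format(filename, trialno)
    else:
        filename = "{}.c3d".format(filename)
    # Prepend the output directory when one was supplied
    return os.path.join(outputdir, filename) if outputdir is not None else filename

print(build_c3d_path("SUBJ01", outputdir="/data/anon", condition="walking", trialno=1))
# -> /data/anon/SUBJ01_walking_1.c3d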