query: string (lengths 9 to 9.05k)
document: string (lengths 10 to 222k)
metadata: dict
negatives: sequence (length 30)
negative_scores: sequence (length 30)
document_score: string (lengths 4 to 10)
document_rank: string (2 classes)
Returns the ``BatchStats`` for a specific batch.
def get_batch_stats(self, batch):
    return self.batch_stats[batch]
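A minimal, self-contained sketch of how the positive document above might be used. The BatchStats container and its total_sentences field are assumptions inferred from the neighbouring snippets, not the original class definitions.

from collections import namedtuple

# hypothetical stand-in for the BatchStats objects the snippet stores per batch
BatchStats = namedtuple("BatchStats", ["total_sentences"])

class DocumentDatabaseSketch:
    def __init__(self):
        # batch number -> BatchStats, mirroring the snippet's self.batch_stats
        self.batch_stats = {1: BatchStats(total_sentences=1200)}

    def get_batch_stats(self, batch):
        return self.batch_stats[batch]

db = DocumentDatabaseSketch()
print(db.get_batch_stats(1).total_sentences)  # -> 1200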
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_batch_stats():\n\n # We use the moving mean as an estimate of the mean in order to perform\n # a more numerically stable calculation of the batch mean.\n # Copy for better stability.\n shift = tf.add(self._moving_mean, 0)\n counts, shifted_sum_x, shifted_sum_x2, _ = tf.nn.sufficient_statistics(\n input_batch,\n reduction_indices,\n keep_dims=True,\n shift=shift,\n name=\"batch_norm_ss\")\n\n mean, variance = tf.nn.normalize_moments(counts,\n shifted_sum_x,\n shifted_sum_x2,\n shift,\n name=\"normalize_moments\")\n\n return mean, variance", "def test_get_batch_statistics_request(self):\n self.trans_details.get_batch_statistics(\n batch_id = 123456,\n )", "def build_batch_stats():\n\n # Copy for better stability.\n # We use the moving mean as an estimate of the mean in order to perform\n # a more numerically stable calculation of the batch mean.\n shift = tf.add(self._moving_mean, 0)\n counts, shifted_sum_x, shifted_sum_x2, _ = tf.nn.sufficient_statistics(\n input_batch,\n reduction_indices,\n keep_dims=True,\n shift=shift,\n name=\"batch_norm_ss\")\n\n mean, variance = tf.nn.normalize_moments(counts,\n shifted_sum_x,\n shifted_sum_x2,\n shift,\n name=\"normalize_moments\")\n second_moment = variance + tf.square(mean)\n\n return mean, variance, second_moment", "def get_plant_batch_stats(db_path: str) -> int:\n return get_db_count(db_path, 'batches.db', 'batches')", "def print_batch_stats(self):\n\n # current epoch time, numfiles, numbytes, trans secs, status\n print(f\"TRANS_STATS_BATCH: {time.time()} {self.batchvals['transfer_name']} {self.batchvals['numfiles']} {self.filevals['totbytes']} {self.filevals['end_time'] - self.filevals['start_time']} {self.filevals['status']}\")", "def batch_to_dict(batch: BatchTrial) -> Dict[str, Any]:\n return {\n \"__type\": batch.__class__.__name__,\n \"index\": batch.index,\n \"trial_type\": batch.trial_type,\n \"ttl_seconds\": batch.ttl_seconds,\n \"status\": batch.status,\n \"status_quo\": batch.status_quo,\n \"status_quo_weight_override\": batch._status_quo_weight_override,\n \"time_created\": batch.time_created,\n \"time_completed\": batch.time_completed,\n \"time_staged\": batch.time_staged,\n \"time_run_started\": batch.time_run_started,\n \"abandoned_reason\": batch.abandoned_reason,\n \"run_metadata\": batch.run_metadata,\n \"stop_metadata\": batch.stop_metadata,\n \"generator_run_structs\": batch.generator_run_structs,\n \"runner\": batch.runner,\n \"abandoned_arms_metadata\": batch._abandoned_arms_metadata,\n \"num_arms_created\": batch._num_arms_created,\n \"optimize_for_power\": batch.optimize_for_power,\n \"generation_step_index\": batch._generation_step_index,\n \"properties\": batch._properties,\n }", "def get_batch(self):\n return self.batch", "def _get_batch(batch, ctx):\n if isinstance(batch, mx.io.DataBatch):\n data = batch.data[0]\n label = batch.label[0]\n else:\n data, label = batch\n return (gluon.utils.split_and_load(data, ctx),\n gluon.utils.split_and_load(label, ctx),\n data.shape[0])", "def stats_batchwise(data_source, batch_size=1024):\n mean = np.zeros(data_source.dshape, dtype=np.float32)\n mean_xs = np.zeros_like(mean, dtype=np.float32)\n\n for x, _ in iterate_batches(data_source, batch_size, expand=False):\n corr_fact = float(x.shape[0]) / batch_size\n mean += x.mean(axis=0) * corr_fact\n mean_xs += (x ** 2).mean(axis=0) * corr_fact\n\n corr_fact = float(batch_size) / data_source.n_data\n mean *= corr_fact\n mean_xs *= corr_fact\n std = np.sqrt(mean_xs - mean ** 2)\n\n return mean, std", "def get_batch(self, 
batch_id):\n #fmt = lambda x: join(self.path, self.simulation_paths[x])\n fmt = lambda x: self.simulation_paths[x]\n simulation_paths = [fmt(i) for i in self.batch_indices[batch_id]]\n return Batch(simulation_paths, root=self.path)", "def get_batch(self, name):\n batches = self._meta['sets'].get('batches', {})\n if batches.get(name):\n b = name\n elif batches.get(name):\n b = name\n else:\n raise KeyError('No Batch found named {}.'.format(name))\n return qp.Batch(self, b)", "def __call__(self, batch: Dict[str, Tensor]) -> Tuple[Tensor, Dict[str, float]]:\n obs, actions, next_obs = get_keys(batch, *self.batch_keys)\n loss = -self.model_likelihood(obs, actions, next_obs).mean()\n return loss, {\"loss(model)\": loss.item()}", "def batch_info():\n return BatchInfo(\"UFG Hackathon\")", "def get_batch(self, batch_size):\n return random.sample(self.buffer, batch_size)", "def sample_batch(self, batch_size):\n batch = []\n\n # Sample using prorities\n if(self.with_per):\n T = self.buffer.total() // batch_size\n #print(\"T is \",T)\n for i in range(batch_size):\n a, b = T * i, T * (i + 1)\n s = random.uniform(a, b)\n idx, error, data = self.buffer.get(s)\n #print(\"sampled data \", s, \" \",data, end=\" \")\n batch.append((*data, idx))\n\n idx = np.array([i[2] for i in batch])\n #idx in the offline buffer\n \n # Sample randomly from Buffer\n elif self.count < batch_size:\n idx = None\n batch = random.sample(self.buffer, self.count)\n else:\n idx = None\n batch = random.sample(self.buffer, batch_size)\n\n # Return a batch of experience\n names_batch = np.array([i[1] for i in batch])\n\n return names_batch, idx", "def get_keyword_stats(self, adgroup_id, batch=False):\n path = '%s/keywordstats' % adgroup_id\n return self.make_request(path, 'GET', batch=batch)", "def sync_batch_stats(state: TrainState) -> TrainState:\n # Each device has its own version of the running average batch\n # statistics and those are synced before evaluation\n return state.replace(batch_stats=cross_replica_mean(state.batch_stats))", "def evaluate_batch(self, batch: TorchData) -> Dict[str, Any]:\n batch = cast(Tuple[torch.Tensor, torch.Tensor], batch)\n data, labels = batch\n\n output = self.model(data)\n accuracy = accuracy_rate(output, labels)\n return {\"validation_accuracy\": accuracy, \"validation_error\": 1.0 - accuracy}", "def get_loss(self, session, batch):\n\n input_feed = {}\n input_feed[self.context_ids] = batch.context_ids\n input_feed[self.context_mask] = batch.context_mask\n input_feed[self.qn_ids] = batch.qn_ids\n input_feed[self.qn_mask] = batch.qn_mask\n input_feed[self.ans_ids] = batch.ans_ids\n input_feed[self.ans_mask] = batch.ans_mask\n if not self.FLAGS.use_raw_graph:\n input_feed[self.context_embedding] = batch.context_embeddings\n # Note: don't supply keep_prob here, so it will default to 1 i.e. 
no dropout\n output_feed = [self.dev_loss]\n [loss] = session.run(output_feed, input_feed)\n\n return loss", "def get_batch(self):\n\t\tbatch = np.arange(len(self.mem_size))\n\t\tbatch = batch[:self.batch_size]\n\n\t\treturn np.array(self.memory)[batch]", "def batch_metadata(batch_name):\r\n sql = \"\"\"\r\n select top 1 batch_id, batch_name, notification_email_recipients\r\n from dbo.Metadata_ETL_Batch\r\n where batch_name = '{}';\r\n \"\"\".format(batch_name)\r\n with pyodbc.connect(ETL_LOAD_A_ODBC_STRING) as conn:\r\n cursor = conn.execute(sql)\r\n field_names = [column[0] for column in cursor.description]\r\n try:\r\n meta = next(dict(zip(field_names, row)) for row in cursor)\r\n except StopIteration:\r\n raise AttributeError(\"batch_name does not exist in\"\r\n \" batch metadata table.\")\r\n return meta", "def get_batch_data_and_metadata(\n self,\n batch_definition: BatchDefinition,\n ) -> Tuple[Any, BatchSpec, BatchMarkers]: # batch_data\n batch_spec: BatchSpec = self.build_batch_spec(batch_definition=batch_definition)\n batch_data, batch_markers = self._execution_engine.get_batch_data_and_markers(\n batch_spec=batch_spec\n )\n self._execution_engine.load_batch_data(batch_definition.id, batch_data)\n return (\n batch_data,\n batch_spec,\n batch_markers,\n )", "def num_batches(self):\n\t\t\n\t\treturn len(self.batch_stats)", "def _get_batch_data(batch, ctx):\n data, label = batch\n return (mx.gluon.utils.split_and_load(data, ctx),\n mx.gluon.utils.split_and_load(label, ctx),\n data.shape[0])", "def get_batch(self, batch_size):\n n, _ = self.contexts.shape\n if self.buffer_s == -1:\n # use all the data\n ind = np.random.choice(range(n), batch_size)\n else:\n # use only buffer (last buffer_s observations)\n ind = np.random.choice(range(max(0, n - self.buffer_s), n), batch_size)\n return self.contexts[ind, :], self.rewards[ind, :]", "def get_batch(self, batch_kwargs, batch_parameters=None) -> None:\n raise NotImplementedError", "def sample_batch(self, batch_size):\n batch = []\n\n # Sample using prorities\n if(self.with_per):\n T = self.buffer.total() // batch_size\n for i in range(batch_size):\n a, b = T * i, T * (i + 1)\n s = random.uniform(a, b)\n idx, error, data = self.buffer.get(s)\n batch.append((*data, idx))\n idx = np.array([i[5] for i in batch])\n #TD errors are only updated for transitions that are replayed\n \n # Sample randomly from Buffer\n elif self.count < batch_size:\n idx = None\n batch = random.sample(self.buffer, self.count)\n else:\n idx = None\n batch = random.sample(self.buffer, batch_size)\n\n # Return a batch of experience\n s_batch = np.array([i[0] for i in batch])\n a_batch = np.array([i[1] for i in batch])\n r_batch = np.array([i[2] for i in batch])\n d_batch = np.array([i[3] for i in batch])\n new_s_batch = np.array([i[4] for i in batch])\n\n return s_batch, a_batch, r_batch, d_batch, new_s_batch, idx", "def get_stat(self, name: str) -> int:\n return self._mallctl(f\"stats.{name}\")", "def __call__(self, batch):\n obs, is_ratios = dutil.get_keys(batch, SampleBatch.CUR_OBS, self.IS_RATIOS)\n\n values = self.critic(obs).squeeze(-1)\n with torch.no_grad():\n targets = self.sampled_one_step_state_values(batch)\n value_loss = torch.mean(\n is_ratios * torch.nn.MSELoss(reduction=\"none\")(values, targets) / 2\n )\n return value_loss, {\"loss(critic)\": value_loss.item()}", "def run_training_batch(self, session, batch):\n feed_dict = self.batch_to_feed(batch)\n feed_dict[self.use_dropout_placeholder] = 1.0\n fetches = [self.loss, self.train_op]\n\n # options = 
tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)\n # run_metadata = tf.RunMetadata()\n \n loss, _ = session.run(fetches, feed_dict=feed_dict)\n # loss, _ = session.run(fetches,\n # feed_dict=feed_dict,\n # options=options,\n # run_metadata=run_metadata)\n \n # fetched_timeline = timeline.Timeline(run_metadata.step_stats)\n # chrome_trace = fetched_timeline.generate_chrome_trace_format()\n # with open('timeline.json', 'w') as f:\n # f.write(chrome_trace)\n \n return loss" ]
[ "0.65609884", "0.65114206", "0.65014577", "0.6211535", "0.59668744", "0.5954246", "0.58340657", "0.5821755", "0.57448745", "0.57334435", "0.57184416", "0.5670613", "0.56242794", "0.560072", "0.55778915", "0.5565209", "0.55386823", "0.5498818", "0.54651314", "0.5450578", "0.54305553", "0.54261255", "0.54030293", "0.5400469", "0.53819317", "0.53796047", "0.53737915", "0.5369569", "0.5344904", "0.5344596" ]
0.89985496
0
Convenience method that sums up all the sentences across all batches.
def get_total_sentences(self):
    # loop through batches and add up all their individual sentence counts
    total_sentences = 0
    for batch in self.batch_stats:
        total_sentences += self.batch_stats[batch].total_sentences
    return total_sentences
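The same accumulation can also be written as a one-line sum. The sketch below again assumes that batch_stats maps batch numbers to objects exposing a total_sentences count.

from collections import namedtuple

BatchStats = namedtuple("BatchStats", ["total_sentences"])  # assumed shape

batch_stats = {1: BatchStats(500), 2: BatchStats(750)}  # hypothetical inventory

# equivalent to the loop above, written as a generator-expression sum
total_sentences = sum(stats.total_sentences for stats in batch_stats.values())
print(total_sentences)  # -> 1250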
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calculate_texts(self) -> None:\n texts = []\n for text in self.texts:\n paragraphs = list(filter(lambda x: x != \"\", text.split(\"\\n\\n\")))\n for paragraph in paragraphs:\n text = paragraph.replace(\"\\n\", \" \").strip()\n if len(text) > self.split_threshold_min:\n text_sentences = nlp(text)\n sentences = []\n for sentence in text_sentences.sents:\n current = sentence.text\n sentences.append(current.strip())\n texts.extend(sentences)\n else:\n texts.append(text)\n self.texts = list(set(texts))", "def summarize(self, doc):\n import torch\n\n with torch.no_grad():\n answers_input_ids = self.tokenizer.batch_encode_plus(\n [doc], return_tensors=\"pt\", truncation=True, max_length=1024\n )[\"input_ids\"].to(self.torch_device)\n summary_ids = self.model.generate(\n answers_input_ids,\n num_beams=4,\n length_penalty=2.0,\n max_length=142,\n min_length=56,\n no_repeat_ngram_size=3,\n )\n\n exec_sum = self.tokenizer.decode(\n summary_ids.squeeze(), skip_special_tokens=True\n )\n return exec_sum", "def get_batches(summaries, texts, batch_size):\r\n for batch_i in range(0, len(texts)//batch_size):\r\n start_i = batch_i * batch_size\r\n summaries_batch = summaries[start_i:start_i + batch_size]\r\n texts_batch = texts[start_i:start_i + batch_size]\r\n pad_summaries_batch = np.array(pad_sentence_batch(summaries_batch))\r\n pad_texts_batch = np.array(pad_sentence_batch(texts_batch))\r\n \r\n # Need the lengths for the _lengths parameters\r\n pad_summaries_lengths = []\r\n for summary in pad_summaries_batch:\r\n pad_summaries_lengths.append(len(summary))\r\n \r\n pad_texts_lengths = []\r\n for text in pad_texts_batch:\r\n pad_texts_lengths.append(len(text))\r\n \r\n yield pad_summaries_batch, pad_texts_batch, pad_summaries_lengths, pad_texts_lengths", "def get_batches(summaries, texts, batch_size):\n for batch_i in range(0, len(texts)//batch_size):\n start_i = batch_i * batch_size\n summaries_batch = summaries[start_i:start_i + batch_size]\n texts_batch = texts[start_i:start_i + batch_size]\n pad_summaries_batch = np.array(pad_sentence_batch(summaries_batch))\n pad_texts_batch = np.array(pad_sentence_batch(texts_batch))\n \n # Need the lengths for the _lengths parameters\n pad_summaries_lengths = []\n for summary in pad_summaries_batch:\n pad_summaries_lengths.append(len(summary))\n \n pad_texts_lengths = []\n for text in pad_texts_batch:\n pad_texts_lengths.append(len(text))\n \n yield pad_summaries_batch, pad_texts_batch, pad_summaries_lengths, pad_texts_lengths", "def run_summarized_text(text,lines):\r\n \r\n #text_preprocessing\r\n words = word_tokenize(text)\r\n # print(words)\r\n print(\"\\n\")\r\n ps = PorterStemmer()\r\n lem = WordNetLemmatizer()\r\n stopWords = set(stopwords.words(\"english\"))\r\n # print(stopWords)\r\n print(\"\\n\")\r\n # 1 Create the word frequency table\r\n freq_table = calc_weighted_frequency(words,ps,lem,stopWords,text)\r\n\r\n '''\r\n We already have a sentence tokenizer, so we just need \r\n to run the sent_tokenize() method to create the array of sentences.\r\n '''\r\n\r\n # 2 Tokenize the sentences\r\n sentences = sent_tokenize(text)\r\n print(sentences)\r\n print(\"\\n\")\r\n\r\n # 3 Important Algorithm: score the sentences\r\n sentence_scores = get_sentence_score(sentences, freq_table)\r\n\r\n #\r\n\r\n # 4 Important Algorithm: Generate the summary\r\n summary = generate_summary(sentence_scores,lines)\r\n\r\n return summary", "def batch_gen():\n i = 0\n while len(all_sentences) - i >= batch_size:\n # TODO this is a mess...\n yield np.stack([\n np.pad(\n 
np.stack(\n [embeddings[id]\n for id in sentence[:max_sentence_length]]), [[\n 0, max_sentence_length -\n min(len(sentence), max_sentence_length)\n ], [0, 0]],\n 'constant',\n constant_values=0)\n for sentence in all_sentences[i:i + batch_size]\n ])\n\n i += batch_size", "def __call__(self, docs_batch: List[str]) -> Tuple[List[List[str]], List[List[int]]]:\n text_batch_list = []\n text_batch = []\n nums_batch_list = []\n nums_batch = []\n count_texts = 0\n text = \"\"\n curr_doc = 0\n for n, doc in enumerate(docs_batch):\n sentences = sent_tokenize(doc)\n for sentence in sentences:\n if len(text) + len(sentence) < self.max_chunk_len and n == curr_doc:\n text += f\"{sentence} \"\n else:\n if count_texts < self.batch_size:\n text_batch.append(text.strip())\n if n == curr_doc:\n nums_batch.append(n)\n else:\n nums_batch.append(n - 1)\n count_texts += 1\n else:\n text_batch_list.append(text_batch)\n text_batch = []\n nums_batch_list.append(nums_batch)\n nums_batch = [n]\n count_texts = 0\n curr_doc = n\n text = f\"{sentence} \"\n\n if text:\n text_batch.append(text.strip())\n text_batch_list.append(text_batch)\n nums_batch.append(len(docs_batch) - 1)\n nums_batch_list.append(nums_batch)\n\n return text_batch_list, nums_batch_list", "def bag_of_words(batch, TEXT):\n V = len(TEXT.vocab)\n X = torch.zeros(batch.text.size(0), V)\n ones = torch.ones(batch.text.size(1))\n for b in range(batch.text.size(0)):\n X[b].index_add_(0, batch.text.data[b], ones)\n X[b][TEXT.vocab.stoi['<pad>']] = 0\n X = Variable(X, requires_grad=False)\n return X", "def batch_sentences(self, sentences, bos, eos, indices=None):\n batch_size = len(sentences)\n slen = max([len(s) for s in sentences])\n if bos:\n slen += 1\n if eos:\n slen += 1\n \n def pad_sent(s, max_len, bos, eos):\n ret = s\n if bos:\n ret = [self.bos_index] + ret\n if eos:\n ret = ret + [self.eos_index]\n ret = ret + [self.pad_index for _ in range(max_len - len(ret))]\n return ret\n\n sent_tensor = [pad_sent(s, slen, bos, eos) for s in sentences]\n sent_tensor = torch.from_numpy(np.array(sent_tensor)).long()\n \n if indices is None:\n return sent_tensor\n else:\n return sent_tensor, indices", "def yield_batches(self, texts):\n batch = []\n for text in self._iter_texts(texts):\n batch.append(text)\n if len(batch) == self.batch_size:\n yield batch\n batch = []\n\n if batch:\n yield batch", "def train(self, corpus):\n for sentence in corpus.corpus:\n cleanSentence = sentence.cleanSentence()\n for datum in cleanSentence.data:\n token = datum.word\n self.unigramCounts[token] = self.unigramCounts[token] + 1\n self.total += 1\n\n i = 0\n while i < len(sentence.data) - 1:\n token = str(cleanSentence.get(i))\n self.followingWords[token].add(str(cleanSentence.get(i+1)))\n i += 1\n\n i = 1\n while i < len(sentence.data):\n bigram = str(cleanSentence.get(i-1)) + \" \" + str(cleanSentence.get(i))\n self.bigramCounts[bigram] = self.bigramCounts[bigram] + 1\n\n self.precedingWords[str(cleanSentence.get(i))].add(str(cleanSentence.get(i-1)))\n i += 1\n self.precedingWordsTotal = sum(map(lambda x: len(x), self.precedingWords.values()))\n\n i = 2\n while i < len(sentence.data):\n trigram = str(cleanSentence.get(i-2)) + \" \" + str(cleanSentence.get(i-1)) + \" \" + str(cleanSentence.get(i))\n self.trigramCounts[trigram] = self.trigramCounts[trigram] + 1\n i += 1\n\n #print('precedingWords')\n #print(self.precedingWords)\n #print('followingWords')\n #print(self.followingWords)\n #print('unigrams')\n #print(self.unigramCounts)\n #print('bigrams')\n #print(self.bigramCounts)\n\n 
#self.discount(self.trigramCounts)\n #self.discount(self.bigramCounts)\n #self.discount(self.unigramCounts)", "def summarize(self, text, text_index, n):\r\n self.text_index = text_index\r\n sentences = sent_tokenize(text)\r\n if len(sentences) < n:\r\n raise ValueError(\"Cannot extract %s sentences from text with %s sentences\" % \\\r\n (n, len(sentences)))\r\n preprText = self.preprocess_document(text)\r\n words = self.word_tokenize_preprocessed(preprText)\r\n tfIdfTable = self._create_tf_idf_table(words)\r\n # print({k: v for k, v in sorted(freqTable.items(), key=lambda item: item[1], reverse=True)})\r\n sentenceScores = np.array(self._score_sentences(sentences, tfIdfTable))\r\n nBestIndexes = np.argpartition(sentenceScores, -n)[-n:] # indexes of sentences with n best scores\r\n nBestIndexes = sorted(nBestIndexes)\r\n\r\n summary = ''\r\n for index in nBestIndexes:\r\n summary += sentences[index] + \" \"\r\n\r\n self.text_index = None # reset text_index once completed\r\n return summary[:-1] # remove last space\r", "def summarize(self, text, n):\n sents = sent_tokenize(text)\n assert n <= len(sents)\n word_sent = [word_tokenize(s.lower()) for s in sents]\n self._freq = self._compute_frequencies(word_sent)\n ranking = defaultdict(int)\n for i,sent in enumerate(word_sent):\n for w in sent:\n if w in self._freq:\n ranking[i] += self._freq[w]\n sents_idx = self._rank(ranking, n)\n return [sents[j] for j in sents_idx]", "def universal_sentence_embedding(sentences, mask, sqrt=True):\n # need to mask out the padded chars\n sentence_sums = th.bmm(\n sentences.permute(0, 2, 1),\n mask.float().unsqueeze(-1)).squeeze(-1)\n divisor = mask.sum(dim=1).view(-1, 1).float()\n if sqrt:\n divisor = divisor.sqrt()\n sentence_sums /= divisor\n return sentence_sums", "def batchify_summary(batch):\r\n\r\n if type(batch[0][1]) != torch.LongTensor:\r\n no_elmo, use_char = (True, False) if batch[0][1] == -2 else (False, False)\r\n else:\r\n no_elmo, use_char = True, True\r\n\r\n docs = [ex[0] for ex in batch]\r\n docs_char = [ex[1] for ex in batch]\r\n summaries = [ex[2] for ex in batch]\r\n\r\n # Batch documents\r\n max_doc_length = max([d.size(0) for d in docs])\r\n x1_len = torch.LongTensor(len(docs)).zero_()\r\n x1 = torch.LongTensor(len(docs),\r\n max_doc_length).zero_() if no_elmo else torch.LongTensor(len(docs),\r\n max_doc_length,\r\n 50).zero_()\r\n x1_char = torch.LongTensor(len(docs),\r\n max_doc_length,\r\n docs_char[0].size(1)).zero_() if (no_elmo and use_char) else None\r\n for i, d in enumerate(docs):\r\n x1_len[i] = d.size(0)\r\n x1[i, :d.size(0)].copy_(d)\r\n if not no_elmo:\r\n x1_char[i, :d.size(0), :].copy_(docs_char[i])\r\n\r\n # Batch answers\r\n max_ans_length = max([a.size(0) for a in summaries])\r\n ans_len = torch.LongTensor(len(summaries)).zero_()\r\n ans = torch.LongTensor(len(summaries), max_ans_length).zero_()\r\n for i, a in enumerate(summaries):\r\n ans_len[i] = a.size(0)\r\n ans[i, :a.size(0)].copy_(a)\r\n\r\n ids = [ex[3] for ex in batch]\r\n contexts = [ex[4] for ex in batch]\r\n # FIXME: multiple answers are possible, fix vectorize also.\r\n targets = [ex[5] for ex in batch]\r\n src_vocabs = [ex[6] for ex in batch]\r\n source_maps = []\r\n alignments = []\r\n\r\n # Prepare source vocabs, alignment [required for Copy Attention]\r\n for eid, context, target, (token2idx, idx2token) in \\\r\n zip(ids, contexts, targets, src_vocabs):\r\n # Mapping source tokens to indices in the dynamic dict.\r\n src_map = torch.LongTensor([token2idx[w] for w in context])\r\n 
source_maps.append(src_map)\r\n\r\n # TODO: does skipping the first and last token in answer valid?\r\n mask = torch.LongTensor([token2idx[w] if w in token2idx\r\n else UNK for w in target])\r\n alignments.append(mask)\r\n\r\n return {'doc_rep': x1,\r\n 'doc_char_rep': x1_char,\r\n 'doc_len': x1_len,\r\n 'summ_rep': ans,\r\n 'summ_len': ans_len,\r\n 'ids': ids,\r\n 'documents': contexts,\r\n 'answers': targets,\r\n 'source_vocabs': src_vocabs,\r\n 'src_map': source_maps,\r\n 'alignment': alignments}", "def hf_summarizer(sentences):\n\n max_chunk = 512\n current_chunk = 0\n chunks = []\n\n for sentence in sentences:\n if len(chunks) == current_chunk +1 :\n if len(chunks[current_chunk]) + len(sentence.split()) <= max_chunk:\n chunks[current_chunk].extend(sentence.split())\n else:\n current_chunk += 1\n chunks.append(sentence.split())\n else:\n print(current_chunk)\n chunks.append(sentence.split())\n\n # print(chunks[0])\n\n for chunk_id in range(len(chunks)):\n chunks[chunk_id] = ' '.join(chunks[chunk_id])\n\n #print(len(chunks[0].split()))\n\n summarizer = pipeline(\"summarization\")\n summarized = summarizer(chunks, min_length = 50, max_length = 100, do_sample=False)\n\n text = ''.join([sum[\"summary_text\"] for sum in summarized])\n\n with open(\"static/files/book.txt\", \"w\",encoding=\"utf-8\") as f:\n f.write(text)\n \n return summarized", "def _score_sentence(self, feats, tags):\n score = torch.zeros((self.batch_size,1), device=self.device)\n tags = torch.cat([torch.full((self.batch_size, 1, 1), self.tag2idx[START_TAG], dtype=torch.long, device=self.device), tags],dim=1)\n for i in range(feats.shape[1]):\n feat = feats[:,i,:]\n \n score = score + self.transitions[tags[:,i+1], tags[:,i]] + feat.gather(dim=-1, index=tags[:,i+1])\n \n score = score + self.transitions[self.tag2idx[STOP_TAG], tags[:,-1]]\n\n return score", "def train(self, corpus):\n for sentence in corpus.corpus:\n for datum in sentence.data: \n self.unigramCounts[datum.word] += 1\n self.totalCount += 1", "def train(self, corpus):\n lastToken = \"#\"\n for sentence in corpus.corpus:\n for datum in sentence.data:\n token = datum.word\n self.reverseBigramCount[token][lastToken] += 1\n self.bigramCount[lastToken][token] += 1\n self.unigramCount[token] += 1\n self.total += 1\n lastToken = token", "def batch_sentences_v2(sentences, lm_labels=None):\n # sentences = sorted(sentences, key=lambda x: len(x), reverse=True)\n lengths = torch.LongTensor([len(s) + 2 for s in sentences])\n sent = torch.LongTensor(lengths.max().item(), lengths.size(0)).fill_(1)\n if lm_labels is not None:\n _labels = torch.LongTensor(lengths.max().item(), lengths.size(0)).fill_(-1)\n\n sent[0] = 0\n for i, s in enumerate(sentences):\n if lengths[i] > 2: # if sentence not empty\n sent[1:lengths[i] - 1, i].copy_(torch.from_numpy(s.astype(np.int64)))\n if lm_labels is not None:\n lm = np.array(lm_labels[i])\n _labels[1:lengths[i] - 1, i].copy_(torch.from_numpy(lm.astype(np.int64)))\n sent[lengths[i] - 1, i] = 2\n if lm_labels is not None:\n _labels[lengths[i] - 1, i] = -1\n\n if lm_labels is not None:\n return sent, lengths, _labels\n return sent, lengths", "def lstm_summarize(text, query, lstm_model, nn_model, stopwords, word_indices, limit = 250, remove_stop_words = True,with_txt_vect=False):\n if remove_stop_words : \n stopwords = stop_words()\n else :\n stopwords = []\n \n if with_txt_vect :\n text_vector = lstm_infer_vector(lstm_model, text, stopwords,word_indices)\n \n query_vector = lstm_infer_vector(lstm_model, query, stopwords,word_indices)\n \n 
summary = \"\"\n summary_vector = np.zeros(400)\n summary_idx = []\n \n sentences = text.split('.')\n sentences = np.asarray(sentences)\n \n remaining_sentences = copy.copy(sentences)\n \n size = 0\n counter = 0\n while size < limit and len(remaining_sentences)>0 :\n counter = counter+1\n scores = []\n for sentence in remaining_sentences :\n sentence_vector = lstm_infer_vector(lstm_model, sentence, stopwords,word_indices)\n if with_txt_vect :\n nn_input = np.hstack([query_vector, summary_vector, sentence_vector, text_vector])\n else:\n nn_input = np.hstack([query_vector, summary_vector, sentence_vector])\n nn_input = np.asarray([nn_input]) # weird but it is important to do it\n score = nn_model.predict(nn_input) \n scores.append(score)\n #print(scores)\n max_idx_rem = int(np.argmax(scores))\n idx_selected_sentence = np.arange(len(sentences))[sentences == remaining_sentences[max_idx_rem]]\n idx_selected_sentence = int(idx_selected_sentence[0])\n size += len(remaining_sentences[max_idx_rem].split())\n \n remaining_sentences = list(remaining_sentences)\n del remaining_sentences[max_idx_rem]\n bisect.insort_left(summary_idx,idx_selected_sentence)\n\n summary = \"\"\n\n for idx in summary_idx:\n summary = summary + \" \" + sentences[idx]\n\n summary_vector = lstm_infer_vector(lstm_model, summary, stopwords,word_indices)\n\n return summary", "def queue_all_texts(self, q, texts, window_size):\n for batch_num, batch in enumerate(self.yield_batches(texts)):\n q.put(batch, block=True)\n before = self._num_docs / self.log_every\n self._num_docs += sum(len(doc) - window_size + 1 for doc in batch)\n if before < (self._num_docs / self.log_every):\n logger.info(\n \"%d batches submitted to accumulate stats from %d documents (%d virtual)\",\n (batch_num + 1), (batch_num + 1) * self.batch_size, self._num_docs)", "def total_exs(dataset):\n total = 0\n for article in dataset['data']:\n for para in article['paragraphs']:\n total += len(para['qas'])\n return total", "def train(self, corpus): \n for sentence in corpus.corpus:\n prev_word = None\n for datum in sentence.data:\n word = datum.word\n self.unigram_count[word] += 1\n if prev_word != None:\n self.bigram_count[prev_word][word] += 1\n prev_word = word\n \n self.vocabulary_size = len(self.unigram_count)\n self.num_words = sum(self.unigram_count.values())", "def total_test_batches(self) -> int:\n return sum(self.trainer.num_test_batches)", "def fit(self, text):\n\n if self.lowercase:\n text = text.lower()\n\n print(\"Tokenize sentences...\")\n tokens = word_tokenize(text)\n\n self.words_set_size = len(set(tokens))\n\n print(\"Collecting of ngram counters...\")\n\n self.unigram_counts = Counter(tokens)\n self.bigram_counts = Counter(bigrams(tokens))\n\n return self", "def processCorpus(self, texts: [Text]):\n \n remaining_texts = texts\n curr_texts = []\n processed_texts = []\n cases = []\n num_failed_test_cases = []\n num_failed_test_cases_per_asr = {}\n num_processed_texts = []\n for asr in self.asrs:\n num_failed_test_cases_per_asr[asr.getName()] = []\n \n for i in range(self.num_iteration):\n # print(f\"Iteration: {i+1}\")\n \n if self.text_batch_size :\n curr_texts = remaining_texts[:self.text_batch_size]\n remaining_texts = remaining_texts[self.text_batch_size:]\n else : # use global visibility\n curr_texts = remaining_texts\n\n if len(curr_texts) > 0 :\n \n curr_cases, curr_processsed_texts, unprocessed_texts = self.processOneIteration(curr_texts, processed_texts, cases)\n cases.extend(curr_cases)\n processed_texts.extend(curr_processsed_texts)\n if 
self.text_batch_size :\n remaining_texts.extend(unprocessed_texts)\n else :\n remaining_texts = unprocessed_texts\n\n num_failed_test_cases.append(calculate_cases(cases, mode=FAILED_TEST_CASE))\n for asr in self.asrs:\n num_failed_test_cases_per_asr[asr.getName()].append(calculate_cases_per_asr(\n cases, mode=FAILED_TEST_CASE, asr_name=asr.getName()))\n num_processed_texts.append(len(processed_texts))\n else :\n print(\"Texts are not enough!\")\n \n # shuffle the remaining texts\n np.random.shuffle(remaining_texts)\n \n data = {}\n data[\"number_of_failed_test_cases_all\"] = num_failed_test_cases\n data[\"number_of_failed_test_cases_per_asr\"] = num_failed_test_cases_per_asr\n data[\"number_of_processed_texts\"] = num_processed_texts\n with open(self.outputfile_failed_test_case, 'w') as outfile:\n json.dump(data, outfile, indent=2, sort_keys=True)\n\n if self.target_asr :\n self.saveFailedTestCases(processed_texts, cases)", "def get_whole_and_per_sentence_flair_sentiments(list_of_comments):\n\n for comment in list_of_comments:\n result_sum = get_whole_flair_sentiment(comment)\n print(comment)\n print('Whole comment sentiment:', result_sum)\n print()\n sentence_score_list = get_sentence_sentiments(comment)\n print(comment)\n print('per sentence sentiment:', sentence_score_list)\n print()", "def batchify(self, i, iterator):\n print(f'Starting Batch {i}')\n iterator = [item.strip() for item in iterator]\n max_length = self.max_seq_length - 2 # for special tokens\n\n batches = []\n n = len(iterator)\n sentence_count = 0\n index_start = 0\n index_stop = 0\n\n while index_stop < n:\n if (len(self.tokenizer.encode(' '.join(iterator[index_start:index_stop+1])).tokens) < max_length):\n index_start += 1\n index_stop += 1\n while (len(self.tokenizer.encode(' '.join(iterator[index_start:index_stop+1])).tokens) < max_length) and (index_stop<n):\n index_stop += 1\n batches.append(iterator[index_start:index_stop])\n index_start = index_stop\n print(f'Batch {i} Done')\n return batches", "def _raw_word_count(self, job):\n return sum(len(sentence.words) for sentence in job)" ]
[ "0.62009716", "0.6035176", "0.60308325", "0.6027758", "0.58148813", "0.5691236", "0.567909", "0.5636329", "0.562765", "0.56133324", "0.5553161", "0.55471104", "0.55366445", "0.5535218", "0.5534292", "0.5531749", "0.5505629", "0.54945356", "0.5477114", "0.54692113", "0.5455944", "0.54458296", "0.5425127", "0.54187727", "0.5410996", "0.54065347", "0.53945845", "0.5387348", "0.5384576", "0.53568333" ]
0.6880938
0
Adds ``documents`` to the document inventory, writing to disk in batches of 500,000.
def add_documents(self, documents):
    # flag for StopIteration exceptions
    more_documents = True
    # loop while there are still documents in the iterator
    while more_documents:
        # increment batch number
        batch = len(self.batch_stats) + 1
        # count sentences
        sentences_count = 0
        # create temporary batch data file in the version directory
        batch_file = os.path.join(self.file_base.get_version_path(self.version), "data.jl.gz.temp")
        # try to read the next batch of files, catch exception and stop if there are no more
        try:
            # get next document before opening the file just to make sure it's there
            document = documents.next()
            # open the data file
            with gzip.open(batch_file, "wb") as outfile:
                # loop through DOCUMENT_BATCH_SIZE documents
                for i in range(DocumentDatabase.DOCUMENT_BATCH_SIZE):
                    # count sentences in document
                    for paragraph in document["paragraphs"]:
                        sentences_count += len(paragraph["sentences"])
                    # write JSON to file one line at a time
                    outfile.write("%s\n" % json.dumps(document))
                    # if we are not done with this batch, retrieve the next document
                    if i < DocumentDatabase.DOCUMENT_BATCH_SIZE - 1:
                        document = documents.next()
        except StopIteration:
            # the end of the documents stream, set the flag to False
            more_documents = False
        # make sure the batch isn't empty
        if sentences_count > 0:
            # create the new batch in the file system
            self.version_batches.create_latest_version()
            # add the stats to the statistics hash
            self.batch_stats[batch] = BatchStats(sentences_count)
            # write the batch statistics to file
            with codecs.open(self._get_batch_stat_file(batch), "wb", "utf-8") as outfile:
                # write the JSON representation for the stats
                outfile.write(json.dumps(self.batch_stats[batch].to_json()))
            # move the temp data file to the correct location inside the version folder
            os.rename(batch_file, self._get_batch_file(batch))
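The positive document above is Python 2 (documents.next()) and leans on class internals that are not shown in this row. Below is a self-contained Python 3 sketch of the same pattern, assuming only the docstring's 500,000 batch size and the temp-file-then-rename layout visible in the snippet: stream documents into a gzipped JSON-lines temp file, one batch at a time, then rename each finished file into place. The write_batches name and the data-<n>.jl.gz naming are illustrative, not the original API.

import gzip
import itertools
import json
import os

BATCH_SIZE = 500000  # the snippet's DOCUMENT_BATCH_SIZE, per the docstring above

def write_batches(documents, out_dir):
    # stream dict documents into gzipped JSON-lines files, one file per batch
    documents = iter(documents)
    batch = 0
    while True:
        temp_path = os.path.join(out_dir, "data.jl.gz.temp")
        written = 0
        with gzip.open(temp_path, "wt", encoding="utf-8") as outfile:
            # islice keeps consuming the same iterator, so each pass gets the next batch
            for document in itertools.islice(documents, BATCH_SIZE):
                outfile.write(json.dumps(document) + "\n")
                written += 1
        if written == 0:
            # the iterator is exhausted; discard the empty temp file and stop
            os.remove(temp_path)
            break
        batch += 1
        # move the finished temp file into its final per-batch location
        os.rename(temp_path, os.path.join(out_dir, "data-%d.jl.gz" % batch))

The rename-at-the-end step mirrors the original's design choice: a batch only appears under its final name once it has been written completely, so a crash mid-write never leaves a partial batch masquerading as a finished one.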
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def store_documents(self, documents: list):\n requests = [\n {'PutRequest': {'Item': Item}} \n for Item in documents\n ]\n ticks = [d['symbol'] for d in documents]\n size = getsizeof(requests)\n exceptions = self.dynamo_client.exceptions\n errors = (exceptions.ProvisionedThroughputExceededException)\n\n self.Logger.info(\n f'Writing batch of {ticks} into dynamodb '\n f'with size {size} bytes',\n extra={\"message_info\": {\"Type\": \"DynamoDB write\", \"Tickers\": ticks, \"Size\": size}}\n )\n \n try:\n response = self.dynamo_resource.batch_write_item(\n RequestItems={self.table_name: requests},\n ReturnConsumedCapacity = 'INDEXES')\n \n self.Logger.debug(f'{response}')\n \n if response['UnprocessedItems']:\n raise RuntimeError('UnprocessedItems in batch write')\n except errors as ex:\n raise app.AppException(ex, f'dynamodb throughput exceed')\n\n return True", "def upload(self, documents: List[Document], vectorise_func) -> None:\n\n # Add doc_store to documents\n for d in documents:\n d.doc_store = self\n # Check ID uniqueness\n check_duplicate_documents(documents)\n # Check type consistency\n check_document_types(documents)\n # Batching\n batches = batch_items(documents)\n\n # Update document class conveniently\n if issubclass(type(documents[0]), ChunkedDocument):\n self._doc_class = ChunkedDocument\n\n for batch in batches:\n vectorise_func(batch, self)\n self.documents += batch", "def store_documents(self, documents: list):\n results = app.Results()\n entries = [\n { \n 'Id': str(uuid1()),\n 'MessageBody': json.dumps(doc)\n }\n for doc in documents\n ]\n ids = [ e['Id'] for e in entries ]\n self.Logger.info(f'Store {ids} in sqs')\n self.Logger.debug(f'Saving {entries} in sqs {self.sqs_queue_url}')\n self.sqs_client.send_message_batch(\n QueueUrl=self.sqs_queue_url,\n Entries=entries\n )\n results.ActionStatus = 0\n results.Results = ids\n return results", "def insert_documents(connection: DBConnection, documents: Sequence[Document]) -> None:\n max_ = len(documents)\n current = 0\n print() # print an extra line, because we will delete lines with printing \\r\n for chunk in chunks(documents):\n connection.execute(\"BEGIN TRANSACTION\")\n for doc in chunk:\n # python doesn't support prepared statements, but instead has a builtin sql cache\n connection.execute(\n \"INSERT INTO docs(did, title, url) VALUES (?, ?, ?)\", doc.convert_to_tuple())\n current += 1\n print(f\"\\r[{current}/{max_}] doc done\", end='')\n connection.execute(\"COMMIT\")", "def add_documents(self, docs):\n for doc in docs:\n assert isinstance(doc, pylastica.document.Document), \"All items in list docs must be of type Document: %r\" % doc\n doc.doc_type = self.name\n return self.index.add_documents(docs)", "def add_documents(self, docs):\n for sent in docs:\n sent = map(self.process_token, sent)\n self._token_count.update(sent)", "async def put_documents(self, collection, documents):\n await self.ensure_collection(collection)\n try:\n if SOLR_COMMIT_WITHIN:\n params = {'commitWithin': SOLR_COMMIT_WITHIN}\n else:\n params = {'commit': 'true'}\n await self.post(\n '/v2/collections/{}/update'.format(collection),\n params=params, json_data=documents\n )\n logger.info('Successfully indexed {} documents to collection {}'\n .format(len(documents), collection))\n except SolrError:\n logger.warning('Failed to put {} documents to collection {}'\n .format(len(documents), collection))\n raise", "def insert_many(self, documents: Iterable[dict]) -> None:\n for i, document in enumerate(documents):\n if isinstance(document, dict):\n 
self._store_document(document)\n else:\n raise TypeError(\n f\"The document at index {i} was not a dictionary. All documents must be dictionaries.\"\n )\n self._dump()", "def documents(self, documents):\n\n self._documents = documents", "def createDocumentAll(self, documents):\n docs = []\n for document in documents:\n if isinstance(document, couch.Document):\n document = document.getData()\n\n # this is create method, no update allowed\n if \"_rev\" in document: del document[\"_rev\"]\n if \"_deleted\" in document: del document[\"_deleted\"]\n\n docs.append(document)\n\n return self.client.post(self.name +\"/_bulk_docs\", None,\n {\"docs\": docs}).getBodyData()", "async def bulk_insert(self, documents, alias=None):\n\n is_valid = True\n docs_to_insert = []\n\n for document_index, document in enumerate(documents):\n self.update_field_on_save_values(document, document._id is not None)\n try:\n is_valid = is_valid and self.validate_document(document)\n except Exception:\n err = sys.exc_info()[1]\n raise ValueError(\n \"Validation for document %d in the documents you are saving failed with: %s\"\n % (document_index, str(err))\n )\n\n if not is_valid:\n return\n\n docs_to_insert.append(document.to_son())\n\n if not is_valid:\n return\n\n doc_ids = await self.coll(alias).insert(docs_to_insert)\n\n for object_index, object_id in enumerate(doc_ids):\n documents[object_index]._id = object_id\n\n return documents", "def add(self, documents):\n\n if self.cluster:\n self.cluster.add(documents)\n else:\n super().add(documents)\n\n return documents", "def finish_documents():\n\n doc_ids = json.loads(request.form['doc_ids'])\n\n for docid in doc_ids:\n\n document = Document.query.filter_by(id=docid).first_or_404()\n\n document.status = \"OK\"\n\n db.session.add(document)\n\n db.session.commit()", "def store_documents(self, partner, documents):\n for docs in documents:\n if docs and docs['type'] in DOCS_TYPES:\n document = DocumentDetails()\n document.partner_id = partner\n document.type = DOCS_TYPES[docs['type']]\n document.file_name = docs['file']\n document.file_data = os.path.join('documents/partner_doc', docs['file'])\n document.save()", "def updateDocumentAll(self, documents):\n docs = []\n for document in documents:\n if isinstance(document, couch.Document):\n document = document.getData()\n\n # these are required params\n if \"_id\" not in document or \"_rev\" not in document:\n raise Exception(\"Both _id & _rev fields are required!\")\n\n docs.append(document)\n\n return self.client.post(self.name +\"/_bulk_docs\", None,\n {\"docs\": docs}).getBodyData()", "def add_documents(self, docs):\n if 'sentences' in docs:\n for sent in docs.sentences:\n sent = map(self.process_token, [t for t in sent.tokens if not t.is_stopword])\n self._token_count.update(sent)\n\n else:\n sent = list(map(self.process_token, [t for t in docs.tokens if not t.is_stopword]))\n self._token_count.update(sent)", "def save(self, batch_of_documents, destination=\"exports\", *args, **kwargs):\n raise NotImplementedError", "def run(self, mapping={}, *args, **kwargs):\n self.processed = 0\n for batch in self._process_by_batch(self.load(*args, **kwargs)):\n batch = list(map(lambda doc: self._apply_mapping(doc, mapping), batch))\n for doc in batch:\n self._ingest(iterable=doc, doctype=doc[\"doctype\"])\n self.processed += 1\n logger.info(\"Added {} documents to the database.\".format(self.processed))", "def add_documents(\n self,\n index: str,\n documents: List[Dict[str, Any]],\n routing: Callable[[Dict[str, Any]], str] = None,\n 
doc_id: Callable[[Dict[str, Any]], str] = None,\n ) -> int:\n\n def map_doc_2_action(doc: Dict[str, Any]) -> Dict[str, Any]:\n \"\"\"Configures bulk action\"\"\"\n data = {\n \"_op_type\": \"index\",\n \"_index\": index,\n \"_routing\": routing(doc) if routing else None,\n **doc,\n }\n\n _id = doc_id(doc) if doc_id else None\n if _id is not None:\n data[\"_id\"] = _id\n\n return data\n\n success, failed = es_bulk(\n self.__client__,\n index=index,\n actions=map(map_doc_2_action, documents),\n raise_on_error=True,\n refresh=\"wait_for\",\n )\n return len(failed)", "async def index_documents(self, app_id, namespace, index_name, documents):\n collection = get_collection_name(app_id, namespace, index_name)\n solr_documents = [_to_solr_document(doc) for doc in documents]\n await self.solr.put_documents(collection, solr_documents)", "def parallel_import_documents(self, index, documents, **kwargs):\n \n # Set default values in passed as kwargs\n chunk_size = kwargs.get('chunk_size', None)\n if chunk_size is None:\n chunk_size = 20000\n kwargs['chunk_size'] = chunk_size\n \n request_timeout = kwargs.get('request_timeout', None)\n if request_timeout is None:\n request_timeout = 3600\n kwargs['request_timeout'] = request_timeout\n \n doc_type = kwargs.get('doc_type', None)\n if doc_type is None:\n doc_type = \"_doc\"\n kwargs['doc_type'] = doc_type\n \n raise_on_exception = kwargs.get('raise_on_exception', None)\n if raise_on_exception is None:\n raise_on_exception = False\n kwargs['raise_on_exception'] = raise_on_exception\n \n raise_on_error = kwargs.get('raise_on_error', None)\n if raise_on_error is None:\n raise_on_error = False\n kwargs['raise_on_error'] = raise_on_error\n \n self._logger.info('%s documents to index into %s', len(documents), index)\n doc_count = 0 \n \n if len(documents) > 0:\n for success, info in helpers.parallel_bulk(self.es, documents, index=index, **kwargs):\n if not success:\n self._logger.error(f'A document failed: {info}')\n else:\n doc_count += 1\n \n self._logger.info('%s documents indexed into %s', doc_count, index)\n \n return doc_count", "def insert(self, index, documents, batch_size=100):\n actions = []\n latest_index_id, begin_timestamp = self.__get_latest_index(index)\n\n for idx, doc in enumerate(documents):\n index_id = latest_index_id\n\n if doc[\"request_time\"] <= begin_timestamp:\n index_id = self.get_query_index(index, doc[\"request_time\"])\n\n action = {\n \"_index\": index + \"_\" + str(index_id),\n \"_type\": \"docs\",\n \"_source\": doc,\n }\n actions.append(action)\n\n if len(actions) == batch_size or idx == len(documents) - 1:\n print(\"Bulk ingesting started...\")\n\n try:\n bulk(self.client, actions, raise_on_error=True, request_timeout=200)\n except:\n print(\"Could not write the data.\")\n raise\n \n actions.clear()\n print(\"Bulk ingesting done\")\n if self.__get_index_size(index, latest_index_id) >= self.THRESHOLD:\n begin_timestamp = self.__update_index_timerange(\n index, latest_index_id\n )\n latest_index_id = self.__create_new_index(\n index, latest_index_id + 1, begin_timestamp\n )", "def ingest_all(self, docs):\n for doc in docs:\n self.ingest(doc)", "def createMultipleDocuments(cred, payload):\n url = cred.base_url + \"documents:commit\"\n data = { 'writes': [] }\n\n for path, fieldData in payload.iteritems():\n pathData = createFirestoreDataObject(cred, path, fieldData)\n del pathData['updateMask']\n data['writes'].append(pathData)\n\n makeRequest(cred, url, 'POST', data)", "def batch(self, requests):\n return 
AlgoliaUtils_request(self.headers, self.write_hosts, \"POST\", \"/1/indexes/*/batch\", self.timeout, {\"requests\": requests})", "def upload(self, documents: List[ElasticDocument], vectorise_func, index: str = None) -> None:\n if not index:\n index = self._index\n\n # Add doc_store to documents\n for d in documents:\n d.doc_store = self\n # Check ID uniqueness\n check_duplicate_documents(documents)\n # Check type consistency\n check_document_types(documents)\n # Batching\n batches = batch_items(documents)\n\n for batch in batches:\n payload = []\n # Calculate vectors\n vectorise_func(batch, self)\n\n for document in batch:\n # JSON representation of document\n doc_json = document.to_elastic()\n\n # Add correct index\n doc_json[\"_index\"] = index\n\n # Rename id key\n doc_json[\"_id\"] = doc_json[\"id\"]\n del doc_json[\"id\"]\n\n payload.append(doc_json)\n\n # Bulk upload to elasticsearch\n helpers.bulk(self._client, payload)\n\n # Update index\n self._client.indices.refresh(index=self._index)", "def add(self, docs: DocumentArray, *args, **kwargs):\n cursor = self.connection.cursor()\n try:\n psycopg2.extras.execute_batch(\n cursor,\n f'INSERT INTO {self.table} (ID, DOC) VALUES (%s, %s)',\n [\n (\n doc.id,\n doc.SerializeToString(),\n )\n for doc in docs\n ],\n )\n except psycopg2.errors.UniqueViolation as e:\n self.logger.warning(\n f'Document already exists in PSQL database. {e}. Skipping entire transaction...'\n )\n self.connection.rollback()\n self.connection.commit()", "def insert_boost(connection: DBConnection, documents: Sequence[Document]) -> None:\n max_ = len(documents)\n current = 0\n print() # print an extra line, because we will delete lines with printing \\r\n for chunk in chunks(documents):\n\n connection.execute(\"BEGIN TRANSACTION\")\n for doc in chunk:\n connection.execute(\n \"INSERT INTO boost(did, date, page) VALUES (?, ?, ?)\", (doc.id, doc.date, doc.page))\n connection.execute(\"COMMIT\")\n current += len(chunk)\n print(f\"\\r[{current}/{max_}] boost done\", end='')\n print()", "def add_document_lists(self, docs):\n for sent in docs:\n sent = map(self.process_token, sent)\n self._token_count.update(sent)", "def updateMultipleDocuments(cred, payload):\n\n url = cred.base_url + \"documents:commit\"\n data = { 'writes': [] }\n\n for path, fieldData in payload.iteritems():\n pathData = createFirestoreDataObject(cred, path, fieldData)\n data['writes'].append(pathData)\n \n makeRequest(cred, url, 'POST', data)" ]
[ "0.74635196", "0.68780833", "0.6735508", "0.66293705", "0.65308714", "0.64703435", "0.6461322", "0.6320846", "0.6297765", "0.6272216", "0.61952156", "0.618961", "0.6179138", "0.6159434", "0.61464745", "0.6131274", "0.6127692", "0.60197544", "0.60097003", "0.6002732", "0.59492266", "0.5928816", "0.5923453", "0.58978164", "0.589677", "0.5849082", "0.57858676", "0.5769239", "0.5731439", "0.5711891" ]
0.73529315
1
Loads a document database with the specified version from the directory.
def load(db_path="data/documents/trigrams", version=None): # create database at the desired path and with the desired version db = DocumentDatabase(db_path, version) # loop through batches for batch in db._get_batches(): # get the path to the stats file stats_file = db._get_batch_stat_file(batch) # load the stats stats_json = json.loads(codecs.open(stats_file, "rb", "utf-8").read()) # save in the batch statistics hash db.batch_stats[batch] = BatchStats(stats_json["total_sentences"]) # return the database return db
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_or_create_db(self):\n try:\n with open(self._filename, 'rb') as f:\n self.db = pickle.load(f)\n except FileNotFoundError:\n pass", "def load_DB(self):\n\t\tprint 'Loadind Data Base...'\n\t\tstream = open(self.DB_file)\n\t\tself.DB = cPickle.load(stream)\n\t\tstream.close()\n\t\tprint \"Number of documents in the Data Base: \", self.DB.nb_doc_total\n\t\tprint 'Loading completed'\n\t\treturn", "def load_db(path_to_db):\n db_run = db(path_to_db) # Instantiates the DB by reading the file\n db_run.import_config_db() # Imports configuration DB\n db_run.conn.row_factory = sqlite3.Row # Better select results\n return(db_run)", "def _load_vdb_with_mode(cls, vdb, mode): \n # {{\n db = anydbm.open(vdb.filename, mode)\n try:\n if db[\"--Reserved--type\"] != vdb.type:\n raise ValueError(\"Not a %s database\" % (vdb.type,))\n except KeyError:\n raise ValueError(\"Not a recognized database\")\n vdb.db = db\n # }}", "def openDB(self, dbpath, updateOnIdle=True):\n\t\tself.openDBFile( last_file_in_directory(dbpath, \"*sqlite\"), updateOnIdle )", "def load_db(file):\n if os.path.isfile(file):\n try:\n start = time.time()\n db = []\n with open(file, 'r') as f:\n for item in json_lines.reader(f):\n db.append(item)\n stop = time.time() - start\n print(\"load_db time: \", stop, 'sec')\n return db\n except Exception as e:\n print(file, \"is probably corrupted. Creating empty db now...\")\n DbManager.erase_db(file)\n raise e\n\n else:\n # corrupt...\n print(\"database not found. creating new\")\n DbManager.new_db(file)", "def load(file_path = \"database.pkl\"):\n while True:\n doLoad = input(\"Database contents will be overwritten. Proceed? (y/n): \")\n if doLoad == 'y':\n break\n elif doLoad == 'n':\n return\n else:\n continue\n try:\n with open(file_path, 'rb') as f:\n global person_database\n person_database = pickle.load(f)\n print(\"Database loaded.\")\n except:\n print(\"Database not available.\")", "def load_db(dbpath):\n\n if not os.path.exists(dbpath):\n print(\"Cannot find %s directory, rerun from MacInfoPkg directory!\" % dbpath)\n sys.exit(1)\n\n db = []\n\n for root, dirs, files in os.walk(dbpath):\n for file in fnmatch.filter(files, '*.yaml'):\n path = os.path.join(root, file)\n with open(path, 'r') as fh:\n try:\n db.append(yaml.safe_load(fh))\n except yaml.YAMLError as e:\n print(\"Failed to parse file %s - %s\" % (path, e))\n sys.exit(1)\n\n if len(db) == 0:\n print(\"Empty database!\")\n sys.exit(1)\n\n # Sorting is required for fast lookup.\n return sorted(db, key=operator.itemgetter('SystemProductName'))", "def get_document(self, docid):\n try:\n return self.sql_session.query(Document).get(docid)\n except OperationalError:\n raise IOError(\"Sorry, this database is incompatible with the \"\n \"current version of Luminoso. 
If you want, you can \"\n \"delete the model directory and start again.\")", "def database(db):\n if type(db) is str:\n # Database name\n if db.endswith('.py'):\n # Python source, exec it\n globals = {}\n exec(compile(open(db).read(), db, 'exec'), globals)\n if 'DB' in globals:\n db = globals['DB']\n else:\n storage = globals['Storage']\n from ZODB.DB import DB\n db = DB(storage, cache_size=4000)\n elif db.endswith(\".fs\"):\n from ZODB.DB import DB\n from ZODB.FileStorage import FileStorage\n storage = FileStorage(db)\n db = DB(storage, cache_size=4000)\n\n # The following will fail unless the application has been configured.\n from zope.event import notify\n notify(zope.processlifetime.DatabaseOpened(db))\n\n return db", "def db_file():\n return abspath('vmchecker.db')", "def _load_document(path, app):\n start_inventor()\n document_type_enum = {\n 12289: 'UnnownDocument',\n 12290: 'PartDocument',\n 12291: 'AssemblyDocument',\n 12292: 'DrawingDocument',\n 12293: 'PresentationDocument',\n 12294: 'DesignElementDocument',\n 12295: 'ForeignModelDocument',\n 12296: 'SATFileDocument',\n 12297: 'NoDocument',\n }\n try:\n app.Documents.Open(str(path))\n document_type = document_type_enum[app.ActiveDocumentType]\n doc = win32com.client.CastTo(app.ActiveDocument, document_type)\n print(doc, document_type)\n return doc\n except:\n print('unable to load file')\n return None", "def test_load_database_from_path(tmp_path):\n path = tmp_path / \"test.db\"\n database = load_database(path_or_database=path, fast_logging=False)\n assert isinstance(database, DataBase)\n assert database.path is not None\n assert database.fast_logging is False", "def load_file():\n global list_of_table, data_base, new_data\n open_name = askopenfilename()\n\n if Path(open_name).suffix == '.db':\n data_base = open_name\n data_base = str(data_base)\n new_data_base = parse(data_base)\n new_data = update_list_tables(new_data_base)\n new_data.clear()\n\n else:\n mistake_db_file()", "def db_version():\n return IMPL.db_version()", "def load_db(db_file):\n db = {}\n logging.info('loading weighted vectors from {0}'.format(db_file))\n with open(db_file, 'r') as f:\n for line in f:\n j = json.loads(line)\n db.update(j)\n return db", "def _get_db(self, db_name: str) -> shelve.DbfilenameShelf:\n db_path = os.path.join(self.cache_folder, db_name)\n db = shelve.open(db_path)\n logging.info(f'Opened cache file {db_path!r}')\n return db", "def create_db(self, path: str) -> None:\n if os.path.isfile(path):\n self.db_path = path\n print(\"DB already exists\")\n return\n\n print(path)\n\n self.db_path = path\n\n print(\"Opening the base db\")\n with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'basedb.xml'), 'r') as f:\n base = f.read()\n print(\"Reading the base as {0}\".format(base))", "def database_file(file):\r\n fpath = path.join('databases', '{0}'.format(file))\r\n db_path = path.join(mod_path, fpath)\r\n return db_path", "def loadDatabase(database):\n for file_name in os.listdir(\"Users\"):\n chemin = os.path.join(\"Users\", file_name)\n key = file_name.lower()\n database[key]=pickle.load(open(chemin,\"rb\"))", "def import_db(import_file):\n import_data(import_file)", "def get_latest_version(db_path):\n\t\t\n\t\t# create a file system and return latest version\n\t\treturn VersionedFile(db_path).get_latest_version()", "def load_database(db_session, fixture):\n # TODO: the fixture file path controls\n\n # load the fixture\n datas = pickle.loads(fixture)\n db_session.add_all(datas)\n db_session.commit()\n print \"load database 
ok\"", "def loadDB(dir):\n\n try:\n infile = os.path.join(gbl.libPath, dir, mmadir)\n f=file(infile, \"rb\")\n f.readline() # Read/discard comment line\n g = pickle.load(f)\n f.close()\n return g\n except:\n pass\n\n return None", "def read_db(self):\n with open(self.filename, 'r') as database:\n data = json.load(database)\n self.data = data", "def load(self) -> None:\n doc_ref = self.doc_ref\n if not isinstance(doc_ref, DocumentReference):\n return\n\n doc = doc_ref.get()\n if doc.exists:\n self.load_storage_model(doc.to_dict())", "def read_db():\n\n # Look for database in the same folder as this script\n script_dir = os.path.dirname(os.path.realpath(__file__))\n db_filepath = os.path.join(script_dir, 'cn_loads_database.dat')\n\n db = None\n if os.path.isfile(db_filepath):\n with open(db_filepath, 'r') as f:\n db = yaml.load(f.read())\n if db == None:\n db = dict()\n else:\n db = dict()\n\n return db", "def get_db():\n with open(db_file) as f:\n db = json.load(f)\n return db", "def load(self, filename=None):\n prefix = os.path.dirname(filename)\n if not os.path.exists(prefix):\n os.makedirs(prefix)\n\n name = filename or self.filename\n\n if os.path.exists(name):\n with open(name, 'rb') as dbfile:\n self.data = yaml.safe_load(dbfile) or dict()", "def __get_db(self, folder):\n db_dir = os.path.join(self.home, self.ibooks_doc_root, folder)\n db_fullname = None\n\n if not os.path.exists(self.tmp_dir):\n os.makedirs(self.tmp_dir)\n\n for dfile in os.listdir(db_dir):\n src = os.path.join(db_dir, dfile)\n dst = os.path.join(self.tmp_dir, dfile)\n shutil.copy(src, dst)\n if dfile.endswith(\".sqlite\"):\n db_fullname = dst\n \n return db_fullname" ]
[ "0.6015772", "0.59988827", "0.59276336", "0.5849796", "0.5840075", "0.5790545", "0.5701773", "0.56872916", "0.5661583", "0.56539536", "0.56524754", "0.5596583", "0.5581138", "0.5557227", "0.5549356", "0.5532614", "0.55302", "0.5490099", "0.54712546", "0.5464255", "0.5438158", "0.5426713", "0.541934", "0.5417999", "0.5407309", "0.5399795", "0.5398745", "0.539352", "0.5353477", "0.53529686" ]
0.7193331
0
Returns the latest version of the documents inventory at the specified path.
def get_latest_version(db_path):
    # create a file system and return latest version
    return VersionedFile(db_path).get_latest_version()
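A guess at what VersionedFile.get_latest_version() might do, assuming versions are stored as numbered subdirectories under db_path; the real class is not shown in this row.

import os

def latest_numbered_version(db_path):
    # return the highest numeric version directory under db_path, or None if there are none
    versions = [int(name) for name in os.listdir(db_path) if name.isdigit()]
    return max(versions) if versions else None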
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def current_version(self):\n try:\n return self.versions.latest()\n except DocumentVersion.DoesNotExist:\n return None", "async def get_local_version(self, path):\n return_value = ''\n if os.path.isfile(path):\n with open(path, 'r') as local:\n ret = re.compile(\n r\"^\\b(VERSION|__version__)\\s*=\\s*['\\\"](.*)['\\\"]\")\n for line in local.readlines():\n matcher = ret.match(line)\n if matcher:\n return_value = str(matcher.group(2))\n return return_value", "def get_version_from_recent_files(self):\n # full_path = self.fusion_prefs[\"LastCompFile\"]\n # return self.get_version_from_full_path(full_path)\n\n version = None\n rfm = RecentFileManager()\n\n try:\n recent_files = rfm[self.name]\n except KeyError:\n logger.debug('no recent files')\n recent_files = None\n\n if recent_files is not None:\n for i in range(len(recent_files)):\n version = self.get_version_from_full_path(recent_files[i])\n if version is not None:\n break\n\n logger.debug(\"version from recent files is: %s\" % version)\n\n return version", "def get_version(path=VERSION_PATH):\n namespace = {}\n exec(read(path), namespace)\n return namespace['get_version'](short=True)", "def getRowFromPath(self, path):\n query = \"SELECT resource_id, etag, title FROM docs WHERE local_path = ?\"\n res = self.db.execute(query, (path,)).fetchone()\n return res", "def get_last_version(self):\n version = self.get_current_version()\n\n # read the recent file list\n if version is None:\n version = self.get_version_from_recent_files()\n\n return version", "def __queryLatest(versionsPath, versionPattern):\n version = 0\n patternParts = __splitVersionPattern(versionPattern)\n versionRegEx = \"^\"+patternParts['prefix']+\"[0-9]{\"+str(len(patternParts['padding']))+\",}\"+patternParts['suffix']+\"$\"\n\n # finding the latest version\n if os.path.exists(versionsPath):\n for directory in os.listdir(versionsPath):\n if re.match(versionRegEx, directory):\n version = max(\n int(verNumber(directory, versionPattern)),\n version\n )\n return version", "def get_file(self, path):\n return self.client._perform_raw(\n \"GET\", \"/projects/%s/managedfolders/%s/contents/%s\" % (self.project_key, self.odb_id, utils.quote(path)))", "def get_last_version(self):\n version = self.get_current_version()\n\n # read the recent file list\n if version is None:\n version = self.get_version_from_recent_files()\n\n # get the latest possible Version instance by using the workspace path\n if version is None:\n version = self.get_version_from_project_dir()\n\n return version", "def get_current_version(self):\n #full_path = self._root.knob('name').value()\n full_path = os.path.normpath(\n self.comp.GetAttrs()['COMPS_FileName']\n ).replace('\\\\', '/')\n return self.get_version_from_full_path(full_path)", "def get_latest_file(path):\n try:\n latest_iteration = get_latest_iteration(path)\n return os.path.join(path, '{}_{}'.format(FILE_PREFIX, latest_iteration))\n except ValueError:\n return None", "def get_version(course_path):\r\n format_file = course_path / EXPORT_VERSION_FILE\r\n if not format_file.isfile():\r\n return 0\r\n with open(format_file, \"r\") as f:\r\n data = json.load(f)\r\n if EXPORT_VERSION_KEY in data:\r\n return data[EXPORT_VERSION_KEY]\r\n\r\n return None", "def get_version_details(self, project_id, document_id, version=None):\n url = base_url + 'portal/' + str(self.portal_id) + '/projects/' + str(project_id) + '/documents/' + str(document_id) + '/'\n if version is not None: \n param = {\n 'version': version\n }\n else:\n param = None\n response = 
zoho_http_client.get(url, self.details, param)\n return parser.get_documents(response)[0]", "def versions(self, stored=False) -> List['RadsSolutionVersion']:\n\n if stored:\n fspath = self.storage.fspath(self.path)\n if not os.path.isdir(fspath):\n return [] # solution not in storage\n listing = []\n for path in os.listdir(fspath):\n if not os.path.isdir(os.path.join(fspath, path)):\n continue\n listing.append(path)\n else:\n logger.debug(f\"retrieve versions of {self}\")\n listing = self.storage.request_text(f\"{self.path}/releaselisting\").splitlines()\n return sorted(RadsSolutionVersion(self, RadsVersion(l)) for l in listing)", "def getversion_nightly(path=None): # pragma: no cover\n if not path:\n path = _get_program_dir()\n\n with open(os.path.join(path, 'version')) as data:\n (tag, rev, date, hsh) = data.readlines()\n\n date = time.strptime(date[:19], '%Y-%m-%dT%H:%M:%S')\n\n if not date or not tag or not rev:\n raise VersionParseError\n return (tag, rev, date, hsh)", "def _get_version(self, identifier: Identifier,\n version: Optional[int] = None) -> DocMetadata:\n parent_path = self._get_parent_path(identifier=identifier,\n version=version)\n path = os.path.join(parent_path,\n (f'{identifier.filename}.abs' if not version\n else f'{identifier.filename}v{version}.abs'))\n return self.parse_abs_file(filename=path)", "def getVersion(self):\n self.getDocumentedObject().getVersion()", "def getRepoRev(self, path):\r\n\r\n if self.verbose:\r\n print(\"INFO : Getting info in {}\".format(path))\r\n\r\n rev = None\r\n with workInDirectory(path):\r\n\r\n rev_cmd_args = ['git', 'rev-parse', 'HEAD']\r\n\r\n if self.verbose:\r\n print(\"INFO : Running command : {}\".format(\" \".join(rev_cmd_args)))\r\n\r\n rev = SubProcessUtility.runCommand(rev_cmd_args)\r\n\r\n if rev == None:\r\n print(\"Unable to get revision for {}, make sure config is correct\".format(path))\r\n\r\n return rev", "def get_latest_saved(self):\n doc = (get_latest_released_app_doc(self.domain, self._id)\n or get_latest_build_doc(self.domain, self._id))\n return self.__class__.wrap(doc) if doc else None", "def get_version_from_full_path(cls, full_path):\n if full_path is None or full_path == \"\":\n return\n\n logger.debug(\"full_path: %s\" % full_path)\n # convert '\\\\' to '/'\n full_path = os.path.normpath(os.path.expandvars(full_path)).replace(\"\\\\\", \"/\")\n\n # trim repo path\n from stalker import Repository, Version\n\n os_independent_path = Repository.to_os_independent_path(full_path)\n\n # try to get a version with that info\n logger.debug(\"getting a version with path: %s\" % full_path)\n\n version = Version.query.filter(Version.full_path == os_independent_path).first()\n logger.debug(\"version: %s\" % version)\n return version", "def get_rev(self, docid):\n response = self._request_session.head(self._database_path(docid))\n try:\n response.raise_for_status()\n except HTTPError as e:\n if e.response.status_code == 404:\n raise ResourceNotFound\n raise\n # https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/ETag\n return response.headers['ETag'].strip('\"').lstrip('W/\"')", "def get_version(self, directory, version_file_name='.version'):\n if self.path_exists(directory) and (version_file_name in os.listdir(directory)):\n f = open(directory + '/' + version_file_name)\n version = f.read()\n f.close()\n return version\n return None", "def svn_rev_info(path): # pragma: no cover\n if not os.path.isdir(os.path.join(path, '.svn')):\n path = os.path.join(path, '..')\n\n _program_dir = path\n filename = 
os.path.join(_program_dir, '.svn/entries')\n if os.path.isfile(filename):\n with open(filename) as entries:\n version = entries.readline().strip()\n if version != '12':\n for _ in range(3):\n entries.readline()\n tag = entries.readline().strip()\n t = tag.split('://', 1)\n t[1] = t[1].replace('svn.wikimedia.org/svnroot/pywikipedia/',\n '')\n tag = '[{}] {}'.format(*t)\n for _ in range(4):\n entries.readline()\n date = time.strptime(entries.readline()[:19],\n '%Y-%m-%dT%H:%M:%S')\n rev = entries.readline()[:-1]\n return tag, rev, date\n\n # We haven't found the information in entries file.\n # Use sqlite table for new entries format\n from sqlite3 import dbapi2 as sqlite\n with closing(\n sqlite.connect(os.path.join(_program_dir, '.svn/wc.db'))) as con:\n cur = con.cursor()\n cur.execute(\"\"\"select\nlocal_relpath, repos_path, revision, changed_date, checksum from nodes\norder by revision desc, changed_date desc\"\"\")\n _name, tag, rev, date, _checksum = cur.fetchone()\n cur.execute('select root from repository')\n tag, = cur.fetchone()\n\n tag = os.path.split(tag)[1]\n date = time.gmtime(date / 1_000_000)\n return tag, rev, date", "def getMostRecentPublishedEdit(self, show, sequence, version):\n\n # grab shot edits cache path\n mode = Mode(show, sequence)\n shotEditsCachePath = mode.get('[editorialFLEFilesCache]')\n\n # load the file and grab the published versions\n root = self.fileService.loadXMLFile(shotEditsCachePath)\n isPublished = lambda e: e.attrib['published'] == 'true'\n publishedEdits = filter(isPublished, root.getchildren())\n versions = map(lambda e: int(e.attrib['version']), publishedEdits)\n\n # drop edits after the requested version\n versions.sort(reverse=True)\n versions = list(itertools.dropwhile(lambda v: v >= int(version), versions))\n\n # return path to fle\n if len(versions) > 0:\n return flix.core2.shotCutList.ShotCutList.getDefaultPath(mode, versions[0])\n\n # couldn't find a valid publisehd version\n return None", "def ReadVersion():\n return _ReadNumericFile(pathutils.JOB_QUEUE_VERSION_FILE)", "def version(self):\n self._get_latest_content()\n return self._data.get('version', None)", "def updateDoc(self, path):\n self.db.setDb(self.db_file)\n \n if not self.authd:\n self._authorize()\n \n db_row = self.db.getRowFromPath(path)\n if not db_row:\n return False\n \n resource_id = db_row[0]\n etag = db_row[1]\n title = db_row[2]\n \n ms = gdata.data.MediaSource(file_path=path, content_type=MIMETYPES['ODT'])\n doc = self.client.GetDoc(resource_id.replace(':', '%3A'))\n new_version = self.client.Update(doc, media_source=ms)\n print 'Document pushed:', new_version.GetAlternateLink().href\n \n self.db.resetEtag(new_version)", "def get_latest_version(self):\n try:\n version = self.sourcestudyversion_set.filter(\n i_is_deprecated=False\n ).order_by( # We can't use \"latest\" since it only accepts one field in Django 1.11.\n '-i_version',\n '-i_date_added'\n ).first()\n except ObjectDoesNotExist:\n return None\n return version", "def get_latest_revision(self):\n revision_list = self.get_revision_list()\n if revision_list:\n return revision_list[-1]\n else:\n raise NoRevisionsExistError()", "def GetVersion(self):\n return self._SendRequest(HTTP_GET, \"/version\", None, None)" ]
[ "0.64565796", "0.59338427", "0.59115607", "0.5859803", "0.58483046", "0.5716326", "0.5692346", "0.5668634", "0.56481016", "0.5610619", "0.56099266", "0.556473", "0.5553882", "0.55534357", "0.5535717", "0.55330545", "0.55328286", "0.5523551", "0.547328", "0.54688823", "0.546578", "0.5459197", "0.54324514", "0.5426627", "0.5378384", "0.53709376", "0.5360439", "0.5353064", "0.53516084", "0.5306917" ]
0.67488974
0
Returns OAuth2 credentials if we have valid credentials in the session. This is a 'truthy' value. Return None if we don't have credentials, or if they have expired or are otherwise invalid. This is a 'falsy' value.
def valid_credentials(): if 'credentials' not in flask.session: return None credentials = client.OAuth2Credentials.from_json( flask.session['credentials']) if (credentials.invalid or credentials.access_token_expired): return None return credentials
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def valid_credentials():\n if 'credentials' not in flask.session:\n return None\n\n credentials = client.OAuth2Credentials.from_json(\n flask.session['credentials'])\n\n if (credentials.invalid or credentials.access_token_expired):\n return None\n return credentials", "def get_credentials():\n credentials = tools.get_credentials_file()\n session_credentials = session.get_session_credentials()\n for credentials_key in credentials:\n\n # checking for not false, but truthy value here is the desired behavior\n session_value = session_credentials.get(credentials_key)\n if session_value is False or session_value:\n credentials[credentials_key] = session_value\n return credentials", "def get_creds():\n\tcredentials = None\n\tif os.path.exists('token.pickle'):\n\t\twith open('token.pickle', 'rb') as token:\n\t\t\tcredentials = pickle.load(token)\n\t# If there are no (valid) credentials available, let the user log in.\n\tif not credentials or not credentials.valid:\n\t\tif credentials and credentials.expired and credentials.refresh_token:\n\t\t\tcredentials.refresh(Request())\n\t\telse:\n\t\t\tflow = InstalledAppFlow.from_client_secrets_file('config/sa.json', SCOPES)\n\t\t\tcredentials = flow.run_local_server(port=0)\n\t\t# Save the credentials for the next run\n\t\twith open('token.pickle', 'wb') as token:\n\t\t\tpickle.dump(credentials, token)\n\treturn credentials", "def get_credentials():\n store = Storage(CREDENTIAL_PATH)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n credentials = tools.run_flow(flow, store, None)\n return credentials", "def get_creds():\n creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('inputs/token.pickle'):\n with open('inputs/token.pickle', 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n 'inputs/credentials.json', SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open('inputs/token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n return creds", "def auth_credentials(self) -> Optional[Sequence['outputs.AuthCredentialResponse']]:\n return pulumi.get(self, \"auth_credentials\")", "def GetCredentials(self):\n return self._session.get(_CREDENTIAL_KEY, credentials.MapdCredentials())", "def get_credentials():\n credentials_path = os.path.join(CREDENTIALS_DIR, CREDENTIALS_FILE)\n store = oauth2client.file.Storage(credentials_path)\n credentials = store.locked_get()\n\n if not credentials or credentials.invalid:\n client_secret_path = os.path.join(CREDENTIAL_DIR, CLIENT_SECRET_FILE)\n flow = client.flow_from_clientsecrets(client_secret_path, \n scope='https://www.googleapis.com/auth/admin.directory.resource.calendar',\n redirect_uri='urn:ietf:wg:oauth:2.0:oob')\n\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n\n print(\"Storing credentials to: \" + credentials_path)\n\n\n return credentials", "def get_credentials():\n store = Storage(CLIENT_CREDENTIALS_FILE)\n credentials = store.get()\n if not 
credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + CLIENT_CREDENTIALS_FILE)\n return credentials", "def load_session_credentials(request_handler):\n session = sessions.LilCookies(request_handler, SESSION_SECRET)\n userid = session.get_secure_cookie(name='userid')\n if userid:\n return userid, StorageByKeyName(Credentials, userid, 'credentials').get()\n else:\n return None, None", "def credentials(self):\n if self.user and self.is_authenticated():\n return AuthCredentials(['authenticated'] + self.user.permissions)\n else:\n return AuthCredentials()", "def authorize_credentials():\n credentials = STORAGE.get()\n # If the credentials doesn't exist in the storage location then run the flow\n if credentials is None or credentials.invalid:\n flow = flow_from_clientsecrets(CREDENTIAL_JSON, scope=SCOPE)\n http = httplib2.Http()\n credentials = run_flow(flow, STORAGE, http=http)\n return credentials", "def get_auth_token(self):\n creds = None\n # The file token.pickle stores the user's access and refresh tokens,\n # and is created automatically when the authorization flow completes\n # for the first time.\n if os.path.exists(self.token_path):\n with open(self.token_path, 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n self.credentials_path, self.scopes)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open(self.token_path, 'wb') as token:\n pickle.dump(creds, token)\n return creds", "def get_credentials(self, **kwargs):\n creds_file = os.path.join(kwargs['user_dir'], 'credentials.json')\n\n # Getting credentials from Storage\n store = file.Storage(creds_file)\n creds = store.get()\n\n # Validating or refreshing credentials, if necessary\n if creds is None or creds.invalid:\n flow = client.flow_from_clientsecrets(self.client_secret_file,\n self.scopes)\n creds = tools.run_flow(flow, store)\n elif creds.access_token_expired:\n creds.refresh(httplib2.Http())\n else:\n pass\n\n return creds", "def credentials_given(self):\n return self.key and self.secret", "def getsessionpasswd(cls, session):\n sessionkey = cls.sessionkey(session)\n if sessionkey in sessionmgr.keys():\n return True, sessionmgr[sessionkey]['password']\n return False, None", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n 
credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\r\n home_dir = os.path.expanduser('~')\r\n credential_dir = os.path.join(home_dir, '.credentials')\r\n if not os.path.exists(credential_dir):\r\n os.makedirs(credential_dir)\r\n credential_path = os.path.join(credential_dir,\r\n 'calendar-python-quickstart.json')\r\n\r\n store = oauth2client.file.Storage(credential_path)\r\n credentials = store.get()\r\n if not credentials or credentials.invalid:\r\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\r\n flow.user_agent = APPLICATION_NAME\r\n if flags:\r\n credentials = tools.run_flow(flow, store, flags)\r\n else: # Needed only for compatibility 
with Python 2.6\r\n credentials = tools.run(flow, store)\r\n print('Storing credentials to ' + credential_path)\r\n return credentials", "def GetCredentials(self, credentials_path: str,\n client_secrets_path: str) -> Optional[Any]:\n scopes = ['openid', 'https://www.googleapis.com/auth/userinfo.email']\n credentials = None\n\n # Load credentials file if it exists\n if os.path.exists(credentials_path):\n try:\n credentials = Credentials.from_authorized_user_file(\n credentials_path, scopes)\n except ValueError as exception:\n msg = f'Error loading credentials: {exception!s}'\n self.ModuleError(msg, critical=True)\n # Refresh credentials using existing refresh_token\n if credentials and credentials.refresh_token:\n self.logger.debug('Found a refresh token. Requesting new id_token...')\n try:\n credentials.refresh(Request())\n except google_exceptions.RefreshError as exception:\n self.logger.debug(f'Error refreshing credentials: {exception!s}')\n else:\n # No credentials file, acquire new credentials from secrets file.\n self.logger.debug(\n 'Could not find existing credentials. Requesting new tokens.')\n try:\n appflow = flow.InstalledAppFlow.from_client_secrets_file(\n client_secrets_path, scopes)\n except FileNotFoundError as exception:\n msg = f'Client secrets file not found: {exception!s}'\n self.ModuleError(msg, critical=True)\n\n self.logger.info(\n 'Starting local HTTP server on localhost:8888 for OAUTH flow. '\n 'If running dftimewolf remotely over SSH you will need to tunnel '\n 'port 8888.')\n appflow.run_local_server(host='localhost', port=8888, open_browser=False)\n credentials = appflow.credentials\n\n # Save credentials\n if credentials:\n with open(credentials_path, 'w', encoding='utf-8') as token:\n token.write(credentials.to_json())\n\n return credentials", "def get_credentials():\r\n home_dir = os.path.expanduser('~')\r\n credential_dir = os.path.join(home_dir, '.credentials')\r\n if not os.path.exists(credential_dir):\r\n os.makedirs(credential_dir)\r\n credential_path = os.path.join(credential_dir,\r\n 'bis-python-quickstart.json')\r\n\r\n store = oauth2client.file.Storage(credential_path)\r\n credentials = store.get()\r\n if not credentials or credentials.invalid:\r\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\r\n flow.user_agent = APPLICATION_NAME\r\n if flags:\r\n credentials = tools.run_flow(flow, store, flags)\r\n else: # Needed only for compatibility with Python 2.6\r\n credentials = tools.run(flow, store)\r\n print('Storing credentials to ' + credential_path)\r\n return credentials", "def validate_auth():\n try:\n token = oidc.get_access_token()\n except TypeError:\n # raised when the token isn't accessible to the oidc lib\n raise Unauthorized(\"missing auth token\")\n\n if not oidc.validate_token(token):\n terminate_session()\n raise Unauthorized(\"invalid auth token\")\n return token", "def valid_credentials(self):\n path = '/api/session-user'\n url = '{}{}'.format(self._url_base, path)\n response, content = super(DSBaseService, self)._request(url,\n headers=self._headers(with_content_type=False))\n return int(response['status']) == 200", "def get_credentials():\n credential_dir = os.path.realpath('.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = oauth2client.file.Storage(credential_path) # stores the users credentials --> TODO: put in database\n credentials = store.get()\n if not credentials or 
credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n\n credentials = tools.run_flow(flow, store, flags)\n\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'reseller-python-quickstart.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'clockwise.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatability with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials", "def get_credentials(self):\r\n \r\n try:\r\n import argparse\r\n #flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()\r\n if self.noauth == True:\r\n flags = tools.argparser.parse_args(args=['--noauth_local_webserver'])\r\n else:\r\n flags = tools.argparser.parse_args(args=[])\r\n except ImportError:\r\n flags = None \r\n \r\n home_dir = os.path.expanduser('~')\r\n credential_dir = os.path.join(home_dir, '.credentials')\r\n if not os.path.exists(credential_dir):\r\n os.makedirs(credential_dir)\r\n credential_path = os.path.join(credential_dir,'sheets.googleapis.com-allstarbot.json')\r\n\r\n store = Storage(credential_path)\r\n credentials = store.get()\r\n if not credentials or credentials.invalid:\r\n secret = Path(self.CLIENT_SECRET_FILE)\r\n if secret.exists():\r\n flow = client.flow_from_clientsecrets(self.CLIENT_SECRET_FILE, self.SCOPES)\r\n else:\r\n print(\"client_secret.json not found, using env vars\")\r\n if not os.environ.get('client_id') or not os.environ.get('client_secret'): \r\n print(\"env vars client_id and client_secret not found. canceling\")\r\n raise Exception(\"client secret error\")\r\n else:\r\n flow = OAuth2WebServerFlow(\r\n os.environ.get('client_id'),\r\n os.environ.get('client_secret'),\r\n self.SCOPES) \r\n \r\n flow.params['access_type'] = 'offline'\r\n flow.user_agent = self.APPLICATION_NAME\r\n if flags:\r\n credentials = tools.run_flow(flow, store, flags)\r\n else: # Needed only for compatibility with Python 2.6\r\n credentials = tools.run(flow, store)\r\n print('Storing credentials to ' + credential_path)\r\n return credentials" ]
[ "0.8314805", "0.67917407", "0.66121364", "0.6458647", "0.644396", "0.6412883", "0.6292584", "0.6262902", "0.6216184", "0.6159604", "0.60996187", "0.60749036", "0.60712475", "0.5996059", "0.5910496", "0.59005594", "0.5890843", "0.5890843", "0.5890843", "0.5890843", "0.5890843", "0.5879354", "0.5873639", "0.5870393", "0.5835825", "0.5814248", "0.5809308", "0.58041394", "0.5768607", "0.575834" ]
0.83706456
1
Read time in a human-compatible format and interpret as ISO format with local timezone. May throw exception if time can't be interpreted. In that case it will also flash a message explaining accepted formats.
def interpret_time( text ): app.logger.debug("Decoding time '{}'".format(text)) time_formats = ["ha", "h:mma", "h:mm a", "H:mm"] try: as_arrow = arrow.get(text, time_formats).replace(tzinfo=tz.tzlocal()) as_arrow = as_arrow.replace(year=2016) #HACK see below app.logger.debug("Succeeded interpreting time") except: app.logger.debug("Failed to interpret time") flask.flash("Time '{}' didn't match accepted formats 13:30 or 1:30pm" .format(text)) raise return as_arrow.isoformat() #HACK #Workaround # isoformat() on raspberry Pi does not work for some dates # far from now. It will fail with an overflow from time stamp out # of range while checking for daylight savings time. Workaround is # to force the date-time combination into the year 2016, which seems to # get the timestamp into a reasonable range. This workaround should be # removed when Arrow or Dateutil.tz is fixed. # FIXME: Remove the workaround when arrow is fixed (but only after testing # on raspberry Pi --- failure is likely due to 32-bit integers on that platform)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def interpret_time(text):\n app.logger.debug(\"Decoding time '{}'\".format(text))\n time_formats = [\"ha\", \"h:mma\", \"h:mm a\", \"H:mm\"]\n try:\n as_arrow = arrow.get(text, time_formats).replace(tzinfo=tz.tzlocal())\n as_arrow = as_arrow.replace(year=2016) # HACK see below\n app.logger.debug(\"Succeeded interpreting time\")\n except:\n app.logger.debug(\"Failed to interpret time\")\n flask.flash(\"Time '{}' didn't match accepted formats 13:30 or 1:30pm\"\n .format(text))\n raise\n return as_arrow.isoformat()\n # HACK Workaround\n # isoformat() on raspberry Pi does not work for some dates\n # far from now. It will fail with an overflow from time stamp out\n # of range while checking for daylight savings time. Workaround is\n # to force the date-time combination into the year 2016, which seems to\n # get the timestamp into a reasonable range. This workaround should be\n # removed when Arrow or Dateutil.tz is fixed.\n # FIXME: Remove the workaround when arrow is fixed (but only after testing\n # on rasp Pi failure is likely due to 32-bit integers on that platform)", "def test_parseTimeInvalidFormat(self):\n self.assertRaises(ValueError, imap4.parseTime, u\"invalid\")", "def _parse_time(time_string: str, source: str = \"input\") -> Optional[datetime.datetime]:\n if not time_string:\n return None\n\n format_string = \"%Y-%m-%d\" if source == \"input\" else \"%Y-%m-%dT%H:%M:%SZ\"\n try:\n return datetime.datetime.strptime(time_string, format_string)\n except ValueError:\n raise AnalyzerError(\"Incorrect date format\")", "def _CopyTimeFromStringISO8601(self, time_string):\n if time_string.endswith('Z'):\n time_string = time_string[:-1]\n\n time_string_length = len(time_string)\n\n # The time string should at least contain 'hh'.\n if time_string_length < 2:\n raise ValueError('Time string too short.')\n\n try:\n hours = int(time_string[0:2], 10)\n except ValueError:\n raise ValueError('Unable to parse hours.')\n\n if hours not in range(0, 24):\n raise ValueError('Hours value: {0:d} out of bounds.'.format(hours))\n\n minutes = None\n seconds = None\n microseconds = None\n time_zone_offset = None\n\n time_string_index = 2\n\n # Minutes are either specified as 'hhmm', 'hh:mm' or as a fractional part\n # 'hh[.,]###'.\n if (time_string_index + 1 < time_string_length and\n time_string[time_string_index] not in ('.', ',')):\n if time_string[time_string_index] == ':':\n time_string_index += 1\n\n if time_string_index + 2 > time_string_length:\n raise ValueError('Time string too short.')\n\n try:\n minutes = time_string[time_string_index:time_string_index + 2]\n minutes = int(minutes, 10)\n except ValueError:\n raise ValueError('Unable to parse minutes.')\n\n time_string_index += 2\n\n # Seconds are either specified as 'hhmmss', 'hh:mm:ss' or as a fractional\n # part 'hh:mm[.,]###' or 'hhmm[.,]###'.\n if (time_string_index + 1 < time_string_length and\n time_string[time_string_index] not in ('.', ',')):\n if time_string[time_string_index] == ':':\n time_string_index += 1\n\n if time_string_index + 2 > time_string_length:\n raise ValueError('Time string too short.')\n\n try:\n seconds = time_string[time_string_index:time_string_index + 2]\n seconds = int(seconds, 10)\n except ValueError:\n raise ValueError('Unable to parse day of seconds.')\n\n time_string_index += 2\n\n time_zone_string_index = time_string_index\n while time_zone_string_index < time_string_length:\n if time_string[time_zone_string_index] in ('+', '-'):\n break\n\n time_zone_string_index += 1\n\n # The calculations that follow rely 
on the time zone string index\n # to point beyond the string in case no time zone offset was defined.\n if time_zone_string_index == time_string_length - 1:\n time_zone_string_index += 1\n\n if (time_string_length > time_string_index and\n time_string[time_string_index] in ('.', ',')):\n time_string_index += 1\n time_fraction_length = time_zone_string_index - time_string_index\n\n try:\n time_fraction = time_string[time_string_index:time_zone_string_index]\n time_fraction = int(time_fraction, 10)\n time_fraction = (\n decimal.Decimal(time_fraction) /\n decimal.Decimal(10 ** time_fraction_length))\n except ValueError:\n raise ValueError('Unable to parse time fraction.')\n\n if minutes is None:\n time_fraction *= 60\n minutes = int(time_fraction)\n time_fraction -= minutes\n\n if seconds is None:\n time_fraction *= 60\n seconds = int(time_fraction)\n time_fraction -= seconds\n\n time_fraction *= definitions.MICROSECONDS_PER_SECOND\n microseconds = int(time_fraction)\n\n if minutes is not None and minutes not in range(0, 60):\n raise ValueError('Minutes value: {0:d} out of bounds.'.format(minutes))\n\n # TODO: support a leap second?\n if seconds is not None and seconds not in range(0, 60):\n raise ValueError('Seconds value: {0:d} out of bounds.'.format(seconds))\n\n if time_zone_string_index < time_string_length:\n if (time_string_length - time_zone_string_index != 6 or\n time_string[time_zone_string_index + 3] != ':'):\n raise ValueError('Invalid time string.')\n\n try:\n hours_from_utc = int(time_string[\n time_zone_string_index + 1:time_zone_string_index + 3])\n except ValueError:\n raise ValueError('Unable to parse time zone hours offset.')\n\n if hours_from_utc not in range(0, 15):\n raise ValueError('Time zone hours offset value out of bounds.')\n\n try:\n minutes_from_utc = int(time_string[\n time_zone_string_index + 4:time_zone_string_index + 6])\n except ValueError:\n raise ValueError('Unable to parse time zone minutes offset.')\n\n if minutes_from_utc not in range(0, 60):\n raise ValueError('Time zone minutes offset value out of bounds.')\n\n # pylint: disable=invalid-unary-operand-type\n time_zone_offset = (hours_from_utc * 60) + minutes_from_utc\n\n if time_string[time_zone_string_index] == '-':\n time_zone_offset = -time_zone_offset\n\n return hours, minutes, seconds, microseconds, time_zone_offset", "def parseTime(string):\t\n \n if string == \"\":\n result = None\n if 'T' in string:\n string = string.replace('T', ' ')\n if 'Z' in string:\n string = string.replace('Z', '') \n\n if len(string) < 19:\n # string has some single digits\n p = \"\"\"^([0-9]{4})-([0-9]{1,2})-([0-9]{1,2}) \n ([0-9]{1,2}):([0-9]{1,2}):([0-9]{1,2}).*$\"\"\"\n s = re.findall(p, string)\n if len(s) > 0:\n string = '{0}-{1:02d}-{2:02d} {3:02d}:{4:02d}:{5:02d}'\\\n .format(*[int(x) for x in s[0]])\n\n for date_format in DATE_FORMATS:\n try:\n result = datetime.datetime.strptime(string, date_format)\n except ValueError:\n pass\n\n return result", "def parse_time(text):\n try:\n if len(text) == 17:\n date = datetime.datetime.strptime(text, '%Y-%m-%dT%H:%MZ')\n elif len(text) == 20:\n date = datetime.datetime.strptime(text, '%Y-%m-%dT%H:%M:%SZ')\n else:\n date = datetime.datetime.utcnow()\n except Exception as _:\n date = datetime.datetime.utcnow()\n return date", "def test_parse_time_with_invalid_absolute_datetime(self):\n self.assert_TPVE(parse_time, \"\", None)\n self.assert_TPVE(parse_time, \"blahblah\", None)\n # This is detected as a YYYYMMDD string, but it's invalid.\n self.assert_TPVE(parse_time, 
\"20150231\", None)\n\n # Graphite accepts the following, we don't.\n self.assert_TPVE(parse_time, \"2015_02_01\", None)\n self.assert_TPVE(parse_time, \"12:35 20150201\", None)\n self.assert_TPVE(parse_time, \"12:3520150201\", None)\n self.assert_TPVE(parse_time, \"12/31/99\", None)\n self.assert_TPVE(parse_time, \"6pm today\", None)\n self.assert_TPVE(parse_time, \"noon tomorrow\", None)\n self.assert_TPVE(parse_time, \"january 1\", None)\n self.assert_TPVE(parse_time, \"monday\", None)", "def validate_and_parse_input(time: str):\n if time is None or not re.match(r'^\\d{1,2}:\\d{1,2}$', time):\n return False\n hour, minute = map(int, time.split(r':'))\n if type(hour) != int or type(minute) != int:\n return False\n\n if 0 <= hour < 24 and 0 <= minute < 60:\n hour = hour % 12\n minute = minute\n return hour, minute\n else:\n return False", "def _parse_time_str(self, time_str):\n time_fmt = \"%I:%M%p\"\n time_str = re.sub(\n r\":+\",\n \":\",\n re.sub(r\"\\s+\", \"\", re.sub(r\"to|from|\\.\", \"\", time_str.lower())).replace(\n \"o\", \"0\"\n ),\n )\n if \":\" not in time_str:\n time_fmt = \"%I%p\"\n elif len(time_str) < 6:\n time_fmt = \"%I%p\"\n time_str = time_str.replace(\":\", \"\")\n return datetime.strptime(time_str, time_fmt).time()", "def format_time(self, data):\r\n if self.datetime_formatting == 'rfc-2822':\r\n return format_time(data)\r\n\r\n return data.isoformat()", "def parse_time(time: Union[str, datetime]) -> datetime:\n if isinstance(time, str):\n try:\n from ciso8601 import parse_datetime # pylint: disable=wrong-import-position # noqa: F401\n return parse_datetime(time)\n except (ImportError, ValueError): # pragma: no cover\n return dateutil.parser.parse(time)\n\n return time", "def parse_time(time_input, *, force_datetime=False, allow_undefined=False, **kwargs):\n\n if allow_undefined and time_input in [None, '..']:\n return None\n\n if isinstance(time_input, dt.date):\n if force_datetime and not isinstance(time_input, dt.datetime):\n return date_to_datetime(time_input)\n\n if kwargs.get('ignoretz') and isinstance(time_input, dt.datetime):\n return time_input.replace(tzinfo=None)\n\n return time_input\n\n time = dateutil.parser.parse(time_input, **kwargs)\n if force_datetime or len(time_input) > 10: # This check is not very accurate but it works for iso format\n return time\n return time.date()", "def parse_isotime(timestr):\r\n try:\r\n return iso8601.parse_date(timestr)\r\n except iso8601.ParseError as e:\r\n raise ValueError(unicode(e))\r\n except TypeError as e:\r\n raise ValueError(unicode(e))", "def _change_time_format(time_string):\n datetime_object = parser.isoparse(time_string)\n return datetime_object", "def properTimeInput(time_):\r\n if not time_.isdigit() or len(time_) > 4 or len(time_) < 4 or int(time_) > 2400 or int(time_) < 0 or int(time_[2])>5:\r\n print(\"'\",time_, \"' is an invalid input for the time. 
Use 24 hr format.\\nExamples: 8 a.m = 0800, 1 p.m = 1300, 2:30 = 1430, 12:50 a.m = 0050\\n\")\r\n return False\r\n return True", "def __parse_time(self, time_obj):\n if time_obj:\n resp = ''\n if isinstance(time_obj, int) or isinstance(time_obj, str):\n resp = time_obj\n elif isinstance(time_obj, datetime.datetime):\n resp = calendar.timegm(time_obj.timetuple())\n else:\n raise Exception(\"Unknown __parse_time format for {0}\".format(time_obj))\n return str(resp)\n return None", "def test_parse_no_timezine_strict():\n iso8601.parse_datetime(\"2007-01-01T08:00:00\")", "def try_parsing_date(text):\n for fmt in ('%I %p', '%I %M %p', '%I:%M %p'):\n try:\n return datetime.datetime.strptime(text, fmt)\n except ValueError:pass\n if \":\" in text:\n return datetime.datetime.strptime(text+\" \"+\n (\"AM\" if int(text.split(\":\")[0])>=8 else \"PM\"), '%I:%M %p')\n return datetime.datetime.strptime(text+\" \"+\n (\"AM\" if int(text)>=8 else \"PM\"), '%I %p')", "def read_time(time_string):\n factors = {\n \"n\": 1e-9,\n \"u\": 1e-6,\n \"m\": 1e-3,\n \"s\": 1\n }\n \n # Check that the time string is properly formatted, e. g. time part\n # is followed by the unit part. The string should contain at least two\n # character, otherwise splitting it into two parts will raise an IndexError.\n try:\n number, unit = time_string[:-1], time_string[-1]\n except (IndexError, TypeError):\n raise ValueError(\"Invalid time string given.\")\n\n # If the 'time part' cannot be converted to float, this raises a ValueError.\n number = float(number)\n \n if number < 0:\n raise ValueError(\"Negative time values are not allowed.\")\n \n # Check that a valid time unit was specified. If no unit was specified,\n # then what we call 'unit' will in fact be the last digit of the time value\n # and as we do not use numeric unit symbols, we still get an error.\n try:\n factor = factors[unit]\n except KeyError:\n raise ValueError(\"Invalid time unit given.\")\n\n time = number * factor\n return time", "def test_parse_time(\n test_input: str,\n expected: datetime.time,\n):\n assert tvmaze.parsers.parse_time(test_input) == expected", "def fromisoformat(string):\n string = string.replace(\"T\", \" \")\n if \".\" in string:\n return datetime.strptime(string, \"%Y-%m-%d %H:%M:%S.%f\")\n return datetime.strptime(string, \"%Y-%m-%d %H:%M:%S\")", "def _get_date(str_time, time_formats = [\"%Y-%m-%d %H:%M:%S.%f\", \"%Y-%m-%d %H:%M:%S\"]):\r\n time = None\r\n for time_format in time_formats:\r\n try:\r\n time = datetime.strptime(str_time, time_format)\r\n if time:\r\n break\r\n except:\r\n pass\r\n return time", "def parse_time_str(self, time_str):\n try:\n return datetime.strptime(self.force_hour_two_digits(time_str), TIME_FORMAT).time()\n except ValueError:\n return None", "def fromisoformat(cls, time_string):\n if not isinstance(time_string, str):\n raise TypeError(\"fromisoformat: argument must be str\")\n\n try:\n return cls(*_parse_isoformat_time(time_string))\n except Exception:\n raise ValueError(f\"Invalid isoformat string\")", "def parse_strtime(timestr, fmt=PERFECT_TIME_FORMAT):\r\n return datetime.datetime.strptime(timestr, fmt)", "def parse_time(time_string):\n return calendar.timegm(time.strptime(time_string, \"%Y%m%dT%H%M%SZ\"))", "def parse_time(s: str):\n return utils.parsers.parse_eng_unit(s, base_unit='s', default=1e-12)", "def parse_strtime(timestr, fmt=PERFECT_TIME_FORMAT):\n return datetime.datetime.strptime(timestr, fmt)", "def _parse_time(time_string: str) -> datetime:\n\n # Strings with timezone (+01:00) in v2 are not 
easily parsed. But time\n # zones are not important here, so we just omit them.\n time_string = time_string.rsplit('+')[0]\n\n time_formats = [\n '%Y-%m-%dT%H:%M:%S.%fZ', # Default\n '%Y-%m-%dT%H:%M:%SZ', # Imported UNCCD data\n '%Y-%m-%dT%H:%M:%S.%f', # Stripped timezone format (v2)\n ]\n for t_format in time_formats:\n try:\n return datetime.strptime(time_string, t_format)\n except ValueError:\n continue", "def time_trans(datetime_str):\n\t\tif re.compile(\"(\\d+)-(\\d+)-(\\d+) (\\d+):(\\d+):(\\d+)\").match(datetime_str):\n\t\t\treturn datetime.strptime(datetime_str, \"%Y-%m-%d %H:%M:%S\")" ]
[ "0.67903686", "0.66916007", "0.6613034", "0.6586266", "0.6439515", "0.64221704", "0.6381868", "0.6360631", "0.63137174", "0.6284302", "0.62411284", "0.62141776", "0.61804426", "0.61793673", "0.6178851", "0.6156706", "0.61117786", "0.6111385", "0.6104648", "0.6101974", "0.6100496", "0.6089722", "0.6082327", "0.607483", "0.6044045", "0.60433596", "0.6010412", "0.6008228", "0.6007424", "0.6006593" ]
0.6865028
0
Convert text of date to ISO format used internally, with the local time zone.
def interpret_date( text ): try: as_arrow = arrow.get(text, "MM/DD/YYYY").replace( tzinfo=tz.tzlocal()) except: flask.flash("Date '{}' didn't fit expected format 12/31/2001") raise return as_arrow.isoformat()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def date_to_iso(string):\r\n\r\n # disregard tokenisation, if it's there, to make this an easier conversion for GUTime\r\n string = re.sub(r'<([^~]*)~.+?>', r'\\1 ', string)\r\n\r\n # Defaults\r\n d = None\r\n m = None\r\n y = None\r\n h = None\r\n min = None\r\n s = None\r\n fs = None\r\n zone = None\r\n\r\n # ACE format\r\n match = re.search(r'(\\d\\d\\d\\d\\d\\d\\d\\d:\\d\\d\\d\\d)', re.sub('\\s', '', string))\r\n if match is not None:\r\n d = match.group(1)\r\n d = re.sub(r':', r'T', d)\r\n return d\r\n\r\n # Already in ISO format\r\n match = re.search(r'(\\d\\d\\d\\d-?\\d\\d-?\\d\\d)(-?(T\\d\\d(:?\\d\\d)?(:?\\d\\d)?([+-]\\d{1,4})?))?', re.sub('\\s', '', string))\r\n if match is not None:\r\n d = match.group(1)\r\n d = re.sub(r'-', r'', d)\r\n h = match.group(3)\r\n if h is not None:\r\n h = re.sub(r':', r'', h)\r\n return d + h\r\n else:\r\n return d\r\n\r\n # some pre-processing\r\n match = re.search('T\\d\\d(:?\\d\\d)?(:?\\d\\d)?([+-]\\d{1,4})?', re.sub('\\s', '', string))\r\n if match is not None:\r\n return re.sub(r':', r'', re.sub('\\s', '', string))\r\n\r\n # extract date\r\n if re.search(\r\n r'(\\d\\d?|' + expressions.ORDINAL_WORDS + r'|' + expressions.ORDINAL_NUMS + r')\\s+'\r\n r'(' + expressions.MONTHS + r'|' + expressions.MONTH_ABBRS + r'\\s*\\.?)\\s*,?\\s+(\\d\\d(\\s|\\Z)|\\d{4}\\b)',\r\n string, re.I) is not None:\r\n match = re.search(\r\n r'(\\d\\d?|' + expressions.ORDINAL_WORDS + r'|' + expressions.ORDINAL_NUMS + r')\\s+'\r\n r'(' + expressions.MONTHS + r'|' + expressions.MONTH_ABBRS + r'\\s*\\.?)\\s*,?\\s+(\\d\\d(\\s|\\Z)|\\d{4}\\b)',\r\n string, re.I)\r\n d = ordinal_to_num(match.group(1))\r\n m = month_to_num(match.group(5))\r\n y = match.group(7)\r\n\r\n elif re.search(\r\n r'(' + expressions.MONTHS + r'|' + expressions.MONTH_ABBRS + r'\\s*\\.?)\\s+'\r\n r'(\\d\\d?|' + expressions.ORDINAL_WORDS + r'|' + expressions.ORDINAL_NUMS + r')\\b,?\\s*(\\d\\d(\\s|\\Z)|\\d{4}\\b)',\r\n string, re.I) is not None:\r\n match = re.search(\r\n r'(' + expressions.MONTHS + r'|' + expressions.MONTH_ABBRS + r'\\s*\\.?)\\s+'\r\n r'(\\d\\d?|' + expressions.ORDINAL_WORDS + r'|' + expressions.ORDINAL_NUMS + r')\\b,?\\s*(\\d\\d(\\s|\\Z)|\\d{4}\\b)',\r\n string, re.I)\r\n d = ordinal_to_num(match.group(4))\r\n m = month_to_num(match.group(1))\r\n y = match.group(7)\r\n\r\n elif re.search(r'(\\d\\d\\d\\d)(\\/|\\-)(\\d\\d?)\\2(\\d\\d?)', re.sub('\\s', '', string)) is not None:\r\n match = re.search(r'(\\d\\d\\d\\d)(\\/|\\-)(\\d\\d?)\\2(\\d\\d?)', re.sub('\\s', '', string))\r\n m = match.group(3)\r\n d = match.group(4)\r\n y = match.group(1)\r\n\r\n elif re.search(r'(\\d\\d?)(\\/|\\-|\\.)(\\d\\d?)\\2(\\d\\d(\\d\\d)?)', re.sub('\\s', '', string)) is not None:\r\n match = re.search(r'(\\d\\d?)(\\/|\\-|\\.)(\\d\\d?)\\2(\\d\\d(\\d\\d)?)', re.sub('\\s', '', string))\r\n m = match.group(1)\r\n d = match.group(3)\r\n y = match.group(4)\r\n\r\n if y is not None:\r\n # check for European style date\r\n if 12 < int(m) <= 31 and int(d) <= 12:\r\n new_d = m\r\n m = d\r\n d = new_d\r\n\r\n # check for 2 digit year\r\n y = normalise_two_digit_year(str(y))\r\n\r\n iso = \"%4d%02d%02d\" % (int(y), int(m), int(d))\r\n\r\n else:\r\n iso = \"XXXXXXXX\"\r\n\r\n # Extract time\r\n match = re.search(r'(\\d?\\d):(\\d\\d)(:(\\d\\d)(\\.\\d+)?)?(([AP])\\.?M\\.?)?(([+\\-]\\d+|[A-Z][SD]T|GMT([+\\-]\\d+)?))?',\r\n re.sub('\\s', '', string), re.I)\r\n if match is not None:\r\n h = match.group(1)\r\n min = match.group(2)\r\n s = match.group(4)\r\n fs = match.group(5)\r\n ampm = match.group(7)\r\n zone = 
match.group(9)\r\n\r\n if ampm is not None and ampm[0].lower() == 'p':\r\n h = str(int(h) + 12)\r\n\r\n if zone is not None:\r\n zm = re.search(r'(GMT)([+\\-]\\d+)', zone)\r\n if zm is not None:\r\n zone = zm.group(2)\r\n elif zone.lower().find('gmt') > -1:\r\n zone = 'Z'\r\n elif re.search(r'([A-Z])([SD])T', zone) is not None:\r\n zm = re.search(r'([A-Z])([SD])T', zone)\r\n # Timezone offsets from GMT\r\n timezones = {\r\n \"R\": 1,\r\n \"E\": -5,\r\n \"C\": -6,\r\n \"M\": -7,\r\n \"P\": -8\r\n }\r\n if zm.group(1).upper() in timezones:\r\n zone = timezones[zm.group(1).upper()]\r\n if zm.group(2).lower() == 'd':\r\n zone += 1\r\n if zone < 0:\r\n zone = '-%02d00' % (-1 * zone)\r\n else:\r\n zone = '+%02d00' % zone\r\n elif re.search(r'(\\d\\d)(\\d\\d)\\s+(h(ou)?rs?|(on\\s+)?\\d\\d?\\/\\d)', string, re.I) is not None:\r\n match = re.search(r'(\\d\\d)(\\d\\d)\\s+(h(ou)?rs?|(on\\s+)?\\d\\d?\\/\\d)', string, re.I)\r\n h = match.group(1)\r\n min = match.group(2)\r\n\r\n if h is not None:\r\n if fs is not None:\r\n fs = re.sub(r'\\.', r'', fs)\r\n iso += 'T%02d%02d%02d.%02d' % (int(h), int(min), int(s), int(fs))\r\n elif s is not None:\r\n iso += 'T%02d%02d%02d' % (int(h), int(min), int(s))\r\n elif min is not None:\r\n iso += 'T%02d%02d' % (int(h), int(min))\r\n\r\n if zone is not None:\r\n iso += zone.lstrip()\r\n\r\n return iso", "def _make_iso_time(time: datetime,\n date: datetime,\n time_zone: pytz.timezone) -> str:\n time_combined = time.replace(year=date.year,\n month=date.month,\n day=date.day)\n return time_zone.localize(time_combined).isoformat()", "def convertFromISODate(date):\n if date:\n try:\n datetime_object = datetime.datetime.strptime(date, '%Y-%m-%dT%H:%M:%S.%fZ')\n except ValueError:\n return date\n else:\n return datetime_object.strftime('%Y-%m-%d')\n else:\n return None", "def format_date(self, data):\r\n if self.datetime_formatting == 'rfc-2822':\r\n return format_date(data)\r\n\r\n return data.isoformat()", "def convert_date(iso_string): ## ##\n d = datetime.strptime(iso_string, \"%Y-%m-%dT%H:%M:%S%z\") ##\n return d.strftime(\"%A %d %B %Y\") ##", "def interpret_date(text):\n try:\n as_arrow = arrow.get(text, \"MM/DD/YYYY\").replace(\n tzinfo=tz.tzlocal())\n except:\n flask.flash(\"Date '{}' didn't fit expected format 12/31/2001\")\n raise\n return as_arrow.isoformat()", "def datetime_to_isoformat(obj: datetime.datetime) -> str:\n return obj.replace(tzinfo=datetime.timezone.utc).isoformat().replace(\"+00:00\", \"Z\")", "def isoformat(dt):\n return dt.isoformat().replace(\"+00:00\", \"Z\")", "def format_iso(dt, default_tzinfo=local_timezone):\n dt = dt if dt.tzinfo else dt.replace(tzinfo=default_tzinfo)\n return dt.astimezone(utc_timezone).replace(tzinfo=None).isoformat()+'Z'", "def iso_date(self, t=None):\n if t is None:\n t = time.time()\n time_str = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(t))\n\n return time_str", "def convert_from_iso(s):\n # TODO: Allow for more timezones than just -6 GMT\n return datetime.datetime.strptime(s, \"%Y-%m-%dT%H:%M:%S-06:00\")", "def to_iso(dt):\n return dt.strftime(ISO_FORMAT)", "def format_datetime(self, data):\r\n data = make_naive(data)\r\n if self.datetime_formatting == 'rfc-2822':\r\n return format_datetime(data)\r\n\r\n return data.isoformat()", "def date_string_to_iso(string):\n date=None\n if string is not None:\n try:\n #if separator is \"-\"\n if \"-\" in string:\n strings=string.split(\"-\")\n else:\n strings=string.split(\"/\")\n\n #~ print \"strings\"\n #~ print strings\n \n #if year is first\n if 
len(strings[0])==4:\n year, month, day=strings[0], strings[1], strings[2]\n #if year is last\n else:\n #the year must be coded on 4 digits\n year, month, day=strings[2], strings[1], strings[0]\n date=date_split_to_iso(year, month, day)\n except Exception, e:\n print \"pb\", string\n print \"wrong date format\", e\n\n #return None if date string is None\n return date", "def date_to_str(obj: \"date\") -> str:\n return obj.isoformat()", "def convert_date(iso_string):\n d = datetime.strptime(iso_string, \"%Y-%m-%dT%H:%M:%S%z\")\n return d.strftime('%I:%M %p %A %d %B %Y')", "def date_to_iso8601(date):\n dateTimeStr = date.strftime('%Y-%m-%dT%H:%M:%S')\n timeZone_Sign = date.strftime('%z')[0:1]\n timeZone_Str = '%s:%s' % (\n date.strftime('%z')[1:3], date.strftime('%z')[3:5]\n )\n return '{dateTimeStr}{tzsign}{timezone}'.format(\n dateTimeStr=dateTimeStr,\n tzsign=timeZone_Sign,\n timezone=timeZone_Str\n ).replace(':', '%3A').replace('+', '%2B')", "def convert_date(iso_string):\n d = datetime.strptime(iso_string, \"%Y-%m-%dT%H:%M:%S%z\")\n return d.strftime(\"%A %d %B %Y\")", "def format_datestr(v):\n return v.isoformat() + 'Z'", "def convert_date(iso_string):\n d = datetime.strptime(iso_string, \"%Y-%m-%dT%H:%M:%S%z\")\n return d.strftime('%A %d %B %Y')", "def convert_date(iso_string):\n d = datetime.strptime(iso_string, \"%Y-%m-%dT%H:%M:%S%z\")\n return d.strftime('%A %d %B %Y')", "def stamp2iso(string):\n return str(datetime.fromtimestamp(int(string)).strftime(\"%Y-%m-%dT%H:%M:%S\"))", "def toisostring(dt):\n return dt.format(ISOFORMAT) + 'Z'", "def parse_iso8601(self, date_str, tz=None):\n date = iso8601.parse_date(date_str, default_timezone=None)\n if date.tzinfo:\n return date\n else:\n local_tz = pytz.timezone(tz) if tz else pytz.timezone(\n self.default_tz)\n return local_tz.localize(date)", "def numeric_date_recover(self):\n \n sp_time_zone, current_datetime = self.setup_datetime() \n converter2sptimezone = current_datetime.astimezone(sp_time_zone)\n \n return converter2sptimezone.strftime('%d-%m-%Y')", "def _convert_to_isoformat(date_time):\n if not date_time:\n return None\n if date_time[-1] == \"Z\":\n delta = 0\n timestamp = date_time[:-1]\n else:\n timestamp = date_time[:-6]\n sign, offset = date_time[-6], date_time[-5:]\n delta = int(sign + offset[:1]) * 60 + int(sign + offset[-2:])\n\n check_decimal = timestamp.split(\".\")\n if len(check_decimal) > 1:\n decimal_str = \"\"\n for digit in check_decimal[1]:\n if digit.isdigit():\n decimal_str += digit\n else:\n break\n if len(decimal_str) > 6:\n timestamp = timestamp.replace(decimal_str, decimal_str[0:6])\n\n if delta == 0:\n tzinfo = TZ_UTC\n else:\n tzinfo = timezone(datetime.timedelta(minutes=delta))\n\n try:\n deserialized = datetime.datetime.strptime(timestamp, \"%Y-%m-%dT%H:%M:%S.%f\")\n except ValueError:\n deserialized = datetime.datetime.strptime(timestamp, \"%Y-%m-%dT%H:%M:%S\")\n\n deserialized = deserialized.replace(tzinfo=tzinfo)\n return deserialized", "def to_iso_datetime(value: Union[datetime.datetime, datetime.time]) -> str:\n retval = value.isoformat()\n if value.tzinfo is None:\n retval += 'Z'\n else:\n # replace +00:00 timezone with Z\n retval = re.sub('[+-]00:00$', 'Z', retval)\n return retval", "def format_date(d):\n if type(d) == str:\n d = dateutil_parse(d)\n return d.isoformat()", "def american_date_to_iso(connection):\n _update_date_by_regexp(connection=connection,\n regexp=\"^[0-9]{2}/[0-9]{2}/[0-9]{4}$\",\n new_value=\"\"\"CONCAT_WS('-',\n SUBSTR(cav.attribute_value, 7, 4),\n SUBSTR(cav.attribute_value, 
1, 2),\n SUBSTR(cav.attribute_value, 4, 2))\n \"\"\")", "def to_isoformat(self) -> str:\n return self.isoformat()" ]
[ "0.6905238", "0.65572876", "0.6553589", "0.64506024", "0.64370036", "0.6369056", "0.6347456", "0.62363726", "0.6200606", "0.6184233", "0.6160375", "0.6148745", "0.61423504", "0.6141841", "0.6125747", "0.60974556", "0.606667", "0.60619223", "0.6043991", "0.60352457", "0.60352457", "0.598807", "0.59829974", "0.5967835", "0.59618247", "0.5956251", "0.5947979", "0.5935931", "0.59106636", "0.5909712" ]
0.6582443
1
Given a google 'service' object, return a list of calendars. Each calendar is represented by a dict. The returned list is sorted to have the primary calendar first, and selected (that is, displayed in Google Calendars web app) calendars before unselected calendars.
def list_calendars(service): app.logger.debug("Entering list_calendars") calendar_list = service.calendarList().list().execute()["items"] result = [ ] for cal in calendar_list: kind = cal["kind"] id = cal["id"] if "description" in cal: desc = cal["description"] else: desc = "(no description)" summary = cal["summary"] # Optional binary attributes with False as default selected = ("selected" in cal) and cal["selected"] primary = ("primary" in cal) and cal["primary"] result.append( { "kind": kind, "id": id, "summary": summary, "selected": selected, "primary": primary }) return sorted(result, key=cal_sort_key)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list_calendars(service):\n app.logger.debug(\"Entering list_calendars with service\")\n calendar_list = service.calendarList().list().execute()[\"items\"]\n app.logger.debug(\"Got calendar list\")\n result = []\n for cal in calendar_list:\n kind = cal[\"kind\"]\n id = cal[\"id\"]\n if \"description\" in cal:\n desc = cal[\"description\"]\n else:\n desc = \"(no description)\"\n summary = cal[\"summary\"]\n # Optional binary attributes with False as default\n selected = (\"selected\" in cal) and cal[\"selected\"]\n primary = (\"primary\" in cal) and cal[\"primary\"]\n\n result.append(\n {\"kind\": kind, \"id\": id, \"summary\": summary, \"selected\": selected,\n \"primary\": primary})\n app.logger.debug(\"About to return from list_calendars with: \", result)\n return sorted(result, key=cal_sort_key)", "def getUsrCals(self, service):\n return self.service.calendarList().list().execute()", "def func_calendar_list():\r\n creds = None\r\n global page_token\r\n #global new_calendar_list=[]\r\n # The file token.pickle stores the user's access and refresh tokens, and is\r\n # created automatically when the authorization flow completes for the first\r\n # time.\r\n if os.path.exists('token.pickle'):\r\n with open('token.pickle', 'rb') as token:\r\n creds = pickle.load(token)\r\n # If there are no (valid) credentials available, let the user log in.\r\n if not creds or not creds.valid:\r\n if creds and creds.expired and creds.refresh_token:\r\n creds.refresh(Request())\r\n else:\r\n flow = InstalledAppFlow.from_client_secrets_file(\r\n 'credentials.json', SCOPES)\r\n creds = flow.run_local_server(port=0)\r\n # Save the credentials for the next run\r\n with open('token.pickle', 'wb') as token:\r\n pickle.dump(creds, token)\r\n\r\n service = build('calendar', 'v3', credentials=creds)\r\n\r\n calendar_list = service.calendarList().list(pageToken=page_token).execute()\r\n new_calendar_list = []\r\n for calendar_list_entry in calendar_list['items']:\r\n new_calendar_list.append(calendar_list_entry['summary'])\r\n page_token = calendar_list.get('nextPageToken')\r\n return (new_calendar_list)", "def calendars(self):\r\n return c.Calendars(self)", "def calendars(self):\r\n return c.Calendars(self)", "def get_calendar(gtfs_info):\n # Parse calendar\n use_cols = ['service_id', 'weekdays', 'start_date', 'end_date']\n calendar = gtfs_info.drop_duplicates(subset=use_cols)\n calendar = calendar[use_cols].copy()\n calendar = calendar.reset_index(drop=True)\n\n # Container for final results\n gtfs_calendar = pd.DataFrame()\n\n # Parse weekday columns\n for idx, row in calendar.iterrows():\n # Get dayinfo\n dayinfo = row['weekdays']\n\n # Parse day information\n dayrow = parse_day_range(dayinfo)\n\n # Add service and operation range info\n dayrow['service_id'] = row['service_id']\n dayrow['start_date'] = row['start_date']\n dayrow['end_date'] = row['end_date']\n\n # Add to container\n gtfs_calendar = gtfs_calendar.append(dayrow, ignore_index=True, sort=False)\n\n # Fix column order\n col_order = ['service_id', 'monday', 'tuesday', 'wednesday',\n 'thursday', 'friday', 'saturday', 'sunday',\n 'start_date', 'end_date']\n gtfs_calendar = gtfs_calendar[col_order].copy()\n\n # Ensure correct datatypes\n int_types = ['monday', 'tuesday', 'wednesday',\n 'thursday', 'friday', 'saturday', 'sunday']\n for col in int_types:\n gtfs_calendar[col] = gtfs_calendar[col].astype(int)\n\n return gtfs_calendar", "def calendar_choices(self):\n if not self._calendars:\n if self.authenticated:\n default = 
self.account.schedule().get_default_calendar()\n # {\n # \"default\" : <DEFAULT_CALENDAR>,\n # \"<CALENDAR_NAME>: <CALENDAR>,\n # ...\n # }\n self._calendars = {\n DEFAULT_CALENDAR: default,\n **{\n c.name: c\n for c in self.account.schedule().list_calendars() if c.name != default.name\n }\n }\n\n return self._calendars", "def list_calendars(self, limit=None, *, query=None, order_by=None):\n url = self.build_url(self._endpoints.get('root_calendars'))\n\n params = {}\n if limit:\n params['$top'] = limit\n if query:\n params['$filter'] = str(query)\n if order_by:\n params['$orderby'] = order_by\n\n response = self.con.get(url, params=params or None)\n if not response:\n return []\n\n data = response.json()\n\n # Everything received from cloud must be passed as self._cloud_data_key\n contacts = [self.calendar_constructor(parent=self, **{\n self._cloud_data_key: x}) for x in data.get('value', [])]\n\n return contacts", "def calendars(self):\n return self.calendar_home_set.calendars()", "def calendar_list(self, calendar_id):\r\n return CalendarList(self, calendar_id)", "def calendars(self):\n cals = []\n\n data = self.children(cdav.Calendar.tag)\n for c_url, c_type, c_name in data:\n try:\n cal_id = c_url.split(\"/\")[-2]\n except:\n log.error(f\"Calendar {c_name} has unexpected url {c_url}\")\n cal_id = None\n cals.append(\n Calendar(self.client, id=cal_id, url=c_url, parent=self, name=c_name)\n )\n\n return cals", "def get_events():\n\n all_calendar_events = {}\n\n # Suppress warning in logs\n # https://github.com/googleapis/google-api-python-client/issues/299\n service = build('calendar', 'v3', credentials=google_auth.creds, cache_discovery=False)\n\n now = datetime.datetime.utcnow().today().isoformat() + 'Z' # 'Z' indicates UTC time\n\n for calendar_name, calendar_id in config.GOOGLE_CALENDARS.items():\n all_events = []\n events_result = service.events().list(calendarId=calendar_id, timeMin=now,\n maxResults=10, singleEvents=True, orderBy='startTime').execute()\n events = events_result.get('items', [])\n if not events:\n all_events.append(['Ei tulevia tapahtumia'])\n for event in events:\n start = event['start'].get('dateTime', event['start'].get('date'))[:10]\n all_events.append([start, event[\"summary\"], event[\"htmlLink\"]])\n all_calendar_events[calendar_name] = all_events\n\n return all_calendar_events", "def calendars(self):\n if \"calendars\" in self._prop_dict:\n return CalendarsCollectionPage(self._prop_dict[\"calendars\"])\n else:\n return None", "def calendars(self):\n return self.properties.get('calendars',\n EntityCollection(self.context, Calendar,\n ResourcePath(\"calendars\", self.resource_path)))", "def get_gcal_service(credentials):\n app.logger.debug(\"Entering get_gcal_service\")\n http_auth = credentials.authorize(httplib2.Http())\n service = discovery.build('calendar', 'v3', http=http_auth)\n plusService = discovery.build('plus', 'v1', http=http_auth)\n app.logger.debug(\"Returning service\")\n return [service, plusService]", "def get_gcal_events(service, from_time):\n\n # The list() method returns a dict containing various metadata along with the actual calendar entries (if any). 
\n # It is not guaranteed to return all available events in a single call, and so may need called multiple times\n # until it indicates no more events are available, signalled by the absence of \"nextPageToken\" in the result dict\n\n logger.debug('Retrieving Google Calendar events')\n\n # make an initial call, if this returns all events we don't need to do anything else,,,\n eventsResult = service.events().list(calendarId=CALENDAR_ID, \n timeMin=from_time, \n singleEvents=True, \n orderBy='startTime', \n showDeleted=True).execute()\n\n events = eventsResult.get('items', [])\n # if nextPageToken is NOT in the dict, this should be everything\n if 'nextPageToken' not in eventsResult:\n logger.info('> Found {:d} upcoming events in Google Calendar (single page)'.format(len(events)))\n return events\n\n # otherwise keep calling the method, passing back the nextPageToken each time\n while 'nextPageToken' in eventsResult:\n token = eventsResult['nextPageToken']\n eventsResult = service.events().list(calendarId=CALENDAR_ID, \n timeMin=from_time, \n pageToken=token, \n singleEvents=True, \n orderBy='startTime', \n showDeleted=True).execute()\n newevents = eventsResult.get('items', [])\n events.extend(newevents)\n logger.debug('> Found {:d} events on new page, {:d} total'.format(len(newevents), len(events)))\n \n logger.info('> Found {:d} upcoming events in Google Calendar (multi page)'.format(len(events)))\n return events", "def get_cal_events(user, calservice):\r\n cal_page_token = None\r\n while True:\r\n try:\r\n #the next for loop retrives the calendar events\r\n #list to be checked for matching criteria\r\n prieml = user['primaryEmail']\r\n creator_to_del = '[email protected]'\r\n event_to_del = 'Digital Directorate Team Meeting'\r\n events = calservice.events().list(calendarId=prieml,\r\n pageToken=cal_page_token).execute()\r\n for event in events['items']:\r\n if event['status'] != 'cancelled':\r\n try:\r\n #this is the criteri to be checked against\r\n organiser = event['organizer']['email']\r\n summary = event['summary']\r\n if organiser == creator_to_del \\\r\n and summary == event_to_del:\r\n try:\r\n #checking for specific start date \r\n #in the event some events have different\r\n #dateTime\\date keywords\r\n if event['start']['dateTime']:\r\n evdate = event['start']['dateTime']\r\n startDate = datetime.strptime(evdate[0:10],\r\n '%Y-%m-%d')\r\n today = datetime.today()\r\n if startDate > today:\r\n print('{0} ({1}) {2} {3}'.format(prieml,\r\n event['summary'],\r\n event['organizer']['email'],\r\n evdate[0:10]))\r\n except KeyError:\r\n #if the keyword is not dateTime \r\n #then fetch date keyword\r\n evdate = event['start']['date']\r\n startDate = datetime.strptime(evdate, '%Y-%m-%d')\r\n today = datetime.today()\r\n if startDate > today:\r\n print('{0} ({1}) {2} {3}'.format(prieml,\r\n event['summary'],\r\n event['organizer']['email'],\r\n evdate))\r\n except KeyError:\r\n continue\r\n cal_page_token = events.get('nextPageToken')\r\n if not cal_page_token:\r\n break\r\n except ValueError:\r\n print('Oops! Thhe last event has an error. 
Try again...')", "def getAllCampaigns(service):\n # Using AWQL to retrieve campaigns.\n query = (adwords.ServiceQueryBuilder()\n .Select('Id', 'Name', 'Status', 'StartDate', 'EndDate',\n 'BudgetId', 'BudgetStatus', 'BudgetName', 'Amount',\n 'BudgetReferenceCount', 'IsBudgetExplicitlyShared')\n .Limit(0, pageSize)\n .Build())\n campaigns = []\n for page in query.Pager(service):\n if page['entries']:\n for campaign in page['entries']:\n campaigns.append(campaign)\n else:\n pass\n return campaigns", "def calendar_lists(self):\r\n return CalendarLists(self)", "def choose_calendar(self):\n page_token = None\n self.calendar_list = self.service.calendarList().list(pageToken=page_token).execute()\n for calendar_list_entry in self.calendar_list['items']:\n if similar(calendar_list_entry['summary'], self.args[\"calendar_name\"]) > 0.8:\n self.chosen_calendar = calendar_list_entry['id']\n return\n raise CalendarNotFoundException(\"No calendar with the provided name was found\")", "def calendar(self, calendar_id):\r\n return c.Calendar(self, calendar_id)", "def calendar(self, calendar_id):\r\n return c.Calendar(self, calendar_id)", "def readGoogleCal(self):\r\n creds = None\r\n # The file token.pickle stores the user's access and refresh tokens, and is\r\n # created automatically when the authorization flow completes for the first\r\n # time.\r\n if os.path.exists('token.pickle'):\r\n with open('token.pickle', 'rb') as token:\r\n creds = pickle.load(token)\r\n # If there are no (valid) credentials available, let the user log in.\r\n if not creds or not creds.valid:\r\n if creds and creds.expired and creds.refresh_token:\r\n creds.refresh(Request())\r\n else:\r\n flow = InstalledAppFlow.from_client_secrets_file(\r\n 'credentials.json', SCOPES)\r\n creds = flow.run_local_server(port=0)\r\n # Save the credentials for the next run\r\n with open('token.pickle', 'wb') as token:\r\n pickle.dump(creds, token)\r\n\r\n service = build('calendar', 'v3', credentials=creds)\r\n\r\n # Call the Calendar API\r\n now = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time\r\n print('Getting the upcoming 10 events')\r\n events_result = service.events().list(calendarId='primary', timeMin=now,\r\n maxResults=10, singleEvents=True,\r\n orderBy='startTime').execute()\r\n events = events_result.get('items', [])\r\n\r\n if not events:\r\n print('No upcoming events found.')\r\n\r\n for event in events:\r\n start = event['start'].get('dateTime', event['start'].get('date'))\r\n\r\n dateVar, timeVar = start.split('T')\r\n eventVar = event['summary']\r\n\r\n self.calDate.append(dateVar)\r\n self.calTime.append(timeVar)\r\n self.calEvent.append(eventVar)\r\n #print(calDate[count]+' ' + calTime[count] + ' ' +calEvent[count])\r", "def get_gcal_service(credentials):\n app.logger.debug(\"Entering get_gcal_service\")\n http_auth = credentials.authorize(httplib2.Http())\n service = discovery.build('calendar', 'v3', http=http_auth)\n app.logger.debug(\"Returning service\")\n return service", "def get_gcal_service(credentials):\n app.logger.debug(\"Entering get_gcal_service\")\n http_auth = credentials.authorize(httplib2.Http())\n service = discovery.build('calendar', 'v3', http=http_auth)\n app.logger.debug(\"Returning service\")\n return service", "def _get_service_list(self, service_name):\n service_list = self.service_dict[service_name]\n\n return service_list", "def list_services(service='http://arcgis.inei.gob.pe:6080/arcgis/rest/services'):\n all_services = []\n r = _post(service)\n for s in r['services']:\n 
all_services.append('/'.join([service, s['name'], s['type']]))\n for s in r['folders']:\n new = '/'.join([service, s])\n endpt = _post(new)\n for serv in endpt['services']:\n all_services.append('/'.join([service, serv['name'], serv['type']]))\n return all_services", "def main():\r\n creds = None\r\n # The file token.json stores the user's access and refresh tokens, and is\r\n # created automatically when the authorization flow completes for the first\r\n # time.\r\n if os.path.exists('cal_token.json'):\r\n creds = Credentials.from_authorized_user_file('cal_token.json', SCOPES)\r\n # If there are no (valid) credentials available, let the user log in.\r\n if not creds or not creds.valid:\r\n if creds and creds.expired and creds.refresh_token:\r\n creds.refresh(Request())\r\n else:\r\n flow = InstalledAppFlow.from_client_secrets_file(\r\n 'client_secret.json', SCOPES)\r\n creds = flow.run_local_server(port=0)\r\n # Save the credentials for the next run\r\n with open('cal_token.json', 'w') as token:\r\n token.write(creds.to_json())\r\n\r\n service = build('calendar', 'v3', credentials=creds)\r\n\r\n return service", "def get_calendar_events(calendar_url, params=None):\n return cache_calendar_events(calendar_url, params=params)\n # return CALENDAR_CACHED or cache_calendar(calendar_url)", "def selectable_services():\n\n db = current.db\n s3db = current.s3db\n\n stable = s3db.org_service\n query = (stable.deleted == False)\n rows = db(query).select(stable.id,\n stable.name,\n )\n services = {row.id: row.name for row in rows}\n return services" ]
[ "0.82477534", "0.6850524", "0.6471131", "0.63463163", "0.63463163", "0.63192886", "0.62622386", "0.6232313", "0.61395335", "0.61257684", "0.6111868", "0.61005104", "0.603974", "0.59506303", "0.5812229", "0.5798276", "0.56924987", "0.5668386", "0.5658008", "0.55682445", "0.5539597", "0.5539597", "0.55091655", "0.5496159", "0.5496159", "0.5475961", "0.5406943", "0.53625625", "0.53276706", "0.5286893" ]
0.8079838
1
A helper method that generates a dictionary of arguments needed to instantiate a BaseBoto object. The purpose of this method is to abstract out the code to handle optional CLI arguments and not duplicate the None handling code.
def __get_arguments(args=None, logger=None, stats=None): if not args: parser = get_parser() add_boto_cli_arguments(parser) # Parse only the known arguments added by add_boto_cli_arguments(). # We only need those arguments to create Boto object, nothing else. # parse_known_args() return (Namespace, list of unknown arguments), # we only care about the Namespace object here. args = parser.parse_known_args()[0] if not logger: logger = get_logger(name=NAME) if not stats: stats = get_stats(prefix=NAME) return { 'log_level': getattr(args, 'boto_log_level', DEFAULT['log_level']()), 'access_key': getattr(args, 'boto_access_key', DEFAULT['access_key']()), 'secret_key': getattr(args, 'boto_secret_key', DEFAULT['secret_key']()), 'region': getattr(args, 'boto_region', DEFAULT['region']()), 'logger': logger, 'stats': stats, }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_argdict(cls, toolchain, args):\n return {} # Empty must be overloaded (if required)", "def GetArgs():\n \n UserArgs = {}\n UserArgs['help'] = False\n UserArgs['RsodFileName'] = \"\"\n UserArgs['BiosPathX64'] = \"\"\n\n for i in range(1,len(sys.argv)):\n if sys.argv[i].lower() == \"-help\" : UserArgs[\"help\"] = True\n elif sys.argv[i].lower() == \"-h\" : UserArgs[\"help\"] = True\n elif \"-rsodfile=\" in sys.argv[i].lower() : UserArgs['RsodFileName'] = sys.argv[i].split ('=', 1)[1]\n elif \"-biospathx64=\" in sys.argv[i].lower() : UserArgs['BiosPathX64'] = sys.argv[i].split ('=', 1)[1]\n\n return UserArgs", "def parse_generate_arguments(arguments):\n return_value = {}\n for key in arguments:\n return_value[key] = CONFIG_KEY_PARSER[key](arguments[key])\n\n return return_value", "def _build_instance_common_args(self, ec2_keyname, availability_zone,\r\n keep_alive, hadoop_version):\r\n params = {\r\n 'Instances.KeepJobFlowAliveWhenNoSteps' : str(keep_alive).lower(),\r\n 'Instances.HadoopVersion' : hadoop_version\r\n }\r\n\r\n if ec2_keyname:\r\n params['Instances.Ec2KeyName'] = ec2_keyname\r\n if availability_zone:\r\n params['Instances.Placement.AvailabilityZone'] = availability_zone\r\n\r\n return params", "def __make_params(args):\n data = {}\n for i in range(len(args)):\n if i == 0: # saltando a primeira iteracao pra\n # saltar o parametro que é o nome do arquivo de execução\n continue\n if not i % 2 == 0:\n data[args[i]] = args[i + 1]\n return data", "def get_args_dict(class_, options: Options) -> dict:\n\n argspec = getfullargspec(class_.__init__)\n init_args = argspec.args\n init_args.pop(0) # self\n result = {k: v for k, v in options.items() if k in init_args}\n\n positional_args = init_args[:-len(argspec.defaults)]\n\n missing_args = [a for a in positional_args if a not in options]\n if missing_args:\n raise BadConfigError(\n f'Some required parameters are missing in \"{options[\"name\"]}\" config: ' +\n ', '.join(missing_args)\n )\n return result", "def generate_command_args_with_additional_fields(additional_fields):\n command_args: Dict[str, str] = {}\n actual_additional_fields: Dict[str, str] = {}\n for each_field in additional_fields:\n if each_field in DEFAULT_ARGS:\n command_args[each_field] = additional_fields[each_field]\n else:\n actual_additional_fields[each_field] = additional_fields[each_field]\n command_args[\"additional_fields\"] = remove_null_fields_and_convert_additional_fields_in_string(\n actual_additional_fields)\n return command_args", "def _get_init_args(self):\n\n return dict(enum=self.enum, dflt=self._defname,\n base=self.base, shape=self.shape)", "def _arg_parse(self, **options) -> Dict[str, Any]:\n extra_options = dict()\n for key, value in options.items():\n private_key = f\"__{key}\"\n if hasattr(self, private_key):\n setattr(self, private_key, value)\n else:\n extra_options[key] = value\n\n return extra_options", "def _getArgs():\n parser = getCommonArgsParser(\n 'Generate OpenShift deployment YAML file'\n )\n\n addArgOverlayUuid(parser)\n addArgOutputFile(parser, None)\n\n return parser.parse_args()", "def add_extra_args(self):\n super(AwsCreateInstancesMethod, self).add_extra_args()\n self.parser.add_argument(\"--key_pair_name\", default=os.environ.get(\"YB_EC2_KEY_PAIR_NAME\"),\n help=\"AWS Key Pair name\")\n self.parser.add_argument(\"--security_group_id\", default=None,\n help=\"AWS comma delimited security group IDs.\")\n self.parser.add_argument(\"--volume_type\", choices=[\"gp3\", \"gp2\", \"io1\"], default=\"gp2\",\n help=\"Volume type 
for volumes on EBS-backed instances.\")\n self.parser.add_argument(\"--spot_price\", default=None,\n help=\"Spot price for each instance (if desired)\")\n self.parser.add_argument(\"--cmk_res_name\", help=\"CMK arn to enable encrypted EBS volumes.\")\n self.parser.add_argument(\"--iam_profile_arn\", help=\"ARN string for IAM instance profile\")\n self.parser.add_argument(\"--disk_iops\", type=int, default=1000,\n help=\"desired iops for aws v4 instance volumes\")\n self.parser.add_argument(\"--disk_throughput\", type=int, default=125,\n help=\"desired throughput for aws gp3 instance volumes\")", "def get_kwargs():\n\treturn get_kwargs_raw(sys.argv)", "def _generate_options(self, **kwargs: Any) -> dict:\n raise NotImplementedError", "def _arg2kw(self, mixed_args):\n def insert(dict_, k, v):\n if k in dict_:\n print \"duplicated args : %s \" % kv[0]\n raise ArgParseError\n dict_[k] = v\n \n opts = []\n args = {}\n\n n = len(mixed_args)\n i = 0\n while i < n:\n a = mixed_args[i]\n if a == '-' or a == '--' :\n opts.append(a)\n elif a.startswith(\"---\"):\n print \"invalid args: %s\" % mixed_args\n print \"only the following formats are supported:\"\n print \" arg1\"\n print \" --input=name1\"\n print \" --output name3\"\n print \" -oname2\"\n print \" -o name4\"\n raise ArgParseError\n elif a.startswith(\"--\"):\n kv = a[2:].split(\"=\", 1)\n if len(kv) == 2:\n insert(args, kv[0], kv[1])\n else:\n i += 1\n insert(args, kv[0], mixed_args[i])\n elif a.startswith(\"-\"):\n if len(a) > 2:\n insert(args, a[1], a[2:])\n else:\n i += 1\n insert(args, a[1], mixed_args[i])\n else:\n opts.append(a)\n i += 1\n \n return opts, args", "def as_kwargs(self) -> Dict[str, Any]:\n ret = {}\n for arg in self.args.values():\n ret[arg.name] = arg.value\n return ret", "def get_defaults(self):\n default_dict = {}\n args, varargs, keyword, defaults = inspect.getargspec(self.exec_obj)\n if defaults:\n default_dict = dict(zip(args[-len(defaults):], defaults))\n return default_dict", "def _get_args(self):\n parser = ArgumentParser(\n description=\"Dynamically generates Snakefiles for data \"\n \"integration and machine learning pipelines.\"\n )\n\n parser.add_argument(\n \"-c\",\n \"--config\",\n help=(\n \"Configuration filepath. 
(Will look for file named config.yml \"\n \"in current working directory, if none specified.)\"\n ),\n )\n\n parser.add_argument(\n \"-r\",\n \"--run\",\n default=False,\n help=(\n \"Runs pipeline, in addition to generating Snakefile.\"\n ),\n )\n\n # convert command-line args to a dict and return\n args = parser.parse_args()\n\n args = dict(\n (k, v) for k, v in list(vars(args).items()) if v is not None\n )\n\n return args", "def getOptions( argv ):\n opts = {} # Empty dictionary to store key-value pairs.\n while argv: # While there are arguments left to parse...\n if argv[0][0] == '-': # Found a \"-name value\" pair.\n opts[argv[0][1:]] = argv[1] # Add key and value to the dictionary.\n argv = argv[1:] # Reduce the argument list by copying it starting from index 1.\n if 'seed' in opts:\n cons.random_seed = int( opts['seed'] )\n if 'N' in opts:\n cons.N = int( opts['N'] )\n if 'MCMC' in opts:\n bcons.B = int( opts['MCMC'] )\n if 'minPop' in opts:\n bcons.min_pop_size = int( opts['minPop'] )\n if 'maxParents' in opts:\n bcons.max_parents = int( opts['maxParents'] )\n if 'localPopSize' in opts:\n bcons.A = int( opts['localPopSize'] )\n if 'binaryBOA' in opts:\n bcons.binary_coding = True if opts['binaryBOA'].lower() == 'true' else False\n if 'useBOA' in opts:\n # True by default\n if opts['useBOA'].lower() == 'true':\n cons.is_boa = True\n else:\n cons.is_boa = False\n return opts", "def args_map_custom(cls) -> dict:\n args = {}\n args.update(cls.args_map_export())\n args.update({\"json_flat\": False})\n return args", "def _template_kwargs(*, logical_name: str, bucket: str, key: str) -> Dict[str, str]:\n if logical_name == \"ArtifactBuilder\":\n return dict(ArtifactBucketName=bucket, WorkersS3Key=key)\n elif logical_name == \"LayerBuilder\":\n return dict(ReplicationBucket=bucket, WorkersS3Key=key)\n else:\n raise ValueError(f\"Unknown logical name: {logical_name}\")", "def get_args():\n\n params = {}\n\n if len(argv) == 1:\n\n input_file = input('Please enter the path to the parameter file: ')\n\n else:\n\n input_file = argv[1]\n\n if path.isfile(input_file) == False:\n\n print('ERROR: Cannot find input parameter file')\n exit()\n\n flines = open(input_file,'r').readlines()\n\n str_keys = ['catalog_file', 'red_dir',\n 'target_ra', 'target_dec',\n 'star_class', 'isochrone_file',\n 'target_lc_file_g', 'target_lc_file_r', 'target_lc_file_i']\n\n for line in flines:\n\n (key, value) = line.replace('\\n','').split()\n\n if key in str_keys:\n\n params[key] = value\n\n else:\n\n if 'none' not in str(value).lower():\n params[key] = float(value)\n else:\n params[key] = None\n\n return params", "def oic_pre_construct(self, cli_info, request_args=None, **kwargs):\n for prop in self.msg_type.c_param.keys():\n if prop in request_args:\n continue\n try:\n request_args[prop] = cli_info.behaviour[prop]\n except KeyError:\n pass\n\n if \"post_logout_redirect_uris\" not in request_args:\n try:\n request_args[\n \"post_logout_redirect_uris\"] = \\\n cli_info.post_logout_redirect_uris\n except AttributeError:\n pass\n\n if \"redirect_uris\" not in request_args:\n try:\n request_args[\"redirect_uris\"] = cli_info.redirect_uris\n except AttributeError:\n raise MissingRequiredAttribute(\"redirect_uris\", request_args)\n\n try:\n if cli_info.provider_info[\n 'require_request_uri_registration'] is True:\n request_args['request_uris'] = cli_info.generate_request_uris(\n cli_info.requests_dir)\n except KeyError:\n pass\n\n return request_args, {}", "def _parse_create_args(self, args):\r\n data = {\r\n \"hourly\": 
args['--hourly'],\r\n \"cpus\": args['--cpu'],\r\n \"domain\": args['--domain'],\r\n \"hostname\": args['--hostname'],\r\n \"private\": args['--private'],\r\n \"dedicated\": args['--dedicated'],\r\n \"disks\": args['--disk'],\r\n \"local_disk\": not args['--san'],\r\n }\r\n\r\n try:\r\n memory = int(args['--memory'])\r\n if memory < 1024:\r\n memory = memory * 1024\r\n except ValueError:\r\n unit = args['--memory'][-1]\r\n memory = int(args['--memory'][0:-1])\r\n if unit in ['G', 'g']:\r\n memory = memory * 1024\r\n if unit in ['T', 'r']:\r\n memory = memory * 1024 * 1024\r\n\r\n data[\"memory\"] = memory\r\n\r\n if args['--monthly']:\r\n data['hourly'] = False\r\n\r\n if args.get('--os'):\r\n data['os_code'] = args['--os']\r\n\r\n if args.get('--image'):\r\n data['image_id'] = args['--image']\r\n\r\n if args.get('--datacenter'):\r\n data['datacenter'] = args['--datacenter']\r\n\r\n if args.get('--network'):\r\n data['nic_speed'] = args.get('--network')\r\n\r\n if args.get('--userdata'):\r\n data['userdata'] = args['--userdata']\r\n elif args.get('--userfile'):\r\n with open(args['--userfile'], 'r') as userfile:\r\n data['userdata'] = userfile.read()\r\n\r\n if args.get('--postinstall'):\r\n data['post_uri'] = args.get('--postinstall')\r\n\r\n # Get the SSH keys\r\n if args.get('--key'):\r\n keys = []\r\n for key in args.get('--key'):\r\n key_id = resolve_id(SshKeyManager(self.client).resolve_ids,\r\n key, 'SshKey')\r\n keys.append(key_id)\r\n data['ssh_keys'] = keys\r\n\r\n if args.get('--vlan_public'):\r\n data['public_vlan'] = args['--vlan_public']\r\n\r\n if args.get('--vlan_private'):\r\n data['private_vlan'] = args['--vlan_private']\r\n\r\n return data", "def _initiate_meta(kwargs, activity, ignores=()):\n meta = {AssociatedObjectId.ACTIVITY_ID: str(_retrieve_object_id(activity))}\n # also add the keys' in their snake case appearance so noPadding and no_padding, customHeight and custom_height\n keys_in_kwargs = KECARD_COMMON_KEYS + [snakecase(k) for k in KECARD_COMMON_KEYS]\n\n # initiate the meta based on known kwarg arguments\n for key in list(set(keys_in_kwargs)):\n if key in kwargs:\n meta[camelcase(key)] = kwargs.pop(key)\n\n # we check for custom_height specifically and deal with it.\n if snakecase(MetaWidget.CUSTOM_HEIGHT) in kwargs:\n meta[MetaWidget.CUSTOM_HEIGHT] = kwargs.pop(snakecase(MetaWidget.CUSTOM_HEIGHT))\n\n # remove the 'ignores' from the meta\n for key in ignores:\n if key in meta:\n del meta[key]\n\n return meta", "def check_args(args):\n map_args = {}\n\n if args['frequencies'] is None:\n return None\n\n if args['instance_type'] is None:\n return None\n\n if args['name'] is None:\n return None\n\n instance_details = AWS_INSTANCES.get(args['instance_type'])\n if instance_details is None:\n LOGGER.error('The instance type {0} is not supported.'.format(args['instance_type']))\n return None\n else:\n LOGGER.info(\n 'instance: {0}, vCPU: {1}, RAM: {2}GB, Disks: {3}x{4}GB, IOPS: {5}'.format(\n args['instance_type'],\n instance_details.vCPU,\n instance_details.memory,\n instance_details.number_disks,\n instance_details.size,\n instance_details.iops_support))\n\n map_args.update({\n 'ami_id': args['ami_id'] if args['ami_id'] is not None else AWS_AMI_ID,\n 'created_by': args['created_by'] if args['created_by'] is not None else getpass.getuser(),\n 'spot_price': args['spot_price'] if args['spot_price'] is not None else None,\n 'user_data': get_script(args['bash_script'] if args['bash_script'] is not None else BASH_SCRIPT_CLEAN_02),\n 'setup_disks': 
get_script(BASH_SCRIPT_SETUP_DISKS),\n 'instance_details': instance_details,\n })\n return map_args", "def init_json_args(cls):\n defaults = cls._base_json_args()\n\n if cls.__json_args__ is None:\n cls.__json_args__ = defaults\n else:\n cls.__json_args__ = mapping_extend(defaults, cls.__json_args__)", "def get_args():\n parser = ag.ArgumentParser(description='Places cations between oxygens '\n 'according to doi://10.1261/rna.2390311')\n parser.add_argument('-f', '--file', type=ag.FileType('r'),\n help='PDB file', required=True)\n\n parser.add_argument('-o', '--output', type=ag.FileType('w+'),\n help='Output PDB file', required=True)\n\n parser.add_argument('-c', '--cation-type', type=str,\n help='Cation type: NA, MG or CA (default NA)',\n choices = ['NA', 'MG', 'CA'],\n default='NA')\n \n parser.add_argument('-a', '--acid-type', type=str,\n help='Amino Acid type: RNA or DNA (default RNA)',\n choices = ['RNA', 'DNA'],\n default='RNA')\n\n parser.add_argument('-r', '--rotation-angle', type=int,\n help='Rotation angle in degrees (default 5)',\n default = 5)\n \n parser.add_argument('-n', '--number', type=int,\n help='Number of added cations, 0 - infinite (default 0)',\n default = 0)\n \n parser.add_argument('--na-run', action='store_true', \n help='Fill with Na cations')\n \n parser.add_argument('--na-limit', type=int,\n help='Number of Na cations to be added during Na fill'\n 'run. 0 - infinite (default 0)', default = 0)\n \n get_args = parser.parse_args()\n\n args_dict = {\n 'file' : get_args.file,\n 'output' : get_args.output,\n 'cation' : get_args.cation_type,\n 'acid' : get_args.acid_type,\n 'angle' : get_args.rotation_angle,\n 'number' : get_args.number,\n 'na_run' : get_args.na_run,\n 'na_limit': get_args.na_limit}\n\n return args_dict", "def _extract_params(self, kwargs, hyperparameters):\n init_params = dict()\n fit_params = dict()\n produce_params = dict()\n\n for name, param in hyperparameters.get('fixed', dict()).items():\n if name in kwargs:\n value = kwargs.pop(name)\n\n elif 'default' in param:\n value = param['default']\n\n else:\n raise TypeError(\"{} required argument '{}' not found\".format(self.name, name))\n\n init_params[name] = value\n\n for name, param in hyperparameters.get('tunable', dict()).items():\n if name in kwargs:\n init_params[name] = kwargs.pop(name)\n\n if not isinstance(self.fit_args, str):\n fit_args = [arg['name'] for arg in self.fit_args]\n else:\n fit_args = []\n\n if not isinstance(self.produce_args, str):\n produce_args = [arg['name'] for arg in self.produce_args]\n else:\n produce_args = []\n\n for name in list(kwargs.keys()):\n if name in fit_args:\n fit_params[name] = kwargs.pop(name)\n\n elif name in produce_args:\n produce_params[name] = kwargs.pop(name)\n\n if kwargs:\n error = \"Unexpected hyperparameters '{}'\".format(', '.join(kwargs.keys()))\n raise TypeError(error)\n\n return init_params, fit_params, produce_params", "def cmd_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-i\", \"--image\",\n help=\"Full image path can be optionally supplied.\")\n args = parser.parse_args()\n return args", "def _get_input_args(bam_file, data, out_base, background):\n if dd.get_genome_build(data) in [\"hg19\"]:\n return [\"--PileupFile\", _create_pileup(bam_file, data, out_base, background)]\n else:\n return [\"--BamFile\", bam_file]" ]
[ "0.64982295", "0.6270005", "0.60605526", "0.6001975", "0.5956914", "0.5867535", "0.586068", "0.5848655", "0.58337194", "0.57882416", "0.5759382", "0.57521", "0.5736829", "0.57335955", "0.572861", "0.571756", "0.5702735", "0.5665511", "0.5653056", "0.5628118", "0.5623889", "0.5611042", "0.56103534", "0.5593993", "0.55859137", "0.55763775", "0.5568918", "0.55402416", "0.5539436", "0.5535271" ]
0.7327621
0
Return a usable Boto object without creating a class around it. In the context of a krux.cli (or similar) interface the 'args', 'logger' and 'stats' objects should already be present. If you don't have them, however, we'll attempt to provide usable ones for the boto setup. (If you omit the add_boto_cli_arguments() call during other cli setup, the Boto object will still work, but its cli options won't show up in help output)
def get_boto(args=None, logger=None, stats=None): return Boto(**__get_arguments(args, logger, stats))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_boto3(args=None, logger=None, stats=None):\n return Boto3(**__get_arguments(args, logger, stats))", "def __get_arguments(args=None, logger=None, stats=None):\n\n if not args:\n parser = get_parser()\n add_boto_cli_arguments(parser)\n # Parse only the known arguments added by add_boto_cli_arguments().\n # We only need those arguments to create Boto object, nothing else.\n # parse_known_args() return (Namespace, list of unknown arguments),\n # we only care about the Namespace object here.\n args = parser.parse_known_args()[0]\n\n if not logger:\n logger = get_logger(name=NAME)\n\n if not stats:\n stats = get_stats(prefix=NAME)\n\n return {\n 'log_level': getattr(args, 'boto_log_level', DEFAULT['log_level']()),\n 'access_key': getattr(args, 'boto_access_key', DEFAULT['access_key']()),\n 'secret_key': getattr(args, 'boto_secret_key', DEFAULT['secret_key']()),\n 'region': getattr(args, 'boto_region', DEFAULT['region']()),\n 'logger': logger,\n 'stats': stats,\n }", "def get_elb(args=None, logger=None, stats=None):\n if not args:\n parser = get_parser()\n add_elb_cli_arguments(parser)\n args = parser.parse_args()\n if not logger:\n logger = get_logger(name=NAME)\n\n if not stats:\n stats = get_stats(prefix=NAME)\n\n boto = Boto3(\n log_level=args.boto_log_level,\n access_key=args.boto_access_key,\n secret_key=args.boto_secret_key,\n region=args.boto_region,\n logger=logger,\n stats=stats,\n )\n return ELB(\n boto=boto,\n logger=logger,\n stats=stats,\n )", "def createaws() -> my_aws_api_library.MyAws:\r\n aws_cred_file_path = os.environ['AWS_CRED_FILE']\r\n comp_pubkey = os.environ['COMPANY_PUBKEY']\r\n my_aws = my_aws_api_library.MyAws(aws_cred_file_path, comp_pubkey)\r\n return my_aws", "def aws_cli(args: List[str]):\n\n try:\n text_output = subprocess.check_output(['aws'] + args, text=True)\n except subprocess.CalledProcessError as e:\n raise Exception(f\"failed to call AWS CLI ({e.returncode}): \\n{e.stdout}\\n\\n{e.stderr}\") from e\n\n try:\n json_obj = json.loads(text_output)\n except json.JSONDecodeError as e:\n raise Exception(f\"AWS CLI did not output JSON as expected ({e.msg}). 
Output was:\\n{text_output}\") from e\n\n return json_obj", "def get_boto_client(self) -> S3Client:\n if self._boto_client is None:\n config = Config(signature_version=botocore.UNSIGNED)\n self._boto_client = self.session.client(\n \"s3\",\n region_name=settings.S3_REGION,\n endpoint_url=settings.S3_ENDPOINT_URL,\n config=config,\n )\n return self._boto_client", "def main():\n\n parser = get_args()\n args = parser.parse_args()\n\n if args.verbose:\n LOG.setLevel(logging.INFO)\n LOG.info('Verbose: on')\n else:\n ## If not verbose, turn down boto3.\n boto3.set_stream_logger(name='boto3', level=logging.WARNING)\n boto3.set_stream_logger(name='botocore', level=logging.WARNING)\n logging.getLogger(\"requests\").setLevel(logging.WARNING)\n\n ## Ensure credentials.\n if not args.credentials:\n die_screaming('need a credentials argument')\n LOG.info('Will use credentials: ' + args.credentials)\n ## Ensure directory.\n if not args.directory:\n die_screaming('need a directory argument')\n args.directory = args.directory.rstrip('//')\n LOG.info('Will operate in: ' + args.directory)\n ## Ensure bucket.\n if not args.bucket:\n die_screaming('need a bucket argument')\n bucket, slash, toppath = args.bucket.partition('/')\n if toppath != '':\n LOG.info('Will put to bucket: ' + bucket + '; with path: ' + toppath)\n else:\n LOG.info('Will put to bucket at top level: ' + bucket)\n ## Ensure mimetype metadata.\n if not args.mimetypes:\n LOG.info('Will use internal mimetype defaults')\n else:\n LOG.info('TODO: Will get mimetype metadata from: ' + args.metadata)\n ## Ensure bucket location.\n if not args.location:\n args.location = 'us-east-1'\n LOG.info('Will use S3 bucket location default: ' + args.location)\n else:\n LOG.info('Will use S3 bucket location: ' + args.location)\n\n ## Extract S3 credentials.\n creds = None\n with open(args.credentials) as chandle:\n creds = json.loads(chandle.read())\n #LOG.info(creds)\n\n s3 = boto3.resource('s3', region_name=args.location,\n aws_access_key_id=creds['accessKeyId'],\n aws_secret_access_key=creds['secretAccessKey'])\n\n # s3 = boto3.resource(\"s3\", creds['accessKeyId'], creds['secretAccessKey'])\n\n #s3.Object('mybucket', 'hello.txt').put(Body=open('/tmp/hello.txt', 'rb'))\n\n ## Walk tree.\n for curr_dir, dirs, files in os.walk(args.directory):\n\n ## We can navigate up if we are not in the root.\n relative_to_start = curr_dir.rstrip('//')[len(args.directory):]\n relative_to_start = relative_to_start.lstrip('//')\n LOG.info('curr_dir: ' + curr_dir + ' (' + relative_to_start + ')')\n\n ## Note files and directories.\n for fname in files:\n\n ## Get correct mime type.\n fext = os.path.splitext(fname)[1].lstrip('.')\n mime = MIMES.get('') # start with default\n if MIMES.get(fext, False):\n mime = MIMES.get(fext)\n\n ## Figure out S3 path/key and final filename, keeping in\n ## mind that relative_to_Start can be empty if root.\n s3path = fname\n if relative_to_start:\n s3path = relative_to_start + '/' + fname\n filename = os.path.join(curr_dir, fname)\n\n tags = {}\n if args.number:\n tags['build-number'] = args.number\n if args.pipeline:\n tags['build-pipeline'] = args.pipeline\n tags_str = urllib.parse.urlencode(tags)\n\n ## Visual check.\n LOG.info('file: ' + filename)\n if toppath != '':\n s3path = toppath + '/' + s3path\n LOG.info(' -> [' + bucket + '] ' + s3path + \\\n '(' + mime + ', ' + tags_str + ')')\n\n ## Create the new object that we want.\n s3bucket = s3.Bucket(bucket)\n multipart_upload(filename, s3bucket, s3path, content_type=mime, metadata=tags, 
policy=\"public-read\")\n\n # newobj = s3.Object(args.bucket, s3path)\n # outfile = open(filename, 'rb')\n # newobj.put(Body=outfile, \\\n # ContentType=mime, \\\n # Metadata=tags,\n # ACL='public-read') #Tagging=tags_str)\n\n # outbod = open(os.path.join(curr_dir, fname), 'rb')\n # .put(Body=outbod, 'rb')\n\n # for dname in dirs:\n # #LOG.info('dir: ' + os.path.join(curr_dir, dname))\n # pass", "def __init__(\n self,\n service_name: str,\n account_id: str,\n region_name: Optional[str] = None,\n aws_creds: Optional[Dict[str, str]] = None,\n profile_name: Optional[str] = None,\n placebo: Optional[Any] = None,\n placebo_data_path: Optional[str] = None,\n placebo_mode: Optional[str] = \"record\",\n max_attempts: int = 20,\n config: Optional[Config] = None,\n max_attempts_on_client_error: int = 10,\n ):\n self._service_name = service_name\n self._region_name = region_name\n self._account_id = account_id\n self._max_attempts_on_client_error = max_attempts_on_client_error\n\n # Build a clojure in order to recreate boto3 client if needed\n\n def _create_client(service: str = None):\n return get_client(\n session=get_session(\n aws_creds=aws_creds,\n profile_name=profile_name,\n placebo=placebo,\n placebo_data_path=placebo_data_path,\n placebo_mode=placebo_mode,\n ),\n service_name=service if service else service_name,\n region_name=region_name,\n max_attempts=max_attempts,\n config=config,\n )\n\n # set client factory\n self.create_client = _create_client\n\n # Build boto3 client\n self._client = self.create_client()", "def _aws_get_object(bucket, key, request_pays=True, client=None):\n if not client:\n session = boto3_session(region_name=REGION)\n client = session.client(\"s3\")\n\n params = {\"Bucket\": bucket, \"Key\": key}\n if request_pays:\n params[\"RequestPayer\"] = \"requester\"\n response = client.get_object(**params)\n return response[\"Body\"].read()", "def _get_client(self):\n if self._client is None:\n self._client = self.boto.client(service_name='elb', region_name=self.boto.cli_region)\n\n return self._client", "def __init__(self):\n self.aws = AWS()", "def bcbio_s3_instance_profile(conn, args):\n import boto\n if hasattr(args, \"nocreate\") and args.nocreate:\n return {\"instance_profile\": \"\"}\n base_name = args.cluster if hasattr(args, \"cluster\") and args.cluster else \"bcbio\"\n name = \"%s_full_s3_access\" % (base_name)\n try:\n ip = conn.get_instance_profile(name)\n except boto.exception.BotoServerError:\n print(\"Instance profile %s doesn't exist, creating\" % name)\n ip = conn.create_instance_profile(name)\n try:\n conn.get_role(name)\n except boto.exception.BotoServerError:\n print(\"Role %s doesn't exist, creating\" % name)\n conn.create_role(name)\n conn.put_role_policy(name, name, S3_POLICY)\n if not tz.get_in([\"get_instance_profile_response\", \"get_instance_profile_result\", \"instance_profile\", \"roles\"],\n ip):\n conn.add_role_to_instance_profile(name, name)\n print(\"Instance profile: %s\" % name)\n return {\"instance_profile\": name}", "def xray_botocore_api_call(wrapped, instance, args, kwargs):\n return generic_xray_wrapper(\n wrapped,\n instance,\n args,\n kwargs,\n name=get_service_name,\n namespace=\"aws\",\n metadata_extractor=extract_aws_metadata,\n error_handling_type=ERROR_HANDLING_BOTOCORE,\n )", "def extract_aws_metadata(wrapped, instance, args, kwargs, return_value):\n response = return_value\n LOGGER.debug(\n \"Extracting AWS metadata\", args=args, kwargs=kwargs,\n )\n if \"operation_name\" in kwargs:\n operation_name = 
kwargs[\"operation_name\"]\n else:\n operation_name = args[0]\n\n # Most of the time the actual keyword arguments to the client call are\n # passed in as a positial argument after the operation name.\n if len(kwargs) == 0 and len(args) == 2:\n kwargs = args[1]\n\n region_name = instance._client_config.region_name\n\n response_metadata = response.get(\"ResponseMetadata\")\n\n metadata = {\"aws\": {\"operation\": operation_name, \"region\": region_name}}\n\n if \"TableName\" in kwargs:\n metadata[\"aws\"][\"table_name\"] = kwargs[\"TableName\"]\n if \"QueueUrl\" in kwargs:\n metadata[\"aws\"][\"queue_url\"] = kwargs[\"QueueUrl\"]\n\n if response_metadata is not None:\n metadata[\"http\"] = {\n \"response\": {\"status\": response_metadata[\"HTTPStatusCode\"]},\n }\n metadata[\"aws\"][\"request_id\"] = response_metadata[\"RequestId\"]\n\n return metadata", "def client() -> botocore.client.BaseClient:\n global _client\n if _client is None:\n endpoint_url = os.environ.get('LOCALSTACK_S3_URL')\n # If endpoint_url is None, botocore constructs the default AWS URL\n _client = boto3.client('s3', endpoint_url=endpoint_url)\n return _client", "def __init__(self):\n super(AWSBase, self).__init__()\n self.region = config.ENV_DATA['region']\n self.aws = AWSUtil(self.region)", "def botoconn(args):\n try:\n return boto.ec2.autoscale.connect_to_region(args.region)\n except:\n print(\"FATAL ERROR:\")\n traceback.print_exc(file=sys.stdout)\n sys.exit(\"Failed to connect to AWS. Did you set the shell vars right?\")", "def make_sdk(options=None, **kwargs):\n from openstack import connection\n cloud = get_config(options=options, **kwargs)\n return connection.from_config(cloud_config=cloud, options=options)", "def aws(ctx): # pylint: disable=unused-argument\n pass # pylint: disable=unnecessary-pass", "def __init__(self, bucket, aws_profile=None, logger=None):\n self.bucket = bucket\n self.s3helper = S3Helper(aws_profile=aws_profile)\n self.print_func = print\n if logger:\n self.print_func = logger.info", "def create_boto3_client(config, service):\n session = boto3.Session(profile_name=config.get('AWS_ACCESS', 'AWS_PROFILE'))\n return session.client(service, region_name=config.get('AWS_ACCESS', 'AWS_REGION'))", "def aws():\n pass", "def main():\n t0 = time.time()\n parser = argparse.ArgumentParser()\n parser.add_argument('-e', '--env', default='LOCAL', help='Enter one of DOCKER, LOCAL or S3')\n parser.add_argument('--bucket-name', help='Enter S3 bucket')\n parser.add_argument('--aws-access-key-id', help='Enter AWS access key id')\n parser.add_argument('--aws-secret-access-key', help='Enter AWS secrest access key')\n parser.add_argument('--aws-region', default='us-west-2', help='Enter AWS region')\n # subparser = parser.add_subparsers(dest='subcommand', help='Can choose bucket name if S3 is chosen')\n # parser_bucket = subparser.add_parser('S3')\n # parser_bucket.add_argument('bucket', help='S3 bucket name')\n args = vars(parser.parse_args())\n args['env'] = args['env'].upper()\n if args['env'] != 'S3' and args['bucket_name']:\n parser.error('Can specify a bucket name with only S3...')\n if args['env'] == 'S3' and not (args['bucket_name'] and \n args['aws_access_key_id'] and\n args['aws_secret_access_key']):\n parser.error('Specify a bucket, access key and secret access key...')\n # print(args)\n # print(args['env'])\n # print(args['subcommand'])\n\n if args['env'] == 'S3' and args['aws_region'] != '':\n s3_client = create_client(\n \"s3\",\n region=args['aws_region'],\n access_key_id=args['aws_access_key_id'],\n 
secret_access_key=args['aws_secret_access_key']\n )\n os.environ['AWS_ACCESS_KEY_ID'] = args['aws_access_key_id'].strip()\n os.environ['AWS_SECRET_ACCESS_KEY'] = args['aws_secret_access_key'].strip()\n logger.info('Check to see whether s3 bucket exits...')\n try:\n s3.meta.client.head_bucket(Bucket=args['bucket_name'])\n logger.info(f\"S3 bucket {args['bucket_name']} exits...\")\n except Exception as e:\n logger.warn(f\"Bucket {args['bucket_name']} doesn't exist...\")\n logger.info('Creating bucket...')\n create_s3_bucket(s3_client, args['bucket_name'], args['aws_region'])\n\n\n config = configparser.ConfigParser()\n if args['env'] == 'DOCKER':\n CFG_FILE = r'/usr/local/airflow/config/etl_config.cfg'\n try:\n config.read(CFG_FILE)\n except Exception as e:\n print('Configuration file is missing or cannot be read...')\n raise\n elif args['env'] == 'S3':\n obj = s3_client.get_object(Bucket=args['bucket_name'], Key='config/etl_config.cfg')\n try:\n config.read_string(obj['Body'].read().decode())\n except Exception as e:\n print('Configuration file is missing or cannot be read...')\n raise\n else:\n CFG_FILE = r'/Users/home/Documents/dend/Data-Engineering-ND/Capstone/config/etl_config.cfg'\n try:\n config.read(CFG_FILE)\n except Exception as e:\n print('Configuration file is missing or cannot be read...')\n raise\n\n sas_jar_ver = config['APP']['sas_jar_ver']\n os.environ['SAS_JAR'] = \".\".join(sas_jar_ver.split('.')[:-1])\n\n if args['env'] == 'DOCKER':\n base_dir = config['DOCKER']['base_dir']\n data_dir = config['DOCKER']['data_dir']\n path = config['DOCKER']['sas_data_dir']\n sas_file_path = os.path.join(base_dir, data_dir, path)\n dict_dir = config['DOCKER']['dict_dir']\n files = json.loads(config['DOCKER']['input_files'])\n airport_file = os.path.join(base_dir, data_dir, config['DOCKER']['airports_file'])\n demographic_file = os.path.join(base_dir, data_dir, config['DOCKER']['us_demographics_file'])\n dictionary_file = os.path.join(base_dir, dict_dir, config['DOCKER']['dictionary_file'])\n output_dir = os.path.join(base_dir, config['DOCKER']['output_dir'])\n log_dir = os.path.join(base_dir, config['LOCAL']['log_dir'])\n log_file = config['LOCAL']['log_file']\n elif args['env'] == 'S3':\n bucket = args['bucket_name']\n path = config['S3']['s3_sas_key']\n dict_dir = config['S3']['s3_dict_key']\n csv_dir = config['S3']['s3_csv_key']\n sas_file_path = os.path.join(\"s3a://\", bucket, csv_dir, path)\n files = json.loads(config['S3']['input_files'])\n airport_file = os.path.join(\"s3a://\", bucket, csv_dir, config['S3']['airports_file'])\n demographic_file = os.path.join(\"s3a://\", bucket, csv_dir, config['S3']['us_demographics_file'])\n dictionary_file = os.path.join(\"s3a://\", bucket, config['S3']['dictionary_file'])\n output_dir = os.path.join(\"s3a://\", bucket, config['S3']['output_dir'])\n else:\n base_dir = config['LOCAL']['base_dir']\n data_dir = config['LOCAL']['data_dir']\n path = config['LOCAL']['sas_data_dir']\n sas_file_path = os.path.join(base_dir, data_dir, path)\n dict_dir = config['LOCAL']['dict_dir']\n files = json.loads(config['LOCAL']['input_files'])\n airport_file = os.path.join(base_dir, data_dir, config['LOCAL']['airports_file'])\n demographic_file = os.path.join(base_dir, data_dir, config['LOCAL']['us_demographics_file'])\n dictionary_file = os.path.join(base_dir, dict_dir, config['LOCAL']['dictionary_file'])\n output_dir = os.path.join(base_dir, config['LOCAL']['output_dir'])\n log_dir = os.path.join(base_dir, config['LOCAL']['log_dir'])\n log_file = 
config['LOCAL']['log_file']\n \n try:\n # Log file written to Hadoop EMR env\n base_dir = config['HADOOP']['base_dir']\n log_dir = os.path.join(base_dir, config['HADOOP']['log_dir'])\n log_file = config['HADOOP']['log_file']\n pathlib.Path(log_dir).mkdir(exist_ok=True)\n file_handler = enable_logging(log_dir, log_file)\n logger.addHandler(file_handler)\n print(\"Create log dir if it doesn't exist...\")\n except:\n base_dir = config['LOCAL']['base_dir']\n log_dir = os.path.join(base_dir, config['LOCAL']['log_dir'])\n log_file = config['LOCAL']['log_file']\n pathlib.Path(log_dir).mkdir(exist_ok=True)\n file_handler = enable_logging(log_dir, log_file)\n logger.addHandler(file_handler)\n print(\"Create log dir if it doesn't exist...\")\n\n\n logger.info('ETL parsing has started...')\n logger.info(\"Create output dir if it doesn't exist...\")\n if args['env'] != 'S3':\n pathlib.Path(output_dir).mkdir(exist_ok=True)\n else:\n # config.set('S3', 's3_bucket_name', args['bucket_name'])\n # s3_client.put_object(Bucket=args['bucket_name'], Key=config['S3']['config_dir'], Body=)\n s3_client.put_object(Bucket=args['bucket_name'], Key=config['S3']['output_dir'])\n logger.info('Created S3 bucket...')\n \n spark = create_spark_session()\n logger.info('Pyspark session created...')\n logger.info('Register UDFs...')\n \n spark.udf.register('SASDateConverter', sas_date_converter, Date())\n logger.info('Register sas_date_converter UDF...')\n\n # change_date_format_1 = F.udf(lambda x: datetime.strptime(x.strip(), '%Y%m%d'), Date())\n # change_date_format_2 = F.udf(lambda x: datetime.strptime(x.strip(), '%m%d%Y'), Date())\n dt = F.udf(change_date_format, Date())\n\n logger.info('Read and concatenate the raw SAS files...')\n dfs = []\n for file in files:\n try:\n df = spark.read.format('com.github.saurfang.sas.spark')\\\n .load(os.path.join(sas_file_path, file))\n dfs.append(df)\n except Exception as e:\n logger.info(f'File {file} is not available. 
Skipping...')\n logger.info(f'Read {len(files)} files successfully...')\n df = []\n if len(dfs) > 0:\n df = concat_df(*dfs)\n logger.info(f'Successfully concatenated {len(files)}...')\n if not isinstance(df, list):\n # SAS raw data table creation begins here\n cols = ['cicid', 'i94yr', 'i94mon', 'i94port', 'i94mode', 'visapost', \n 'entdepa', 'entdepd', 'entdepu', 'matflag', \n 'dtadfile', 'dtaddto']\n parquet_tables = ['i94_immigrations', 'i94_trips', 'i94_visitors', 'i94_flights']\n f_transforms = [i94_immigrations, i94_trips, i94_visitors, i94_flights]\n res_df = None\n for table, f_transform in zip(parquet_tables, f_transforms):\n if table == 'i94_immigrations':\n # only table not using spark sql\n res_df = create_and_write_df(df, table, f_transform, \n output_dir,\n spark=None, cols=cols,\n udf=dt, fmt='parquet',\n is_partition=True,\n is_overwrite=True,\n crate_date_df=False)\n elif table == 'i94_flights':\n res_df = create_and_write_df(df, table, f_transform, \n output_dir,\n spark=spark, cols=None,\n udf=None, fmt='csv',\n is_partition=False,\n is_overwrite=True,\n crate_date_df=False)\n else:\n res_df = create_and_write_df(df, table, f_transform, \n output_dir,\n spark=spark, cols=None,\n udf=None, fmt='parquet',\n is_partition=True,\n is_overwrite=True,\n crate_date_df=False)\n\n if table == 'i94_trips':\n table = 'i94_dates'\n create_and_write_df(res_df, table, i94_dates, \n output_dir,\n spark=spark, cols=None,\n udf=None, fmt='parquet',\n is_partition=True,\n is_overwrite=True,\n crate_date_df=False)\n\n # Reference data for airports and us city demographics begins here\n airport_df = spark.createDataFrame([], R([]))\n demographic_df = spark.createDataFrame([], R([]))\n logger.info('Read the airports reference file...')\n try:\n airport_df = spark.read.option('header', True) \\\n .csv(airport_file)\n except Exception as e:\n logger.error(f'File {airport_file} is not available. Skipping...')\n\n logger.info('Read the US demographics reference file...')\n try:\n demographic_df = spark.read.options(header='True', delimiter=';') \\\n .csv(demographic_file) \n except Exception as e:\n logger.error(f'File {demographic_file} is not available. 
Skipping...')\n if airport_df.count() > 0 and demographic_df.count() > 0: \n csv_tables = ['i94_airports', 'i94_us_states_demographic', \n 'i94_us_cities_demographic']\n f_transforms = [i94_airports, i94_us_states_demographic, i94_us_cities_demographic]\n csv_dfs = [airport_df, demographic_df, demographic_df]\n for table, f_transform, df in zip(csv_tables, f_transforms, csv_dfs):\n res_df = create_and_write_df(df, table, f_transform, \n output_dir,\n spark=spark, cols=None,\n udf=dt, fmt='csv',\n is_partition=False,\n is_overwrite=True)\n\n # SAS reference data creation begins here\n ref_csv_tables = ['i94_countries', 'i94_port_state_mapping', 'i94_travel_mode', \n 'i94_state_mapping', 'i94_visa']\n table_pos_dict = {\n 'i94_countries': [2, 3, 'country', 'country_id'],\n 'i94_port_state_mapping': [3, 4, 'city', 'i94_port'],\n 'i94_travel_mode': [4, 5, 'mode', 'mode_id'],\n 'i94_state_mapping': [5, 6, 'state', 'state_id'],\n 'i94_visa': [6, 7, 'visa_purpose', 'visa_id']\n }\n logger.info('Read the SAS data dictionary reference file...') \n for table in ref_csv_tables:\n create_and_write_ref_df(dictionary_file, table, output_dir, spark, \n fmt='csv', start_pos=table_pos_dict[table][0], \n end_pos=table_pos_dict[table][1],\n col_name=table_pos_dict[table][2], \n index_name=table_pos_dict[table][3],\n is_partition=False,\n is_overwrite=True)\n\n logger.info('ETL parsing has completed...')\n logger.info('Time taken to complete job {} minutes'.format((time.time() - t0) / 60))", "def mock_amazon():\n amazon = Amazon()\n amazon.carrot1 = 'cenoura normal'\n amazon.carrot2 = 'cenoura radioativa'\n amazon.carrot_number = 575\n return amazon", "def __init__(__self__, resource_name, opts=None, aws_kms_key_arn=None, content_config=None, content_config_permissions=None, input_bucket=None, name=None, notifications=None, output_bucket=None, role=None, thumbnail_config=None, thumbnail_config_permissions=None, __props__=None, __name__=None, __opts__=None):\n if __name__ is not None:\n warnings.warn(\"explicit use of __name__ is deprecated\", DeprecationWarning)\n resource_name = __name__\n if __opts__ is not None:\n warnings.warn(\"explicit use of __opts__ is deprecated, use 'opts' instead\", DeprecationWarning)\n opts = __opts__\n if opts is None:\n opts = pulumi.ResourceOptions()\n if not isinstance(opts, pulumi.ResourceOptions):\n raise TypeError('Expected resource options to be a ResourceOptions instance')\n if opts.version is None:\n opts.version = utilities.get_version()\n if opts.id is None:\n if __props__ is not None:\n raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')\n __props__ = dict()\n\n __props__['aws_kms_key_arn'] = aws_kms_key_arn\n __props__['content_config'] = content_config\n __props__['content_config_permissions'] = content_config_permissions\n if input_bucket is None:\n raise TypeError(\"Missing required property 'input_bucket'\")\n __props__['input_bucket'] = input_bucket\n __props__['name'] = name\n __props__['notifications'] = notifications\n __props__['output_bucket'] = output_bucket\n if role is None:\n raise TypeError(\"Missing required property 'role'\")\n __props__['role'] = role\n __props__['thumbnail_config'] = thumbnail_config\n __props__['thumbnail_config_permissions'] = thumbnail_config_permissions\n __props__['arn'] = None\n super(Pipeline, __self__).__init__(\n 'aws:elastictranscoder/pipeline:Pipeline',\n resource_name,\n __props__,\n opts)", "def cli(profile, region, clear):\n global SESSION, 
BUCKET_MANAGER, DOMAIN_MANAGER, CERT_MANAGER, \\\n DIST_MANAGER, EC2_MANAGER, ECS_MANAGER\n session_cfg = {}\n if profile:\n session_cfg['profile_name'] = profile\n\n if region:\n session_cfg['region_name'] = region\n\n if clear:\n util.clear_scr()\n\n# using **<variable> python expands it as a parameter=content\n SESSION = boto3.Session(**session_cfg)\n BUCKET_MANAGER = BucketManager(SESSION)\n DOMAIN_MANAGER = DomainManager(SESSION)\n CERT_MANAGER = CertificateManager(SESSION)\n DIST_MANAGER = DistributionManager(SESSION)\n EC2_MANAGER = EC2Manager(SESSION)\n ECS_MANAGER = ECSManager(SESSION)", "def boto_client(account_id, service_name, region):\n logger.info('Creating boto3 client for account_id: {}, '\n 'service_name: {}'.format(account_id, service_name))\n return boto3.client(service_name, region_name=region)", "def __init__(__self__,\n resource_name: str,\n args: InstanceArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(__self__,\n resource_name: str,\n args: InstanceArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(__self__,\n resource_name: str,\n args: InstanceArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ..." ]
[ "0.7120333", "0.6446755", "0.53901947", "0.53732514", "0.5315737", "0.5265874", "0.5252197", "0.522644", "0.5211605", "0.521066", "0.5205573", "0.51422757", "0.5054793", "0.5037921", "0.5032993", "0.50301266", "0.49558958", "0.49513885", "0.49364442", "0.491438", "0.49119216", "0.4907558", "0.489213", "0.4874849", "0.4851639", "0.4844455", "0.48054138", "0.4789765", "0.4789765", "0.4789765" ]
0.81121886
0
Return a usable Boto3 object without creating a class around it. In the context of a krux.cli (or similar) interface the 'args', 'logger' and 'stats' objects should already be present. If you don't have them, however, we'll attempt to provide usable ones for the boto setup. (If you omit the add_boto_cli_arguments() call during other cli setup, the Boto object will still work, but its cli options won't show up in help output)
def get_boto3(args=None, logger=None, stats=None): return Boto3(**__get_arguments(args, logger, stats))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_boto(args=None, logger=None, stats=None):\n return Boto(**__get_arguments(args, logger, stats))", "def __get_arguments(args=None, logger=None, stats=None):\n\n if not args:\n parser = get_parser()\n add_boto_cli_arguments(parser)\n # Parse only the known arguments added by add_boto_cli_arguments().\n # We only need those arguments to create Boto object, nothing else.\n # parse_known_args() return (Namespace, list of unknown arguments),\n # we only care about the Namespace object here.\n args = parser.parse_known_args()[0]\n\n if not logger:\n logger = get_logger(name=NAME)\n\n if not stats:\n stats = get_stats(prefix=NAME)\n\n return {\n 'log_level': getattr(args, 'boto_log_level', DEFAULT['log_level']()),\n 'access_key': getattr(args, 'boto_access_key', DEFAULT['access_key']()),\n 'secret_key': getattr(args, 'boto_secret_key', DEFAULT['secret_key']()),\n 'region': getattr(args, 'boto_region', DEFAULT['region']()),\n 'logger': logger,\n 'stats': stats,\n }", "def get_boto_client(self) -> S3Client:\n if self._boto_client is None:\n config = Config(signature_version=botocore.UNSIGNED)\n self._boto_client = self.session.client(\n \"s3\",\n region_name=settings.S3_REGION,\n endpoint_url=settings.S3_ENDPOINT_URL,\n config=config,\n )\n return self._boto_client", "def client() -> botocore.client.BaseClient:\n global _client\n if _client is None:\n endpoint_url = os.environ.get('LOCALSTACK_S3_URL')\n # If endpoint_url is None, botocore constructs the default AWS URL\n _client = boto3.client('s3', endpoint_url=endpoint_url)\n return _client", "def main():\n\n parser = get_args()\n args = parser.parse_args()\n\n if args.verbose:\n LOG.setLevel(logging.INFO)\n LOG.info('Verbose: on')\n else:\n ## If not verbose, turn down boto3.\n boto3.set_stream_logger(name='boto3', level=logging.WARNING)\n boto3.set_stream_logger(name='botocore', level=logging.WARNING)\n logging.getLogger(\"requests\").setLevel(logging.WARNING)\n\n ## Ensure credentials.\n if not args.credentials:\n die_screaming('need a credentials argument')\n LOG.info('Will use credentials: ' + args.credentials)\n ## Ensure directory.\n if not args.directory:\n die_screaming('need a directory argument')\n args.directory = args.directory.rstrip('//')\n LOG.info('Will operate in: ' + args.directory)\n ## Ensure bucket.\n if not args.bucket:\n die_screaming('need a bucket argument')\n bucket, slash, toppath = args.bucket.partition('/')\n if toppath != '':\n LOG.info('Will put to bucket: ' + bucket + '; with path: ' + toppath)\n else:\n LOG.info('Will put to bucket at top level: ' + bucket)\n ## Ensure mimetype metadata.\n if not args.mimetypes:\n LOG.info('Will use internal mimetype defaults')\n else:\n LOG.info('TODO: Will get mimetype metadata from: ' + args.metadata)\n ## Ensure bucket location.\n if not args.location:\n args.location = 'us-east-1'\n LOG.info('Will use S3 bucket location default: ' + args.location)\n else:\n LOG.info('Will use S3 bucket location: ' + args.location)\n\n ## Extract S3 credentials.\n creds = None\n with open(args.credentials) as chandle:\n creds = json.loads(chandle.read())\n #LOG.info(creds)\n\n s3 = boto3.resource('s3', region_name=args.location,\n aws_access_key_id=creds['accessKeyId'],\n aws_secret_access_key=creds['secretAccessKey'])\n\n # s3 = boto3.resource(\"s3\", creds['accessKeyId'], creds['secretAccessKey'])\n\n #s3.Object('mybucket', 'hello.txt').put(Body=open('/tmp/hello.txt', 'rb'))\n\n ## Walk tree.\n for curr_dir, dirs, files in os.walk(args.directory):\n\n ## We can navigate up if we 
are not in the root.\n relative_to_start = curr_dir.rstrip('//')[len(args.directory):]\n relative_to_start = relative_to_start.lstrip('//')\n LOG.info('curr_dir: ' + curr_dir + ' (' + relative_to_start + ')')\n\n ## Note files and directories.\n for fname in files:\n\n ## Get correct mime type.\n fext = os.path.splitext(fname)[1].lstrip('.')\n mime = MIMES.get('') # start with default\n if MIMES.get(fext, False):\n mime = MIMES.get(fext)\n\n ## Figure out S3 path/key and final filename, keeping in\n ## mind that relative_to_Start can be empty if root.\n s3path = fname\n if relative_to_start:\n s3path = relative_to_start + '/' + fname\n filename = os.path.join(curr_dir, fname)\n\n tags = {}\n if args.number:\n tags['build-number'] = args.number\n if args.pipeline:\n tags['build-pipeline'] = args.pipeline\n tags_str = urllib.parse.urlencode(tags)\n\n ## Visual check.\n LOG.info('file: ' + filename)\n if toppath != '':\n s3path = toppath + '/' + s3path\n LOG.info(' -> [' + bucket + '] ' + s3path + \\\n '(' + mime + ', ' + tags_str + ')')\n\n ## Create the new object that we want.\n s3bucket = s3.Bucket(bucket)\n multipart_upload(filename, s3bucket, s3path, content_type=mime, metadata=tags, policy=\"public-read\")\n\n # newobj = s3.Object(args.bucket, s3path)\n # outfile = open(filename, 'rb')\n # newobj.put(Body=outfile, \\\n # ContentType=mime, \\\n # Metadata=tags,\n # ACL='public-read') #Tagging=tags_str)\n\n # outbod = open(os.path.join(curr_dir, fname), 'rb')\n # .put(Body=outbod, 'rb')\n\n # for dname in dirs:\n # #LOG.info('dir: ' + os.path.join(curr_dir, dname))\n # pass", "def get_s3_client(args: argparse.Namespace) -> botocore.clients.s3:\n\n assert args.s3_region_name is not None, \"set COMPSYN_S3_REGION_NAME\"\n assert args.s3_access_key_id is not None, \"set COMPSYN_S3_ACCESS_KEY_ID\"\n assert args.s3_secret_access_key is not None, \"set COMPSYN_S3_SECRET_ACCESS_KEY\"\n assert args.s3_bucket is not None, \"set COMPSYN_S3_BUCKET\"\n\n return boto3.session.Session().client(\n \"s3\",\n region_name=args.s3_region_name,\n endpoint_url=args.s3_endpoint_url,\n aws_access_key_id=args.s3_access_key_id,\n aws_secret_access_key=args.s3_secret_access_key,\n )", "def aws_cli(args: List[str]):\n\n try:\n text_output = subprocess.check_output(['aws'] + args, text=True)\n except subprocess.CalledProcessError as e:\n raise Exception(f\"failed to call AWS CLI ({e.returncode}): \\n{e.stdout}\\n\\n{e.stderr}\") from e\n\n try:\n json_obj = json.loads(text_output)\n except json.JSONDecodeError as e:\n raise Exception(f\"AWS CLI did not output JSON as expected ({e.msg}). 
Output was:\\n{text_output}\") from e\n\n return json_obj", "def create_boto3_client(config, service):\n session = boto3.Session(profile_name=config.get('AWS_ACCESS', 'AWS_PROFILE'))\n return session.client(service, region_name=config.get('AWS_ACCESS', 'AWS_REGION'))", "def bcbio_s3_instance_profile(conn, args):\n import boto\n if hasattr(args, \"nocreate\") and args.nocreate:\n return {\"instance_profile\": \"\"}\n base_name = args.cluster if hasattr(args, \"cluster\") and args.cluster else \"bcbio\"\n name = \"%s_full_s3_access\" % (base_name)\n try:\n ip = conn.get_instance_profile(name)\n except boto.exception.BotoServerError:\n print(\"Instance profile %s doesn't exist, creating\" % name)\n ip = conn.create_instance_profile(name)\n try:\n conn.get_role(name)\n except boto.exception.BotoServerError:\n print(\"Role %s doesn't exist, creating\" % name)\n conn.create_role(name)\n conn.put_role_policy(name, name, S3_POLICY)\n if not tz.get_in([\"get_instance_profile_response\", \"get_instance_profile_result\", \"instance_profile\", \"roles\"],\n ip):\n conn.add_role_to_instance_profile(name, name)\n print(\"Instance profile: %s\" % name)\n return {\"instance_profile\": name}", "def get_aioboto3_version() -> str:\n try:\n from aioboto3 import __version__ as version # type: ignore\n except ImportError:\n raise RuntimeError(\"aioboto3 is not installed\")\n\n return version", "def get_s3_client():\n return boto3.resource('s3')", "def _aws_get_object(bucket, key, request_pays=True, client=None):\n if not client:\n session = boto3_session(region_name=REGION)\n client = session.client(\"s3\")\n\n params = {\"Bucket\": bucket, \"Key\": key}\n if request_pays:\n params[\"RequestPayer\"] = \"requester\"\n response = client.get_object(**params)\n return response[\"Body\"].read()", "def main():\n t0 = time.time()\n parser = argparse.ArgumentParser()\n parser.add_argument('-e', '--env', default='LOCAL', help='Enter one of DOCKER, LOCAL or S3')\n parser.add_argument('--bucket-name', help='Enter S3 bucket')\n parser.add_argument('--aws-access-key-id', help='Enter AWS access key id')\n parser.add_argument('--aws-secret-access-key', help='Enter AWS secrest access key')\n parser.add_argument('--aws-region', default='us-west-2', help='Enter AWS region')\n # subparser = parser.add_subparsers(dest='subcommand', help='Can choose bucket name if S3 is chosen')\n # parser_bucket = subparser.add_parser('S3')\n # parser_bucket.add_argument('bucket', help='S3 bucket name')\n args = vars(parser.parse_args())\n args['env'] = args['env'].upper()\n if args['env'] != 'S3' and args['bucket_name']:\n parser.error('Can specify a bucket name with only S3...')\n if args['env'] == 'S3' and not (args['bucket_name'] and \n args['aws_access_key_id'] and\n args['aws_secret_access_key']):\n parser.error('Specify a bucket, access key and secret access key...')\n # print(args)\n # print(args['env'])\n # print(args['subcommand'])\n\n if args['env'] == 'S3' and args['aws_region'] != '':\n s3_client = create_client(\n \"s3\",\n region=args['aws_region'],\n access_key_id=args['aws_access_key_id'],\n secret_access_key=args['aws_secret_access_key']\n )\n os.environ['AWS_ACCESS_KEY_ID'] = args['aws_access_key_id'].strip()\n os.environ['AWS_SECRET_ACCESS_KEY'] = args['aws_secret_access_key'].strip()\n logger.info('Check to see whether s3 bucket exits...')\n try:\n s3.meta.client.head_bucket(Bucket=args['bucket_name'])\n logger.info(f\"S3 bucket {args['bucket_name']} exits...\")\n except Exception as e:\n logger.warn(f\"Bucket 
{args['bucket_name']} doesn't exist...\")\n logger.info('Creating bucket...')\n create_s3_bucket(s3_client, args['bucket_name'], args['aws_region'])\n\n\n config = configparser.ConfigParser()\n if args['env'] == 'DOCKER':\n CFG_FILE = r'/usr/local/airflow/config/etl_config.cfg'\n try:\n config.read(CFG_FILE)\n except Exception as e:\n print('Configuration file is missing or cannot be read...')\n raise\n elif args['env'] == 'S3':\n obj = s3_client.get_object(Bucket=args['bucket_name'], Key='config/etl_config.cfg')\n try:\n config.read_string(obj['Body'].read().decode())\n except Exception as e:\n print('Configuration file is missing or cannot be read...')\n raise\n else:\n CFG_FILE = r'/Users/home/Documents/dend/Data-Engineering-ND/Capstone/config/etl_config.cfg'\n try:\n config.read(CFG_FILE)\n except Exception as e:\n print('Configuration file is missing or cannot be read...')\n raise\n\n sas_jar_ver = config['APP']['sas_jar_ver']\n os.environ['SAS_JAR'] = \".\".join(sas_jar_ver.split('.')[:-1])\n\n if args['env'] == 'DOCKER':\n base_dir = config['DOCKER']['base_dir']\n data_dir = config['DOCKER']['data_dir']\n path = config['DOCKER']['sas_data_dir']\n sas_file_path = os.path.join(base_dir, data_dir, path)\n dict_dir = config['DOCKER']['dict_dir']\n files = json.loads(config['DOCKER']['input_files'])\n airport_file = os.path.join(base_dir, data_dir, config['DOCKER']['airports_file'])\n demographic_file = os.path.join(base_dir, data_dir, config['DOCKER']['us_demographics_file'])\n dictionary_file = os.path.join(base_dir, dict_dir, config['DOCKER']['dictionary_file'])\n output_dir = os.path.join(base_dir, config['DOCKER']['output_dir'])\n log_dir = os.path.join(base_dir, config['LOCAL']['log_dir'])\n log_file = config['LOCAL']['log_file']\n elif args['env'] == 'S3':\n bucket = args['bucket_name']\n path = config['S3']['s3_sas_key']\n dict_dir = config['S3']['s3_dict_key']\n csv_dir = config['S3']['s3_csv_key']\n sas_file_path = os.path.join(\"s3a://\", bucket, csv_dir, path)\n files = json.loads(config['S3']['input_files'])\n airport_file = os.path.join(\"s3a://\", bucket, csv_dir, config['S3']['airports_file'])\n demographic_file = os.path.join(\"s3a://\", bucket, csv_dir, config['S3']['us_demographics_file'])\n dictionary_file = os.path.join(\"s3a://\", bucket, config['S3']['dictionary_file'])\n output_dir = os.path.join(\"s3a://\", bucket, config['S3']['output_dir'])\n else:\n base_dir = config['LOCAL']['base_dir']\n data_dir = config['LOCAL']['data_dir']\n path = config['LOCAL']['sas_data_dir']\n sas_file_path = os.path.join(base_dir, data_dir, path)\n dict_dir = config['LOCAL']['dict_dir']\n files = json.loads(config['LOCAL']['input_files'])\n airport_file = os.path.join(base_dir, data_dir, config['LOCAL']['airports_file'])\n demographic_file = os.path.join(base_dir, data_dir, config['LOCAL']['us_demographics_file'])\n dictionary_file = os.path.join(base_dir, dict_dir, config['LOCAL']['dictionary_file'])\n output_dir = os.path.join(base_dir, config['LOCAL']['output_dir'])\n log_dir = os.path.join(base_dir, config['LOCAL']['log_dir'])\n log_file = config['LOCAL']['log_file']\n \n try:\n # Log file written to Hadoop EMR env\n base_dir = config['HADOOP']['base_dir']\n log_dir = os.path.join(base_dir, config['HADOOP']['log_dir'])\n log_file = config['HADOOP']['log_file']\n pathlib.Path(log_dir).mkdir(exist_ok=True)\n file_handler = enable_logging(log_dir, log_file)\n logger.addHandler(file_handler)\n print(\"Create log dir if it doesn't exist...\")\n except:\n base_dir = 
config['LOCAL']['base_dir']\n log_dir = os.path.join(base_dir, config['LOCAL']['log_dir'])\n log_file = config['LOCAL']['log_file']\n pathlib.Path(log_dir).mkdir(exist_ok=True)\n file_handler = enable_logging(log_dir, log_file)\n logger.addHandler(file_handler)\n print(\"Create log dir if it doesn't exist...\")\n\n\n logger.info('ETL parsing has started...')\n logger.info(\"Create output dir if it doesn't exist...\")\n if args['env'] != 'S3':\n pathlib.Path(output_dir).mkdir(exist_ok=True)\n else:\n # config.set('S3', 's3_bucket_name', args['bucket_name'])\n # s3_client.put_object(Bucket=args['bucket_name'], Key=config['S3']['config_dir'], Body=)\n s3_client.put_object(Bucket=args['bucket_name'], Key=config['S3']['output_dir'])\n logger.info('Created S3 bucket...')\n \n spark = create_spark_session()\n logger.info('Pyspark session created...')\n logger.info('Register UDFs...')\n \n spark.udf.register('SASDateConverter', sas_date_converter, Date())\n logger.info('Register sas_date_converter UDF...')\n\n # change_date_format_1 = F.udf(lambda x: datetime.strptime(x.strip(), '%Y%m%d'), Date())\n # change_date_format_2 = F.udf(lambda x: datetime.strptime(x.strip(), '%m%d%Y'), Date())\n dt = F.udf(change_date_format, Date())\n\n logger.info('Read and concatenate the raw SAS files...')\n dfs = []\n for file in files:\n try:\n df = spark.read.format('com.github.saurfang.sas.spark')\\\n .load(os.path.join(sas_file_path, file))\n dfs.append(df)\n except Exception as e:\n logger.info(f'File {file} is not available. Skipping...')\n logger.info(f'Read {len(files)} files successfully...')\n df = []\n if len(dfs) > 0:\n df = concat_df(*dfs)\n logger.info(f'Successfully concatenated {len(files)}...')\n if not isinstance(df, list):\n # SAS raw data table creation begins here\n cols = ['cicid', 'i94yr', 'i94mon', 'i94port', 'i94mode', 'visapost', \n 'entdepa', 'entdepd', 'entdepu', 'matflag', \n 'dtadfile', 'dtaddto']\n parquet_tables = ['i94_immigrations', 'i94_trips', 'i94_visitors', 'i94_flights']\n f_transforms = [i94_immigrations, i94_trips, i94_visitors, i94_flights]\n res_df = None\n for table, f_transform in zip(parquet_tables, f_transforms):\n if table == 'i94_immigrations':\n # only table not using spark sql\n res_df = create_and_write_df(df, table, f_transform, \n output_dir,\n spark=None, cols=cols,\n udf=dt, fmt='parquet',\n is_partition=True,\n is_overwrite=True,\n crate_date_df=False)\n elif table == 'i94_flights':\n res_df = create_and_write_df(df, table, f_transform, \n output_dir,\n spark=spark, cols=None,\n udf=None, fmt='csv',\n is_partition=False,\n is_overwrite=True,\n crate_date_df=False)\n else:\n res_df = create_and_write_df(df, table, f_transform, \n output_dir,\n spark=spark, cols=None,\n udf=None, fmt='parquet',\n is_partition=True,\n is_overwrite=True,\n crate_date_df=False)\n\n if table == 'i94_trips':\n table = 'i94_dates'\n create_and_write_df(res_df, table, i94_dates, \n output_dir,\n spark=spark, cols=None,\n udf=None, fmt='parquet',\n is_partition=True,\n is_overwrite=True,\n crate_date_df=False)\n\n # Reference data for airports and us city demographics begins here\n airport_df = spark.createDataFrame([], R([]))\n demographic_df = spark.createDataFrame([], R([]))\n logger.info('Read the airports reference file...')\n try:\n airport_df = spark.read.option('header', True) \\\n .csv(airport_file)\n except Exception as e:\n logger.error(f'File {airport_file} is not available. 
Skipping...')\n\n logger.info('Read the US demographics reference file...')\n try:\n demographic_df = spark.read.options(header='True', delimiter=';') \\\n .csv(demographic_file) \n except Exception as e:\n logger.error(f'File {demographic_file} is not available. Skipping...')\n if airport_df.count() > 0 and demographic_df.count() > 0: \n csv_tables = ['i94_airports', 'i94_us_states_demographic', \n 'i94_us_cities_demographic']\n f_transforms = [i94_airports, i94_us_states_demographic, i94_us_cities_demographic]\n csv_dfs = [airport_df, demographic_df, demographic_df]\n for table, f_transform, df in zip(csv_tables, f_transforms, csv_dfs):\n res_df = create_and_write_df(df, table, f_transform, \n output_dir,\n spark=spark, cols=None,\n udf=dt, fmt='csv',\n is_partition=False,\n is_overwrite=True)\n\n # SAS reference data creation begins here\n ref_csv_tables = ['i94_countries', 'i94_port_state_mapping', 'i94_travel_mode', \n 'i94_state_mapping', 'i94_visa']\n table_pos_dict = {\n 'i94_countries': [2, 3, 'country', 'country_id'],\n 'i94_port_state_mapping': [3, 4, 'city', 'i94_port'],\n 'i94_travel_mode': [4, 5, 'mode', 'mode_id'],\n 'i94_state_mapping': [5, 6, 'state', 'state_id'],\n 'i94_visa': [6, 7, 'visa_purpose', 'visa_id']\n }\n logger.info('Read the SAS data dictionary reference file...') \n for table in ref_csv_tables:\n create_and_write_ref_df(dictionary_file, table, output_dir, spark, \n fmt='csv', start_pos=table_pos_dict[table][0], \n end_pos=table_pos_dict[table][1],\n col_name=table_pos_dict[table][2], \n index_name=table_pos_dict[table][3],\n is_partition=False,\n is_overwrite=True)\n\n logger.info('ETL parsing has completed...')\n logger.info('Time taken to complete job {} minutes'.format((time.time() - t0) / 60))", "def __init__(self, bucket, aws_profile=None, logger=None):\n self.bucket = bucket\n self.s3helper = S3Helper(aws_profile=aws_profile)\n self.print_func = print\n if logger:\n self.print_func = logger.info", "def get_s3_args(\n parser: Optional[argparse.ArgumentParser] = None,\n) -> argparse.ArgumentParser:\n\n if parser is None:\n parser = argparse.ArgumentParser()\n\n s3_parser = parser.add_argument_group(\"s3\")\n\n s3_parser.add_argument(\n \"--s3-bucket\",\n type=str,\n action=env_default(\"COMPSYN_S3_BUCKET\"),\n required=False,\n help=\"bucket where img data is stored in S3\",\n )\n s3_parser.add_argument(\n \"--s3-region-name\",\n type=str,\n required=False,\n action=env_default(\"COMPSYN_S3_REGION_NAME\"),\n help=\"S3 region\",\n )\n s3_parser.add_argument(\n \"--s3-endpoint-url\",\n action=env_default(\"COMPSYN_S3_ENDPOINT_URL\"),\n required=False,\n help=\"S3 endpoint URL (only required for non-AWS S3)\",\n )\n s3_parser.add_argument(\n \"--s3-access-key-id\",\n type=str,\n action=env_default(\"COMPSYN_S3_ACCESS_KEY_ID\"),\n required=False,\n )\n s3_parser.add_argument(\n \"--s3-secret-access-key\",\n type=str,\n action=env_default(\"COMPSYN_S3_SECRET_ACCESS_KEY\"),\n required=False,\n )\n\n return parser", "def __init__(\n self,\n service_name: str,\n account_id: str,\n region_name: Optional[str] = None,\n aws_creds: Optional[Dict[str, str]] = None,\n profile_name: Optional[str] = None,\n placebo: Optional[Any] = None,\n placebo_data_path: Optional[str] = None,\n placebo_mode: Optional[str] = \"record\",\n max_attempts: int = 20,\n config: Optional[Config] = None,\n max_attempts_on_client_error: int = 10,\n ):\n self._service_name = service_name\n self._region_name = region_name\n self._account_id = account_id\n self._max_attempts_on_client_error = 
max_attempts_on_client_error\n\n # Build a clojure in order to recreate boto3 client if needed\n\n def _create_client(service: str = None):\n return get_client(\n session=get_session(\n aws_creds=aws_creds,\n profile_name=profile_name,\n placebo=placebo,\n placebo_data_path=placebo_data_path,\n placebo_mode=placebo_mode,\n ),\n service_name=service if service else service_name,\n region_name=region_name,\n max_attempts=max_attempts,\n config=config,\n )\n\n # set client factory\n self.create_client = _create_client\n\n # Build boto3 client\n self._client = self.create_client()", "def _get_client(\n session: Optional[boto3.Session] = None, region: Optional[str] = None\n) -> S3Client:\n return session.client(\"s3\") if session else boto3.client(\"s3\", region_name=region)", "def createaws() -> my_aws_api_library.MyAws:\r\n aws_cred_file_path = os.environ['AWS_CRED_FILE']\r\n comp_pubkey = os.environ['COMPANY_PUBKEY']\r\n my_aws = my_aws_api_library.MyAws(aws_cred_file_path, comp_pubkey)\r\n return my_aws", "def get_conn():\n global S3Conn\n\n S3Conn = tinys3.Connection(plug.options['aws_access_key'],\n plug.options['aws_secret_key'],\n default_bucket=plug.options['bucket'], tls=True)\n # Check that the given bucket exists by doing a HEAD request\n try:\n S3Conn.head_bucket()\n except requests.HTTPError as httpe:\n err = u\"Cannot reach Onitu bucket {}\".format(plug.options['bucket'])\n if httpe.response.status_code == 404:\n err += u\": The bucket doesn't exist.\"\n if httpe.response.status_code == 403:\n err += u\": Invalid credentials.\"\n err += u\" Please check your Amazon S3 configuration - {}\".format(httpe)\n raise DriverError(err)\n plug.logger.debug(\"Connection with Amazon S3 account successful\")\n return S3Conn", "def boto_client(account_id, service_name, region):\n logger.info('Creating boto3 client for account_id: {}, '\n 'service_name: {}'.format(account_id, service_name))\n return boto3.client(service_name, region_name=region)", "def get_boto3_version() -> str:\n return boto3_version", "def __get_s3_client(self):\n if self.AWS_ACCESS_KEY:\n s3_client = boto3.client(\n \"s3\",\n aws_access_key_id=self.AWS_ACCESS_KEY,\n aws_secret_access_key=self.AWS_SECRET_ACCESS_KEY,\n )\n else:\n s3_client = boto3.client(\"s3\")\n return s3_client", "def _get_s3_object(self, s3_path):\n bucket_name, key = S3Util.get_bucket_and_key(s3_path)\n return self.s3_resource.Object(bucket_name, key)", "def __init__(self):\n self.s3_resource = boto3.resource('s3')\n self.s3_client = boto3.client('s3')", "def s3_client(self):\n return boto3.client('s3', \n aws_access_key_id=os.environ.get(\"MINIO_ACCESS_KEY\"),\n aws_secret_access_key=os.environ.get(\"MINIO_SECRET_KEY\"),\n endpoint_url=f'http://{os.environ.get(\"MINIO_SERVER\")}',\n config=Config(signature_version='s3v4')\n )", "def mock_s3_boto_returns() -> callable:\n\n def client(aws_res, aws_access_key_id=None, aws_secret_access_key=None):\n return BotoMockReturns()\n\n return client", "def get_boto3_client(\n *,\n aws_lambda_mode: bool,\n service_name: str,\n profile_name: str = 'kreodont',\n connect_timeout: float = 0.2,\n read_timeout: float = 0.4,\n) -> Optional[boto3.client]:\n known_services = ['translate', 'dynamodb', 's3']\n if service_name in global_cached_boto3_clients:\n print(f'{service_name} client taken from cache!')\n return global_cached_boto3_clients[service_name]\n\n if service_name not in known_services:\n raise Exception(\n f'Not known service '\n f'name {service_name}. 
The following '\n f'service names known: {\", \".join(known_services)}')\n\n if aws_lambda_mode:\n client = boto3.client(\n service_name,\n config=botocore.client.Config(\n connect_timeout=connect_timeout,\n read_timeout=read_timeout,\n parameter_validation=False,\n retries={'max_attempts': 0},\n ),\n )\n else:\n client = boto3.Session(profile_name=profile_name).client(service_name)\n return client\n\n # saving to cache to to spend time to create it next time\n global_cached_boto3_clients[service_name] = client\n return client", "def _get_buckets():\n\n return __opts__[\"s3.buckets\"] if \"s3.buckets\" in __opts__ else {}", "def __init__(self):\n self.aws = AWS()", "def get_elb(args=None, logger=None, stats=None):\n if not args:\n parser = get_parser()\n add_elb_cli_arguments(parser)\n args = parser.parse_args()\n if not logger:\n logger = get_logger(name=NAME)\n\n if not stats:\n stats = get_stats(prefix=NAME)\n\n boto = Boto3(\n log_level=args.boto_log_level,\n access_key=args.boto_access_key,\n secret_key=args.boto_secret_key,\n region=args.boto_region,\n logger=logger,\n stats=stats,\n )\n return ELB(\n boto=boto,\n logger=logger,\n stats=stats,\n )" ]
[ "0.767378", "0.62587637", "0.5730653", "0.5656308", "0.5650132", "0.5583414", "0.5518301", "0.5486912", "0.5486354", "0.5425678", "0.54201967", "0.541126", "0.5337484", "0.5296419", "0.5290054", "0.52891475", "0.52808595", "0.5232185", "0.52223915", "0.5181535", "0.5169401", "0.51678115", "0.513606", "0.5107055", "0.5100332", "0.5096866", "0.5081632", "0.50758284", "0.5073012", "0.50668836" ]
0.81879747
0
Extract plastic class label from Image Name and return it
def ExtractLabel(ImgName): # Each img has name notation "*****a0X*" where X is PlasticType PlasticType = ImgName[7] return { '1': 0, # PET '2': 1, # HDPE '4': 2, # LDPE '5': 3, # PP '6': 4, # PS '7': 5, # Other }[PlasticType]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def name_to_label(self, name):\n\t\t\treturn self.classes[name]", "def name_to_label(self, name):\n\t\treturn self.classes[name]", "def get_imagenet_label(index):\n global _CLASS_INDEX\n if _CLASS_INDEX is None:\n with open(os.path.join(os.path.dirname(__file__), '../resources/imagenet_class_index.json')) as f:\n _CLASS_INDEX = json.load(f)\n return _CLASS_INDEX[str(index)][1]", "def get_label(image, model):\n x = Variable(image, volatile=True)\n label = model(x).data.max(1)[1].numpy()[0]\n # We have string labels for ImageNet\n if isinstance(model, torchvision.models.inception.Inception3):\n label_string = labels.get(label)\n return label_string\n return label", "def name_to_label(self, name):\n return self.classes[name]", "def classify_image(img_pil):\n results = tpu.ClassifyWithImage(img_pil, top_k=1)\n if len(results) == 0:\n return None, None\n i, score = results[0]\n label = labels[i]\n # print(label + \": \" + str(score))\n return label, score", "def ocr_core_names(img):\n text = pytesseract.image_to_string(\n img,\n lang='eng',\n config='--psm 7 --oem 3'\n )\n return text", "def get_classification(self, image):\n # Image pre-processing pipeline\n img = cv2.resize(image, None, fx=0.5, fy=0.5)\n img = img.astype(np.float32)\n img = keras.applications.vgg16.preprocess_input(img)\n # Execute prediction\n probs = self.model.predict(np.array([img]), batch_size=1, verbose=1)[0]\n # get label with max probability\n g_x = np.argmax(probs)\n\n # reject if model is not confident\n if probs[g_x] < CONFIDENCE_THRESHOLD:\n return TrafficLight.UNKNOWN\n\n label = self.predictionary[g_x]\n rospy.loginfo(\"label: %d, conf: %f, %f, %f, %f\", g_x, probs[0], probs[1], probs[2], probs[3])\n return label", "def get_label(client, label):\n image_name = get_image_name()\n image = client.images.get(image_name)\n try:\n return image.labels[label]\n except KeyError:\n raise Exception(f\"Image should have a label '{label}'\")", "def overlay_class_names(self, image, predictions):\n scores = predictions.get_field(\"scores\").tolist()\n labels = predictions.get_field(\"labels\").tolist()\n labels = [self.CATEGORIES[int(i)] for i in labels]\n boxes = predictions.bbox\n\n template = \"{}: {:.2f}\"\n for box, score, label in zip(boxes, scores, labels):\n x, y = box[:2]\n s = template.format(label, score)\n cv2.putText(\n image, s, (x, y), cv2.FONT_HERSHEY_SIMPLEX, .5, (255, 255, 255), 1\n )\n\n return image", "def _get_label(cls, file_name):\n if cls == \"neg\":\n return \"0\"\n else:\n return \"1\"\n # reg = _REGEX_\n # rmtch = reg.match(file_name)\n # if rmtch:\n # return rmtch.groupdict()[\"label\"]\n # else:\n # return \"unknown_positive\"", "def GetImageLabelFromImage(image, parent=None):\n pixmap = GetPixelMapFromImage(image)\n return GetImageLabelFromPixelMap(pixmap, parent=parent)", "def get_imagenet_classnames():\r\n return np.loadtxt(open(path_data+'/ilsvrc_2012_labels.txt'), dtype=object, delimiter='\\n')", "def show_class_name(img, pos, class_str, font_scale=0.35):\n\n img = img.astype(np.uint8)\n x0, y0 = int(pos[0]), int(pos[1])\n \n # Compute text size.\n txt = class_str\n font = cv2.FONT_HERSHEY_SIMPLEX\n ((txt_w, txt_h), _) = cv2.getTextSize(txt, font, font_scale, 1)\n \n # Place text background.\n back_tl = x0, y0 - int(1.3 * txt_h)\n back_br = x0 + txt_w, y0\n cv2.rectangle(img, back_tl, back_br, _GREEN, -1)\n \n # Show text.\n txt_tl = x0, y0 - int(0.3 * txt_h)\n cv2.putText(img, txt, txt_tl, font, font_scale, _GRAY, lineType=cv2.LINE_AA)\n return img", "def get_classname(self):\n return 
'ImageFilm'", "def overlay_class_names(image, predictions):\n scores = predictions.get_field(\"scores\").tolist()\n labels = predictions.get_field(\"labels\").tolist()\n labels_text = [CATEGORIES[i] for i in labels]\n boxes = predictions.bbox\n\n img_h, img_w, _ = image.shape\n\n template = \"{} {}: {:.2f}\"\n abv_map = {\n 'annotation_image': 'am',\n 'annotation_text': 'at',\n 'event_image': 'em',\n 'event_text': 'et',\n 'main_body': 'mb'\n }\n for c_label in DRAW_ORDER:\n for b_id, (box, score, label, label_text) in enumerate(zip(boxes, scores, labels, labels_text)):\n if label != c_label:\n continue\n x, y = box[:2]\n color = CATEGORIES_COLOR[label]\n label_text = abv_map.get(label_text, label_text)\n s = template.format(b_id, label_text, score)\n fontScale = img_w / 2000.0\n (text_width, text_height) = cv2.getTextSize(s, cv2.FONT_HERSHEY_SIMPLEX, fontScale=fontScale, thickness=1)[0]\n box_coords = ((x, y), (x + text_width - 2, y - text_height - 2))\n cv2.rectangle(image, box_coords[0], box_coords[1], color, cv2.FILLED)\n cv2.putText(\n image, s, (x, y), cv2.FONT_HERSHEY_SIMPLEX, fontScale, (255, 255, 255), 1\n )\n\n return image", "def get_label(img_path):\n img_name = img_path.stem\n label_name = img_name + \".txt\"\n label_path = img_path.parent / label_name\n with open(label_path) as f:\n label = json.load(f)\n return label", "def _get_label(self):\n if self.model.name == '':\n return \"KPI\"\n return \"KPI: {} ({})\".format(self.model.name, self.model.objective)", "def extract_labels(filename, num_images):\n gt_imgs = []\n for i in range(1, num_images+1):\n imageid = \"satImage_%.3d\" % i\n image_filename = filename + imageid + \".png\"\n if os.path.isfile(image_filename):\n print ('Loading ' + image_filename)\n img = mpimg.imread(image_filename)\n gt_imgs.append(img)\n else:\n print ('File ' + image_filename + ' does not exist')\n\n num_images = len(gt_imgs)\n gt_patches = [img_crop(gt_imgs[i], IMG_PATCH_SIZE, IMG_PATCH_SIZE, 0, False) for i in range(num_images)]\n data = numpy.asarray([gt_patches[i][j] for i in range(len(gt_patches)) for j in range(len(gt_patches[i]))])\n labels = numpy.asarray([value_to_class(numpy.mean(data[i])) for i in range(len(data))])\n\n # Convert to dense 1-hot representation.\n return labels.astype(numpy.float32)", "def _classify(self, example):\n neighbors = self.find_neighbor(example)\n class_label = self.find_response(neighbors)\n return class_label", "def label_to_class_name(label):\n try:\n genre_label = pd.read_csv(path.join(DATA_PATH, 'genre_labels.csv'))\n return genre_label[genre_label['label'] == int(label)]['genre'].values[\n 0]\n except IOError:\n return label", "def get_label(repo, title, verbose=None):\n if verbose:\n print \"Checking for label...\"\n label = None\n label_text = None\n try:\n label_start = 1 + title.index('(')\n label_end = title.index(')')\n label_text = title[label_start:label_end]\n except ValueError, e:\n print \"Warning: This tile has no embeded label. {0}\".format(e)\n if label_text:\n try:\n label = [repo.get_label(label_text)]\n if verbose:\n print \"Found label: {0}\".format(label)\n except UnknownObjectException, e:\n print \"Error: The label '{0}' does not exist on \" \\\n \"Github. 
{1}\".format(label_text, e)\n return label", "def _parse_classification(self, item):\n full_name = item.css('td[headers=Name]::text').extract_first()\n\n if \"Metra\" in full_name and \"Board Meeting\" in full_name:\n return BOARD\n elif \"Citizens Advisory\" in full_name:\n return ADVISORY_COMMITTEE\n elif \"Committee Meeting\" in full_name:\n return COMMITTEE\n else:\n return NOT_CLASSIFIED", "def get_label(self, name):\n label_list = self.wls_board.get_labels()\n for label in label_list:\n if name in label.name: \n return label", "def getImageLabels(bucket, key):\n client = boto3.client('rekognition')\n resp = client.detect_labels(\n Image={\n 'S3Object': {\n 'Bucket': bucket,\n 'Name': key\n }\n }\n )\n\n output = []\n # I'm assuming that we only need the name labels to return to the customer. \n for label in resp['Labels']:\n output.append(label['Name'])\n return output", "def getImageName(self):\r\n return self.imageName", "def get_classification(self, image):\n # Run inference on image\n prediction = None\n prediction = inferOnImage(self.sess, self.model_logits, self.X, image)\n\n # Convert number into label just for debug\n prediction_label = None\n if prediction[0] == 0:\n prediction_label = \"RED\"\n elif prediction[0] == 1:\n prediction_label = \"GREEN\"\n elif prediction[0] == 2:\n prediction_label = \"NOLIGHT\"\n\n # Log the message\n rospy.loginfo(\"The label returned is %s\", prediction_label)\n\n # Return Unknown for now\n return TrafficLight.UNKNOWN", "def extract_labels(filename, num_images, starting_id, context_factor):\n gt_imgs = []\n for i in range(starting_id, num_images+starting_id):\n imageid = \"satImage_%.3d\" % i\n image_filename = filename + imageid + \".png\"\n if os.path.isfile(image_filename):\n print ('Loading ' + image_filename)\n img = mpimg.imread(image_filename)\n gt_imgs.append(img)\n else:\n print ('File ' + image_filename + ' does not exist')\n\n num_images = len(gt_imgs)\n # it means that we base our labels only on the core of the patch, not including the contet added\n context_factor = 0\n gt_patches = [img_crop_context(gt_imgs[i], IMG_PATCH_SIZE, IMG_PATCH_SIZE,context_factor) for i in range(num_images)]\n data = np.asarray([gt_patches[i][j] for i in range(len(gt_patches)) for j in range(len(gt_patches[i]))])\n labels = np.asarray([value_to_class(np.mean(data[i])) for i in range(len(data))])\n\n # Convert to dense 1-hot representation.\n return labels.astype(np.float32)", "def _get_img_label(self, path):\n food_items = self.annotations[path]\n tomato_items = [\n item for item in food_items\n if item['id'] in self.tomato_label_ids\n ]\n return 1 if len(tomato_items) > 0 else 0", "def get_label_name(label):\n\tindex = np.argmax(label)\n\tlabels = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']\n\treturn labels[int(index)]" ]
[ "0.6750595", "0.67307", "0.6638844", "0.6555292", "0.6546279", "0.65367347", "0.64170223", "0.6413202", "0.6403757", "0.6369213", "0.63066596", "0.6250891", "0.6241231", "0.6123709", "0.6118442", "0.61034334", "0.60980034", "0.6039952", "0.6016841", "0.60058093", "0.5924414", "0.59056926", "0.5864966", "0.5863928", "0.5861977", "0.5861145", "0.5860533", "0.58571535", "0.58541787", "0.5839833" ]
0.7817808
0
Support the following DHCP DeviceManager calls. self.plugin.release_dhcp_port(network.id, self.get_device_id(network))
def release_dhcp_port(self, network_id, device_id): LOG.debug("release_dhcp_port: %s %s", network_id, device_id)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def release_dhcp_port(self, network_id, device_id):\n return self.call(self.context,\n self.make_msg('release_dhcp_port',\n network_id=network_id,\n device_id=device_id,\n host=self.host),\n topic=self.topic)", "def destroy(self, network, device_name):\n if self.conf.use_namespaces:\n namespace = NS_PREFIX + network.id\n else:\n namespace = None\n\n self.driver.unplug(device_name, namespace=namespace)\n\n self.plugin.release_dhcp_port(network.id,\n self.get_device_id(network))", "def unplug_port_from_network(self, device_id, device_owner, hostname,\n port_id, network_id, tenant_id, sg, vnic_type,\n switch_bindings=None, segments=None):", "def dhcp_release(ifname):\n\n logging.debug('Releasing %s...', ifname)\n\n try:\n subprocess.call(['dhcpcd', '--release', ifname])\n except OSError, err:\n if err.errno != errno.ENOENT:\n raise err\n\n try:\n call_timeout(['dhclient', '-1', '-r', ifname], timeout=5)\n except OSError, err:\n if err.errno == errno.ENOENT:\n logging.critical('Neither dhcpcd nor dhclient were found.')\n else:\n raise err", "def network_delete_end(self, payload):\n self.disable_dhcp_helper(payload['network_id'])", "def create_dhcp_port(self, port):\n LOG.debug(\"create_dhcp_port: %s\", port)\n port['port']['id'] = port['port']['network_id']\n\n # The following MAC address will be assigned to the Linux dummy\n # interface that\n # networking_calico.agent.linux.interface.RoutedInterfaceDriver\n # creates. Therefore it will never actually be used or involved in the\n # sending or receiving of any real data. Hence it should not matter\n # that we use a hardcoded value here, and the same value on every\n # networking-calico compute host. The '2' bit of the first byte means\n # 'locally administered', which makes sense for a hardcoded value like\n # this and distinguishes it from the space of managed MAC addresses.\n port['port']['mac_address'] = '02:00:00:00:00:00'\n port['port']['device_owner'] = constants.DEVICE_OWNER_DHCP\n return dhcp.DictModel(port['port'])", "def get_dhcp_port(self, network_id, device_id):\n return DictModel(self.call(self.context,\n self.make_msg('get_dhcp_port',\n network_id=network_id,\n device_id=device_id,\n host=self.host),\n topic=self.topic))", "def network_create_end(self, payload):\n network_id = payload['network']['id']\n self.enable_dhcp_helper(network_id)", "def release_port_fixed_ip(self, network_id, device_id, subnet_id):\n return self.call(self.context,\n self.make_msg('release_port_fixed_ip',\n network_id=network_id,\n subnet_id=subnet_id,\n device_id=device_id,\n host=self.host),\n topic=self.topic)", "def setup_dhcp_config(self, board_config):\n raise NotImplementedError", "def disable_dhcp_helper(self, network_id):\n network = self.cache.get_network_by_id(network_id)\n if network:\n if self.call_driver('disable', network):\n self.cache.remove(network)", "def dhcp(self, dhcp):\n\n self._dhcp = dhcp", "def adb_down(self, port):\n self.adb_transport = None\n self.check_adb([\"disconnect\", \"localhost:%d\" % port])\n\n # Wait until QEMU's forward has expired\n CONNECT_MAX_TRIES = 15\n connect_tries = 0\n while True:\n try:\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.connect((\"localhost\", port))\n sock.close()\n connect_tries += 1\n if connect_tries >= CONNECT_MAX_TRIES:\n raise Timeout(\"Wait for port forward to go away\",\n CONNECT_MAX_TRIES)\n time.sleep(1)\n except IOError:\n break", "def port_nic_remove(switch, port):\n client.port.detach_nic(switch, port)", "def dhcp_free(self, dhcp_free):\n\n self._dhcp_free = 
dhcp_free", "def delete_port_postcommit(self, mech_context):\n\n LOG.debug(\"delete_port_postcommit: called\")\n port = mech_context.current\n port_id = port['id']\n network_id = port['network_id']\n tenant_id = port['tenant_id']\n host_id = mech_context._binding.host\n context = mech_context._plugin_context\n\n try:\n network = seamicro_db.get_network(context, network_id)\n except Exception:\n LOG.exception(\n _LE(\"SeaMicro Mechanism: failed to get network %s from db\"),\n network_id)\n raise Exception(\n _(\"SeaMicro Mechanism: failed to get network %s from db\"),\n network_id)\n\n vlan_id = network['vlan']\n\n switch_ip, server_id, nics = _get_switch_info(self._switch, host_id)\n if switch_ip is not None and server_id is not None and nics is not None:\n try:\n interfaces = self.client[switch_ip].interfaces.list()\n for interface in interfaces:\n interface.remove_tagged_vlan(vlan_id)\n\n server = self.client[switch_ip].servers.get(server_id)\n if nics:\n server.unset_tagged_vlan(vlan_id, nics=nics)\n else:\n server.unset_tagged_vlan(vlan_id)\n except seamicro_client_exception.ClientException as ex:\n LOG.exception(\n _LE(\"SeaMicro driver: failed to delete port\"\n \" with the following error: %(error)s\"),\n {'error': ex.message})\n raise Exception(\n _(\"SeaMicro Mechanism: delete_port_postcommit failed\"))\n\n LOG.info(\n _LI(\"delete port (postcommit): port_id=%(port_id)s\"\n \" network_id=%(network_id)s tenant_id=%(tenant_id)s\"\n \" switch_ip=%(switch_ip)s server_id=%(server_id)s\"),\n {'port_id': port_id,\n 'network_id': network_id, 'tenant_id': tenant_id,\n 'switch_ip': switch_ip, 'server_id': server_id})", "def dhcp_agent_network_remove(self, dhcp_net_info):\n self.turn_on_dhcp_check()", "def plug_port_into_network(self, device_id, host_id, port_id,\n net_id, tenant_id, port_name, device_owner,\n sg, orig_sg, vnic_type, segments=None,\n switch_bindings=None, vlan_type=None):", "def _unplug_interface(self, context, tenant_id, net_id, port_id):\n LOG.debug(_(\"QuantumRestProxyV2: _unplug_interface() called\"))\n\n # delete from network ctrl. 
Remote error on delete is ignored\n try:\n resource = ATTACHMENT_PATH % (tenant_id, net_id, port_id)\n ret = self.servers.delete(resource)\n if not self.servers.action_success(ret):\n raise RemoteRestError(ret[2])\n except RemoteRestError as e:\n LOG.error(_(\"QuantumRestProxyV2: Unable to update remote port: \"\n \"%s\"), e.message)", "def disconnect_port(self, iface):\n self.iface_config(iface, adminMode='Down')", "def network_update_end(self, payload):\n network_id = payload['network']['id']\n if payload['network']['admin_state_up']:\n self.enable_dhcp_helper(network_id)\n else:\n self.disable_dhcp_helper(network_id)", "def dhcp_callback(self, state, target_mac=None, target_ip=None, exception=None):\n self.record_result('dhcp', info=target_mac, ip=target_ip, state=state, exception=exception)\n self.target_mac = target_mac\n self.target_ip = target_ip\n if exception:\n self._state_transition(_STATE.ERROR, _STATE.DHCP)\n self.runner.target_set_error(self.port_set, exception)\n else:\n self._state_transition(_STATE.BASE, _STATE.DHCP)", "def handle_link_down (self, port):\n for dest in self.hosts.keys():\n currPort = self.hosts[dest][0]\n if currPort == port:\n del self.hosts[dest]\n \n deleteDests = set()\n for dest in self.routesToDest:\n currPort = self.routesToDest[dest][0]\n \n if currPort == port:\n\n if dest in self.hosts:\n self.routesToDest[dest] = self.hosts[dest]\n packet = basics.RoutePacket(dest, self.routesToDest[dest][1])\n self.send(packet, self.routesToDest[dest][0], True)\n else:\n self.sendPoison(dest)\n deleteDests.add(dest)\n\n\n for dest in deleteDests:\n del self.routesToDest[dest]\n\n del self.neighbours[port]", "def _close(self):\n \n # Close device\n logger.debug(\"%s: UDP port closing started...\" % \\\n self.__class__.__name__)\n self._router = None\n self._platform = None\n self._udp_socket.close()\n logger.debug(\"%s: ...UDP port closing complete.\" % \\\n self.__class__.__name__)", "def exit(self):\n if self._dbus_iface is None:\n raise Exception('Method invoked on non existing D-Bus interface')\n self._dbus_iface.Release(reply_handler = self._exitUnlock, error_handler = self._exitUnlock) # Call Exit() but ignore whether it gets acknowledged or not... this is because slave process may terminate before even acknowledge\n self._exit_unlock_event.wait(timeout = 5) # Give 5s for slave to acknowledge the Exit() D-Bus method call... 
otherwise, ignore and continue\n # Once we have instructed the slave to send a Release, we can stop our own D-Bus loop (we won't communicate with the slave anymore)\n # Stop the dbus loop\n if not self._dbus_loop is None:\n self._dbus_loop.quit()\n \n self._dbus_loop = None\n \n logger.debug('Sending Exit() to remote DHCP client')\n self._exit_unlock_event.clear()", "def setup_dhcp_env(device):\n raise NotImplementedError", "def unplug(self, bridge):\n ovsdb = self.bridge.ovsdb\n with ovsdb.transaction() as txn:\n txn.add(ovsdb.del_port(self.patch_port_int_name,\n bridge.br_name))", "def create_port(self, context, port):\n LOG.debug(_(\"NeutronRestProxyV2: create_port() called\"))\n\n # Update DB in new session so exceptions rollback changes\n with context.session.begin(subtransactions=True):\n port[\"port\"][\"admin_state_up\"] = False\n dhcp_opts = port['port'].get(edo_ext.EXTRADHCPOPTS, [])\n new_port = super(NeutronRestProxyV2, self).create_port(context,\n port)\n if (portbindings.HOST_ID in port['port']\n and 'id' in new_port):\n host_id = port['port'][portbindings.HOST_ID]\n porttracker_db.put_port_hostid(context, new_port['id'],\n host_id)\n self._process_port_create_extra_dhcp_opts(context, new_port,\n dhcp_opts)\n new_port = self._extend_port_dict_binding(context, new_port)\n net = super(NeutronRestProxyV2,\n self).get_network(context, new_port[\"network_id\"])\n\n if self.add_meta_server_route:\n if new_port['device_owner'] == 'network:dhcp':\n destination = METADATA_SERVER_IP + '/32'\n self._add_host_route(context, destination, new_port)\n\n # create on network ctrl\n mapped_port = self._map_state_and_status(new_port)\n self.servers.rest_create_port(net, mapped_port)\n\n # connect device to network, if present\n device_id = port[\"port\"].get(\"device_id\")\n if device_id:\n try:\n self.servers.rest_plug_interface(net[\"tenant_id\"], net[\"id\"],\n new_port, device_id)\n except RemoteRestError:\n with excutils.save_and_reraise_exception():\n port_update = {\"port\": {\"status\": \"ERROR\"}}\n super(NeutronRestProxyV2, self).update_port(\n context,\n new_port[\"id\"],\n port_update\n )\n # Set port state up and return that port\n port_update = {\"port\": {\"admin_state_up\": True}}\n new_port = super(NeutronRestProxyV2, self).update_port(context,\n new_port[\"id\"],\n port_update)\n return self._extend_port_dict_binding(context, new_port)", "def _RunDHCPCD(self, **kwargs):\n del kwargs\n clear_ifconfig_command = 'ifconfig {interface} 0.0.0.0'.format(\n interface=self.interface)\n # -K: Don't receive link messages for carrier status. You should\n # only have to use this with buggy device drivers or running\n # dhcpcd through a network manager.\n # -c: Location to the hooks file. If the default location happens to be\n # empty, dhcpcd will fail. 
So we set the hooks file to /dev/null.\n dhcp_command = ('dhcpcd -K -t {timeout} -c /dev/null {interface}').format(\n timeout=self._dhcp_timeout,\n interface=self.interface)\n dhcp_timeout_command = 'timeout {timeout} {cmd}'.format(\n timeout=self._dhcp_timeout,\n cmd=dhcp_command)\n force_kill_command = 'pgrep dhcpcd | xargs -r kill -9'\n\n logging.info('Killing any existing dhcpcd processes...')\n self._device.Call(force_kill_command)\n\n logging.info('Clearing any existing ifconfig networks...')\n self._device.Call(clear_ifconfig_command)\n\n logging.info('Starting dhcpcd...')\n self._device.CheckCall(dhcp_timeout_command)\n\n logging.info('Verifying IP address...')\n ip = self._LeasedIP()\n if not ip:\n self._device.Call(force_kill_command)\n raise WiFiError('DHCP bind failed')\n logging.info('Success: bound to IP %s', ip)\n\n yield ip # We have bound an IP; yield back to the caller.\n\n logging.info('Killing any remaining dhcpcd processes...')\n self._device.Call(force_kill_command)\n\n yield # We have released the IP.", "def renew_dhcp_lease(self):\n\t\tresponse = os.system(\"/sbin/dhclient -r;/sbin/dhclient\")\n\t\tif response != 0:\n\t\t\tprint \"Network restart failed. DHCP Lease failed.\"" ]
[ "0.8054723", "0.71377176", "0.6566923", "0.6155076", "0.5985578", "0.59473217", "0.58076227", "0.5672177", "0.5659102", "0.5577378", "0.5557", "0.55073285", "0.5491355", "0.54843926", "0.5454663", "0.5447936", "0.5415151", "0.54091704", "0.5385533", "0.53768826", "0.53333277", "0.5326683", "0.5321701", "0.5321672", "0.5281091", "0.5274553", "0.5266224", "0.5262475", "0.52518713", "0.5204779" ]
0.84403765
0
Construct and return an empty network model.
def empty_network(network_id=NETWORK_ID): return make_net_model({"id": network_id, "subnets": [], "ports": [], "tenant_id": "calico", "mtu": neutron_constants.DEFAULT_NETWORK_MTU})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_model():\n\n class Net(nn.Cell):\n def construct(self, x, y):\n return x\n\n net = Net()\n model_simple = Model(net)\n\n return model_simple", "def create_network(self):\n\n print ('Creating network, changing data will have no effect beyond this point.')\n n = IMNN.IMNN(parameters=self.parameters)\n\n if self.load_network:\n n.restore_network()\n else:\n n.setup(network = self.network, load_data = self.data)\n\n return n", "def _make_network(self):\n inp = Input(shape = (self.input_dim,))\n x = Dense(256, activation='relu')(inp)\n x = GaussianNoise(1.0)(x)\n #x = Flatten()(x) # I assume this is if the input is a convolutional neural net?\n x = Dense(128, activation='relu')(x)\n x = GaussianNoise(1.0)(x)\n out = Dense(self.output_dim, activation='tanh', kernel_initializer=RandomUniform())(x)\n out = Lambda(lambda i: i * self.act_range)(out)\n return Model(inp, out)", "def new_network():\n new_names = Names()\n new_devices = Devices(new_names)\n return Network(new_names, new_devices)", "def createModel(self):\n outputs, inputs = baseUNet(self.input_shape,\n self.conv_depth,\n self.n_classes,\n self.init_w,\n self.dropout)\n \n if self.regression == True:\n outputs = Lambda(getPropOfGround)(outputs)\n \n model = Model(inputs = inputs,outputs = outputs)\n \n model.compile(optimizer = self.optimizer,\n loss=self.loss_function,\n metrics=self.metrics)\n\n if self.old_weights != None:\n model.set_weights(self.old_weights)\n self.model = model", "def make_model(self):\n onnx_graph = onnx.helper.make_graph(\n self._nodes, self._name, self._inputs, self._outputs, self._initializers\n )\n kwargs = {}\n kwargs[\"opset_imports\"] = self._get_opsets()\n kwargs[\"producer_name\"] = \"TVM Relay\"\n kwargs[\"producer_version\"] = tvm.__version__\n\n return onnx.helper.make_model(onnx_graph, **kwargs)", "def _create_nn(self):\n with tf.name_scope('policy_network'):\n with tf.variable_scope(\"policy_network\"):\n model = tf.keras.Sequential(name='policy_network_model')\n model.add(tf.keras.layers.Dense(self.neurons_in_each_layer[0], activation=tf.nn.relu,\n input_shape=(1, self.neurons_in_each_layer[0])))\n for num_neurons in self.neurons_in_each_layer[1:-1]:\n model.add(tf.keras.layers.Dense(num_neurons, activation=tf.nn.relu))\n model.add(tf.keras.layers.Dense(self.neurons_in_each_layer[-1], name='policy_output_layer'))\n\n return model", "def build_model(self):\n if self.args.network_type == 'unet':\n self.shared = models.Unet(self.args)\n else:\n raise NotImplementedError(f'Network type '\n f'`{self.args.network_type}` is not '\n f'defined')\n self.controller = models.Controller(self.args)\n\n if self.args.num_gpu == 1:\n self.shared.cuda()\n self.controller.cuda()\n elif self.args.num_gpu > 1:\n raise NotImplementedError('`num_gpu > 1` is in progress')", "def create_nn(self):\n\n\t\tmodel = Sequential()\n\t\tmodel.add(Dense(32, input_dim=self.state_size, activation='relu'))\n\t\tmodel.add(Dense(32, activation='relu'))\n\t\tmodel.add(Dense(64, activation='relu'))\n\t\tmodel.add(Dense(self.action_size, activation='linear'))\n\t\tmodel.compile(loss='mse', optimizer=Adam(lr=self.learning_rate))\n\t\treturn model", "def init_model(self) -> keras.Model:\n model_input = keras.Input(shape=(self.num_classes, self.nun_models))\n\n layer_out = Conv1D(64, kernel_size=self.num_classes, activation=\"sigmoid\")(\n model_input\n )\n layer_out = Dropout(0.2)(layer_out)\n\n layer_out = Dense(128)(layer_out)\n layer_out = Dropout(0.2)(layer_out)\n\n layer_out = Flatten()(layer_out)\n\n layer_out = 
Dense(128)(layer_out)\n layer_out = Dropout(0.2)(layer_out)\n output = Dense(self.num_classes, activation=\"softmax\")(layer_out)\n\n return keras.Model(inputs=model_input, outputs=output)", "def initModel(self):\n input_shape = (self.params[\"nb_features\"],)\n x = input_tensor = Input(input_shape)\n x = Dense(self.params[\"nb_neurons\"], activation=\"relu\")(x)\n for i in range(2, self.params[\"nb_layers\"] + 1):\n x = Dense(self.params[\"nb_neurons\"], activation=\"relu\")(x)\n if self.params[\"dropout\"]:\n x = Dropout(self.params[\"dropout\"])(x)\n x = output_tensor = Dense(4)(x)\n model = Model(input_tensor, output_tensor)\n return model", "def create_network(layers):\r\n return NeuronNetwork(layers)", "def create_model() -> Model:\n # Create a neural network model that includes several dense layers with hyperbolic tangent activations, L2 regularization, and batch normalization\n regularizer = l2(0)\n dropout = 0\n activation = 'tanh'\n model = Sequential([\n InputLayer(input_shape=(16,)),\n BatchNormalization(),\n Dense(12, activation=activation, kernel_regularizer=regularizer),\n Dropout(dropout),\n Dense(8, activation=activation, kernel_regularizer=regularizer),\n Dropout(dropout),\n Dense(1, kernel_regularizer=regularizer)\n ])\n # Output a summary of the model's architecture\n print(model.summary())\n # Use a mean squared error loss function and an Adam optimizer; do not print accuracy because this is a regression task\n model.compile(\n optimizer='adam',\n loss='mse',\n metrics=['mae']\n )\n # Return the untrained model\n return model", "def _build_network(self):\n pass", "def initialise_network(self):\n raise NotImplementedError", "def build_empty_graph(input_dim, output_dim, num_intermediate):\n from .models import DAG\n num_emit, num_rec = num_intermediate + input_dim, num_intermediate + output_dim\n activations = torch.zeros(num_rec, dtype=torch.long)\n connections = torch.zeros(num_rec, num_emit, dtype=torch.long)\n\n return DAG(input_dim, output_dim, num_intermediate, connections, activations, check_valid=True)", "def empty_instance():\n from weighted_graph import Graph\n return Graph()", "def create_network(self):\n from dallinger.networks import Star\n\n return Star(max_size=2)", "def make_model():\n # create the base pre-trained model\n base_model = efn.EfficientNetB0(input_shape=(img_width, img_height, 3), include_top=False)\n # add a global spatial average pooling layer\n x = base_model.output\n x = GlobalAveragePooling2D()(x)\n predictions = Dense(num_classes, activation=\"softmax\")(x)\n model = Model(inputs=base_model.input, outputs=predictions)\n\n model.compile(optimizer=\"adam\", loss=\"categorical_crossentropy\", metrics=[\"accuracy\"])\n\n return base_model, model", "def build_model(self):\n cfg = self.cfg\n\n print('Building model')\n self.model = SimpleNet(cfg, cfg.MODEL, 0, **cfg.MODEL.BACKBONE.PARAMS)\n self.model.to(self.device)\n print('# params: {:,}'.format(count_num_param(self.model)))\n self.optim = build_optimizer(self.model, cfg.OPTIM)\n self.sched = build_lr_scheduler(self.optim, cfg.OPTIM)\n self.register_model('model', self.model, self.optim, self.sched)\n\n fdim = self.model.fdim\n self.classifier = nn.Linear(fdim, self.num_classes)\n print('# params: {:,}'.format(count_num_param(self.classifier)))\n self.classifier.to(self.device)\n self.optim_classifier = build_optimizer(self.classifier, cfg.OPTIM)\n self.sched_classifier = build_lr_scheduler(self.optim_classifier, cfg.OPTIM)\n self.register_model('classifier', self.classifier, 
self.optim_classifier, self.sched_classifier)", "def new(self):\n self.define_layers()\n self.model = nn.Sequential(*self.layers)\n self.model.cuda()\n self.model = orthogonal_init(self.model)\n\n # Re-count N\n self.count_params()", "def create_network(model_file=DEFAULT_MODEL_FILE, pretrained=DEFAULT_PRETRAINED, *args, **kwargs):\n net = imagenet_classifier(*args,**kwargs)\n net.set_phase_test()\n net.set_mode_cpu()\n return net", "def create_neural_network():\n network_input = keras.layers.Input((NETWORK_INPUT_SIZE,))\n network_layer = keras.layers.Dense(100, kernel_initializer='random_uniform', activation='tanh')(network_input)\n network_layer = keras.layers.Dense(100, kernel_initializer='random_uniform', activation='tanh')(network_layer)\n network_output = keras.layers.Dense(NETWORK_OUTPUT_SIZE, kernel_initializer='random_uniform', activation='linear')(network_layer)\n network = keras.models.Model(inputs=network_input, outputs=network_output)\n network.compile(loss=\"mse\", optimizer=\"Adam\")\n return network", "def create_model(project_parameters):\n model = Net(project_parameters=project_parameters)\n if project_parameters.checkpoint_path is not None:\n model = load_checkpoint(model=model, num_classes=project_parameters.num_classes,\n use_cuda=project_parameters.use_cuda, checkpoint_path=project_parameters.checkpoint_path)\n return model", "def create_model(self):\n # Create the generator and discriminators\n self.generator_lungs = self.generator_model()\n self.generator_organs = self.generator_model()\n\n self.disc_lungs = self.discriminator_model_lungs()\n self.disc_organs = self.discriminator_model_organs()\n\n # Initialize the optimizer and backend\n self.generator_optimizer = tf.keras.optimizers.Adam(learning_rate=2e-4, beta_1=0.5)\n self.discriminator_optimizer = tf.keras.optimizers.Adam(learning_rate=2e-4, beta_1=0.5)\n self.set_backend = tf.keras.backend.set_floatx('float32')\n\n # Create the summary writer\n self.create_summary_writer()\n print('Models are created.')\n return self", "def create_simple_model():\n\n input_shape = (160, 320, 3)\n \n m = Sequential()\n\n # 1. Add Normalization\n m.add(Lambda(lambda x: x/255.0 - 0.5,\n input_shape=input_shape,\n ))\n\n # 2. Flatten + 1 fully connected layer\n m.add(Flatten())\n m.add(Dense(10, activation='relu', init=my_init))\n \n # 3. 
Output Layer is a Dense layer with no activation function\n m.add(Dense(1))\n \n return m", "def create_nueral_network(X, y, epochs=8):\n model = Sequential()\n model.add(layers.Dense(500, input_dim=X.shape[1]))\n model.add(layers.Dropout(.2))\n model.add(layers.Dense(128, activation='relu'))\n model.add(layers.Dropout(.2))\n model.add(layers.Dense(64, activation='relu'))\n model.add(layers.Dropout(.2))\n model.add(layers.Dense(32, activation='relu'))\n model.add(layers.Dense(5,activation='softmax'))\n\n model.compile(optimizer='adam', loss='categorical_crossentropy',metrics=['accuracy'])\n print(model.summary())\n model.fit(X, y, epochs=epochs, batch_size=500)\n return model", "def init_model(self):\n model = Sequential()\n model.add(Dense(units=24, input_dim=self.input_shape[0],\n activation='relu'))\n model.add(Dense(units=24, activation='relu'))\n # We want rewards instead of probability, so use linear here\n model.add(Dense(units=self.output_num, activation='linear'))\n model.compile(loss='mse', optimizer=Adam(lr=self.eta))\n return model", "def create_base_network(NumberOfFeatures, NumberOfClasses,init_mode='glorot_normal'):\n network = Sequential()\n network.add(Dense(44, activation='sigmoid', kernel_initializer=init_mode,input_dim=NumberOfFeatures))\n# network.add(Dense(22, activation='sigmoid',kernel_initializer=init_mode))\n network.add(Dense(NumberOfClasses, activation='softmax',kernel_initializer=init_mode))\n return network", "def create_model():\n\n # Create a sequential model (a simple NN is created) adding a softmax activation at the end with 10 units:\n model = Sequential()\n model.add(Dense(units=128, activation=\"relu\", input_shape=(784,)))\n model.add(Dense(units=128, activation=\"relu\"))\n model.add(Dense(units=128, activation=\"relu\"))\n model.add(Dense(units=10, activation=\"softmax\"))\n\n # Compile the model using the loss function \"categorical_crossentropy\" and Stocastic Gradient Descent optimizer:\n model.compile(optimizer=SGD(0.001), loss=\"categorical_crossentropy\", metrics=[\"accuracy\"])\n\n # Return the created model\n return model" ]
[ "0.703458", "0.6730677", "0.6650045", "0.66165984", "0.65974486", "0.65902346", "0.6558993", "0.645672", "0.6456341", "0.6397473", "0.6397053", "0.63881767", "0.6345971", "0.6332582", "0.63210064", "0.62721264", "0.62575936", "0.6252461", "0.62491995", "0.6246717", "0.62378824", "0.62359154", "0.6229708", "0.62017465", "0.62000144", "0.6160766", "0.6146506", "0.6130534", "0.6127554", "0.61217946" ]
0.749267
0
Ensure that the cache has a NetModel and subnets for PORT.
def _ensure_net_and_subnets(self, port): # Gather the subnet IDs that we need for this port, and get the # NetModel if we already have it in the cache. needed_subnet_ids = set() net = None for fixed_ip in port['fixed_ips']: subnet_id = fixed_ip.get('subnet_id') if subnet_id: needed_subnet_ids.add(subnet_id) if not net: net = self.agent.cache.get_network_by_subnet_id(subnet_id) LOG.debug("Needed subnet IDs: %s", needed_subnet_ids) LOG.debug("Existing network model by subnet ID: %s", net) # For each subnet that we need, get its data from SubnetWatcher and # hold for adding into the cache. new_subnets = {} for subnet_id in needed_subnet_ids: # Get data for this subnet from the SubnetWatchers. subnet = (self.subnet_watcher.get_subnet(subnet_id) or self.v1_subnet_watcher.get_subnet(subnet_id)) if subnet is None: LOG.warning("No data for subnet %s", subnet_id) raise SubnetIDNotFound() new_subnets[subnet_id] = subnet if not net: # We don't already have a NetModel, so look for a cached NetModel # with the right network ID. (In this case we must have new # subnets to add into the cache, and the cached NetModel must have # subnets other than the ones that we're adding in this iteration; # otherwise we would have already found it when searching by # subnet_id above.) assert new_subnets network_id = list(new_subnets.values())[0]['network_id'] net = self.agent.cache.get_network_by_id(network_id) LOG.debug("Existing network model by network ID: %s", net) if not net: # We still have no NetModel for the relevant network ID, so create # a new one. In this case we _must_ be adding new subnets. assert new_subnets net = empty_network(network_id) LOG.debug("New network %s", net) elif new_subnets: # We have a NetModel that was already in the cache and are about to # modify it. Cache replacement only works if the new NetModel is a # distinct object from the existing one, so make a copy here. net = copy_network(net) LOG.debug("Copied network %s", net) if new_subnets: # Add the new subnets into the NetModel. assert net net.subnets = [s for s in net.subnets if s.id not in new_subnets] net.subnets += list(new_subnets.values()) # Add (or update) the NetModel in the cache. LOG.debug("Net: %s", net) _fix_network_cache_port_lookup(self.agent, net.id) self.agent.cache.put(net) return net.id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _fix_network_cache_port_lookup(agent, network_id):\n\n # If there is an existing NetModel for this network ID, ensure that all\n # its ports are in the port_lookup dict.\n if network_id in agent.cache.cache:\n for port in agent.cache.cache[network_id].ports:\n agent.cache.port_lookup[port.id] = network_id", "def test_get_valid_networks_for_virtualization_realm(self):\n pass", "def test_port_update_with_fixed_ips_ok_if_no_binding_host(self):\n with self.network() as network:\n segment = self._test_create_segment(\n network_id=network['network']['id'],\n physical_network='physnet',\n network_type=constants.TYPE_VLAN)\n\n # Create a port with no IP address (since there is no subnet)\n port = self._create_deferred_ip_port(network)\n\n # Create the subnet and try to update the port to get an IP\n with self.subnet(network=network,\n segment_id=segment['segment']['id']) as subnet:\n # Try requesting an IP (but the only subnet is on a segment)\n data = {'port': {\n 'fixed_ips': [{'subnet_id': subnet['subnet']['id']}]}}\n port_id = port['port']['id']\n port_req = self.new_update_request('ports', data, port_id)\n response = port_req.get_response(self.api)\n\n # The IP is allocated since there is no binding host info any\n # subnet can be used for allocation.\n self.assertEqual(webob.exc.HTTPOk.code, response.status_int)", "def check_port_validity(self):\n # Check if ports provided are already present in VPLEX\n if self.ports:\n LOG.info(\"Validating the ports\")\n for port in self.ports:\n obj = None\n try:\n obj = self.storageview.get_port(self.cl_name, port)\n except (utils.ApiException, ValueError, TypeError) as err:\n msg = \"Could not get port {0} details in {1} due to\"\n err_msg = msg.format(port, self.cl_name) + \" error {0}\"\n e_msg = utils.display_error(err_msg, err)\n LOG.error(\"%s\\n%s\\n\", e_msg, err)\n self.module.fail_json(msg=e_msg)\n\n if obj is None:\n msg = (\"Could not get port {0} details in {1}\"\n .format(port, self.cl_name))\n LOG.error(msg)\n self.module.fail_json(msg=msg)", "def _check_and_set_network(self) -> None:\n from hathor.transaction.storage.exceptions import WrongNetworkError\n\n network = settings.NETWORK_NAME\n stored_network = self.get_network()\n\n if stored_network is None:\n # no network is set, let's try to infer it\n self._checked_set_network(network)\n elif stored_network != network:\n # the stored network does not match, something is wrong\n raise WrongNetworkError(f'Databases created on {stored_network}, expected {network}')\n else:\n # the network is what is expected, nothing to do here\n pass", "def _validatePortConfig(self):\n if config.BindHTTPPorts:\n if config.HTTPPort == 0:\n raise UsageError(\n \"HTTPPort required if BindHTTPPorts is not empty\"\n )\n elif config.HTTPPort != 0:\n config.BindHTTPPorts = [config.HTTPPort]\n if config.BindSSLPorts:\n if config.SSLPort == 0:\n raise UsageError(\n \"SSLPort required if BindSSLPorts is not empty\"\n )\n elif config.SSLPort != 0:\n config.BindSSLPorts = [config.SSLPort]", "def test_insufficient_space(self):\n\n self._test_find_next_subnet(\n network=\"10.0.0.0/24\",\n subnets=[\"10.0.0.64/25\"],\n requests=[25],\n expected=None,\n )", "def test_port_update_allocate_no_segments(self):\n with self.network() as network:\n pass\n\n # Create a bound port with no IP address (since there is not subnet)\n port = self._create_deferred_ip_port(network)\n\n # Create the subnet and try to update the port to get an IP\n with self.subnet(network=network) as subnet:\n # Try requesting an IP (but the only 
subnet is on a segment)\n data = {'port': {\n 'fixed_ips': [{'subnet_id': subnet['subnet']['id']}]}}\n port_id = port['port']['id']\n port_req = self.new_update_request('ports', data, port_id)\n response = port_req.get_response(self.api)\n\n # Since port is bound and there is a mapping to segment, it succeeds.\n self.assertEqual(webob.exc.HTTPOk.code, response.status_int)\n self._assert_one_ip_in_subnet(response, subnet['subnet']['cidr'])", "def test_networking_project_network_update(self):\n pass", "def _check_port_available(hostname, port):\n for config_file in config_files:\n network_config = networkConfig(config_file)\n for name, host in network_config.hostDict.items():\n if port == host.port:\n return False\n\n return _check_socket_is_free(hostname, port)", "def _update_cachesize(self):\n san_res = self.san_interface\n _load = not self.san_interface.runmode\n if self.cachesize > 0:\n pvds = self._get_pvds()\n if len(pvds) < 1:\n # not suppposed to get here\n return (1,'Error no valid provider/path was found when setting cache')\n logger.eventlog.debug('in update cache for %s , cachedrdev: %s' % (str(self),str(self.cachedrdev)))\n # check if this is a single path case or replicated cache (multipath)\n if len(pvds) == 1 and len(self.cachepvds) < 2 and not self.cachedrdev:\n (e,pt) = ext2path(self,san_res.providers[pvds[0]])\n if e:\n return (e,'Error updating cache, '+pt)\n (e,r) = san_res.providers[pvds[0]].add_cache(pt,self.cachesize)\n if e:\n return (e,r)\n else:\n #\n # more than 1 path\n #\n\n # one path with cacheon and is running return ok\n for pt in self.paths():\n if pt.cacheon:\n if pt.state == ObjState.running:\n return (0,'Cache is ok')\n logger.eventlog.warning('cache for %s is ON but path is not running !' % str(self))\n\n # no running path with cache on\n self.cachepresent=False\n\n #\n cvolname=obj2volstr(self)\n cvolname=cvolname.replace(':',CACHESEP) # replace ':' with a legal volume char\n drname=CACHEPFX+cvolname\n cache_loadonly=False\n #\n\n # self.cachedrdev ?\n if self.san_interface.raids.has_key(drname):\n # found drbd dev for cache (fail-over or load??):\n # del tgt (old), remove cache (old), promote (new),\n # cache load (new), add targets (new)\n logger.eventlog.warning('Cache for %s is not on, while DR device is detected during update' % str(self))\n drdev = self.san_interface.raids[drname]\n if not drdev:\n logger.eventlog.error('cant update cache dr for %s , drdev not found' % (str(self)))\n return (1,'cant update Cache dr')\n if not drdev.provider:\n drdev.promote_one(checkluns=False)\n if not drdev.provider:\n logger.eventlog.error('cant update cache dr for %s , drdev provider not detected' % (str(self)))\n return (1,'cant update Cache dr')\n # debug\n #logger.eventlog.debug(\"cachepresent: %s\" % str(self.cachepresent))\n #for p in self.paths():\n # if p.provider==drdev.provider:\n # logger.eventlog.debug(\"p: %s\" % str(p))\n # logger.eventlog.debug(\"state: %s\" % str(p.state))\n # logger.eventlog.debug(\"cacheon: %s\" % str(p.cacheon))\n # end debug\n e,prim = ext2path(self,drdev.provider)\n if e:\n logger.eventlog.error('valid path not found for %s on %s in update' % (str(self),str(drdev.provider)))\n return (1,'valid path not found')\n #logger.eventlog.debug(\"prim: %s\" % str(prim))\n cache_loadonly=True\n else:\n if len(self.cachepvds)==1 or len(self.cachepvds)>2:\n # has only 1 cache LV (load, absent?) ?? 
or >2 (old ones redetected)\n logger.eventlog.error('Found %d Cache LVs for %s in update' % (len(self.cachepvds),str(self)))\n return (1,'Found %d Cache LVs for %s in update' % (len(self.cachepvds),str(self)))\n\n if len(self.cachepvds) == 2:\n # if has 2 cache LVs, no DR (load): create drbd, load cache\n (e1,path1) = ext2path(self,san_res.providers[self.cachepvds[0]])\n (e2,path2) = ext2path(self,san_res.providers[self.cachepvds[1]])\n print 'cache paths: ',str(path1),str(path2)\n if e1 or e2:\n logger.eventlog.error('valid paths not found for %s in update' % str(self))\n return (1,'valid path not found')\n vol1 = san_res.providers[self.cachepvds[0]].cachevg.volumes[cvolname]\n vol2 = san_res.providers[self.cachepvds[1]].cachevg.volumes[cvolname]\n cache_loadonly=True\n\n else:\n # else (new) : select 2 paths, create 2 LVs,\n # create & promote DRBD, Create cache on master\n\n e,path1,path2 = self._get_2_pvds_paths()\n if e:\n logger.eventlog.error(path1)\n return (1,path1)\n\n # create 2 cache LVs\n (e,vol1) = path1.provider.add_lv_for_cache(self,self.cachesize)\n if e > 1:\n tmp='cant create Cache LV1 for %s on %s in update: %s' % (self.name,path1.provider.name,vol1)\n logger.eventlog.error(tmp)\n return (1,tmp)\n (e,vol2) = path2.provider.add_lv_for_cache(self,self.cachesize)\n if e > 1:\n vol1.provider.cachevg.volumes.delete(vol1,force=True)\n tmp='cant create Cache LV2 for %s on %s in update: %s' % (self.name,path2.provider.name,vol2)\n logger.eventlog.error(tmp)\n return (1,tmp)\n #\n print 'cache vols: ',str(vol1),str(vol2)\n\n # create new drbd device\n drdev = san_res.raids.add(drname,SanRaidGrp(drname,None))\n if not drdev :\n logger.eventlog.error('failed to create/updare dr device for cache in %s' % str(self))\n return (1,'failed to create/updare dr device')\n drdev.raid=RaidLevel.dr\n drdev.iscachedr=True\n drdev.devices=[vol1,vol2]\n (e,txt)=drdev.update()\n print 'create dr device:',e,txt\n if e:\n logger.eventlog.error('cant create Cache dr for %s , %s' % (str(self),txt))\n return (1,'cant create Cache dr')\n if drdev.provider is path1.provider:\n prim=path1\n else:\n prim=path2\n\n logger.eventlog.debug('create cache on %s , loadonly: %s , drname: %s' % \\\n (drdev.provider.name, cache_loadonly, drname))\n #loadonly=(self.cachepvds<>[]) # check if we already had cache LVs\n\n # create CacheDev\n # on loadonly we also forcing devname update\n (e,r) = drdev.provider.create_cache(prim,drdev,cvolname,loadonly=cache_loadonly,force=cache_loadonly)\n logger.eventlog.debug('create cache response: %s %s' % (e,r))\n if e:\n return (e, 'error creating cache on %s: %s' % (drdev.provider.name,r))\n else:\n (e,r) = self._remove_cache()\n if e:\n return (e,'error removing cache on %s: %s' % (str(self),r))\n return (0,'')", "def test_port_update_deferred_allocation_no_ipam(self):\n with self.network() as network:\n with self.subnet(network=network):\n pass\n\n response = self._create_port(self.fmt,\n net_id=network['network']['id'],\n tenant_id=network['network']['tenant_id'],\n fixed_ips=[],\n is_admin=True)\n port = self.deserialize(self.fmt, response)\n ips = port['port']['fixed_ips']\n self.assertEqual(0, len(ips))\n\n # Create the subnet and try to update the port to get an IP\n data = {'port': {portbindings.HOST_ID: 'fakehost'}}\n port_id = port['port']['id']\n port_req = self.new_update_request('ports', data, port_id,\n as_admin=True)\n response = port_req.get_response(self.api)\n\n self.assertEqual(webob.exc.HTTPOk.code, response.status_int)\n res = self.deserialize(self.fmt, 
response)\n self.assertEqual(0, len(res['port']['fixed_ips']))", "def oci_load_balancer_attack_surface_open_tcp_port_check(cache, awsAccountId, awsRegion, awsPartition, ociTenancyId, ociUserId, ociRegionName, ociCompartments, ociUserApiKeyFingerprint):\n # ISO Time\n iso8601Time = datetime.datetime.now(datetime.timezone.utc).isoformat()\n for loadbalancer in get_oci_load_balancers(cache, ociTenancyId, ociUserId, ociRegionName, ociCompartments, ociUserApiKeyFingerprint):\n # B64 encode all of the details for the Asset\n assetJson = json.dumps(loadbalancer,default=str).encode(\"utf-8\")\n assetB64 = base64.b64encode(assetJson)\n compartmentId = loadbalancer[\"compartment_id\"]\n loadBalancerId = loadbalancer[\"id\"]\n loadBalancerName = loadbalancer[\"display_name\"]\n lbLifecycleState = loadbalancer[\"lifecycle_state\"]\n createdAt = str(loadbalancer[\"time_created\"])\n\n # Create a list comprehension to check if any Public IPs are assigned to the LB - an empty list\n # means we should skip. In the event there are more than one Public IPs we will just take the first\n # as they all go to the same place after all...\n publicIps = [ip[\"ip_address\"] for ip in loadbalancer[\"ip_addresses\"] if ip[\"is_public\"] is True]\n if not publicIps:\n continue\n else:\n pubIp = publicIps[0]\n # Submit details to the scanner function\n scanner = scan_host(pubIp, loadBalancerName, \"OCI Load Balancer\")\n # NoneType returned on KeyError due to Nmap errors\n if scanner == None:\n continue\n else:\n # Loop the results of the scan - starting with Open Ports which require a combination of\n # a Public Instance, an open SG rule, and a running service/server on the host itself\n # use enumerate and a fixed offset to product the Check Title ID number\n for index, p in enumerate(scanner[pubIp][\"ports\"]):\n # Parse out the Protocol, Port, Service, and State/State Reason from NMAP Results\n checkIdNumber = str(int(index + 1))\n portNumber = int(p[\"portid\"])\n if portNumber == 8089:\n serviceName = 'SPLUNKD'\n elif portNumber == 10250:\n serviceName = 'KUBERNETES-API'\n elif portNumber == 5672:\n serviceName = 'RABBITMQ'\n elif portNumber == 4040:\n serviceName = 'SPARK-WEBUI'\n else:\n try:\n serviceName = str(p[\"service\"][\"name\"]).upper()\n except KeyError:\n serviceName = \"Unknown\"\n serviceStateReason = str(p[\"reason\"])\n serviceState = str(p[\"state\"])\n # This is a failing check\n if serviceState == \"open\":\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": f\"{ociTenancyId}/{ociRegionName}/{compartmentId}/{loadBalancerId}/oci-attack-surface-lb-open-{serviceName}-check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": f\"{ociTenancyId}/{ociRegionName}/{compartmentId}/{loadBalancerId}/oci-attack-surface-lb-open-{serviceName}-check\",\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\n \"Software and Configuration Checks/AWS Security Best Practices/Network Reachability\",\n \"TTPs/Discovery\"\n ],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"HIGH\"},\n \"Confidence\": 99,\n \"Title\": f\"[AttackSurface.OCI.LoadBalancer.{checkIdNumber}] Load Balancers should not be publicly reachable on {serviceName}\",\n \"Description\": f\"Oracle Load Balancer {loadBalancerName} in Compartment {compartmentId} in {ociRegionName} is publicly reachable on port {portNumber} which corresponds to the {serviceName} service. 
When Services are successfully fingerprinted by the ElectricEye Attack Surface Management Auditor it means the load balancer is public (mapped 'ip_address` and 'is_public' is True within the list of IP Addresses), has an open Security List or Network Security Group, and a running service on the host which adversaries can also see. Refer to the remediation insturctions for an example of a way to secure OCI Load Balancers.\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"OCI Load Balancers instances should only have the minimum necessary ports open to achieve their purposes, allow traffic from authorized sources, and use other defense-in-depth and hardening strategies. For a basic view on traffic authorization into your instances refer to the Network Security Groups section of the Oracle Cloud Infrastructure Documentation for Networks.\",\n \"Url\": \"https://docs.oracle.com/en-us/iaas/Content/Network/Concepts/networksecuritygroups.htm#support\"\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"OCI\",\n \"ProviderType\": \"CSP\",\n \"ProviderAccountId\": ociTenancyId,\n \"AssetRegion\": ociRegionName,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Networking\",\n \"AssetService\": \"Oracle Cloud Load Balancer\",\n \"AssetComponent\": \"Load Balancer\"\n },\n \"Resources\": [\n {\n \"Type\": \"OciCloudLoadBalancerLoadBalancer\",\n \"Id\": loadBalancerId,\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n \"Details\": {\n \"Other\": {\n \"TenancyId\": ociTenancyId,\n \"CompartmentId\": compartmentId,\n \"Region\": ociRegionName,\n \"Name\": loadBalancerName,\n \"Id\": loadBalancerId,\n \"CreatedAt\": createdAt,\n \"LifecycleState\": lbLifecycleState\n }\n }\n }\n ],\n \"Compliance\": {\n \"Status\": \"FAILED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 PR.AC-3\",\n \"NIST SP 800-53 Rev. 4 AC-1\",\n \"NIST SP 800-53 Rev. 4 AC-17\",\n \"NIST SP 800-53 Rev. 4 AC-19\",\n \"NIST SP 800-53 Rev. 4 AC-20\",\n \"NIST SP 800-53 Rev. 
4 SC-15\",\n \"AICPA TSC CC6.6\",\n \"ISO 27001:2013 A.6.2.1\",\n \"ISO 27001:2013 A.6.2.2\",\n \"ISO 27001:2013 A.11.2.6\",\n \"ISO 27001:2013 A.13.1.1\",\n \"ISO 27001:2013 A.13.2.1\",\n \"MITRE ATT&CK T1040\",\n \"MITRE ATT&CK T1046\",\n \"MITRE ATT&CK T1580\",\n \"MITRE ATT&CK T1590\",\n \"MITRE ATT&CK T1592\",\n \"MITRE ATT&CK T1595\"\n ]\n },\n \"Workflow\": {\"Status\": \"NEW\"},\n \"RecordState\": \"ACTIVE\"\n }\n yield finding\n else:\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": f\"{ociTenancyId}/{ociRegionName}/{compartmentId}/{loadBalancerId}/oci-attack-surface-lb-open-{serviceName}-check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": f\"{ociTenancyId}/{ociRegionName}/{compartmentId}/{loadBalancerId}/oci-attack-surface-lb-open-{serviceName}-check\",\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\n \"Software and Configuration Checks/AWS Security Best Practices/Network Reachability\",\n \"TTPs/Discovery\"\n ],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"INFORMATIONAL\"},\n \"Confidence\": 99,\n \"Title\": f\"[AttackSurface.OCI.LoadBalancer.{checkIdNumber}] Load Balancers should not be publicly reachable on {serviceName}\",\n \"Description\": f\"Oracle Load Balancer {loadBalancerName} in Compartment {compartmentId} in {ociRegionName} is not publicly reachable on port {portNumber} which corresponds to the {serviceName} service due to {serviceStateReason}. OCI Load Balancers and their respective Security Lists and/or Network Security Groups should still be reviewed for minimum necessary access.\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"OCI Load Balancers instances should only have the minimum necessary ports open to achieve their purposes, allow traffic from authorized sources, and use other defense-in-depth and hardening strategies. For a basic view on traffic authorization into your instances refer to the Network Security Groups section of the Oracle Cloud Infrastructure Documentation for Networks.\",\n \"Url\": \"https://docs.oracle.com/en-us/iaas/Content/Network/Concepts/networksecuritygroups.htm#support\"\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"OCI\",\n \"ProviderType\": \"CSP\",\n \"ProviderAccountId\": ociTenancyId,\n \"AssetRegion\": ociRegionName,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Networking\",\n \"AssetService\": \"Oracle Cloud Load Balancer\",\n \"AssetComponent\": \"Load Balancer\"\n },\n \"Resources\": [\n {\n \"Type\": \"OciCloudLoadBalancerLoadBalancer\",\n \"Id\": loadBalancerId,\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n \"Details\": {\n \"Other\": {\n \"TenancyId\": ociTenancyId,\n \"CompartmentId\": compartmentId,\n \"Region\": ociRegionName,\n \"Name\": loadBalancerName,\n \"Id\": loadBalancerId,\n \"CreatedAt\": createdAt,\n \"LifecycleState\": lbLifecycleState\n }\n }\n }\n ],\n \"Compliance\": {\n \"Status\": \"PASSED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 PR.AC-3\",\n \"NIST SP 800-53 Rev. 4 AC-1\",\n \"NIST SP 800-53 Rev. 4 AC-17\",\n \"NIST SP 800-53 Rev. 4 AC-19\",\n \"NIST SP 800-53 Rev. 4 AC-20\",\n \"NIST SP 800-53 Rev. 
4 SC-15\",\n \"AICPA TSC CC6.6\",\n \"ISO 27001:2013 A.6.2.1\",\n \"ISO 27001:2013 A.6.2.2\",\n \"ISO 27001:2013 A.11.2.6\",\n \"ISO 27001:2013 A.13.1.1\",\n \"ISO 27001:2013 A.13.2.1\",\n \"MITRE ATT&CK T1040\",\n \"MITRE ATT&CK T1046\",\n \"MITRE ATT&CK T1580\",\n \"MITRE ATT&CK T1590\",\n \"MITRE ATT&CK T1592\",\n \"MITRE ATT&CK T1595\"\n ]\n },\n \"Workflow\": {\"Status\": \"RESOLVED\"},\n \"RecordState\": \"ARCHIVED\"\n }\n yield finding", "def test_port_update_deferred_allocation_no_segments_empty_alloc(self):\n with self.network() as network:\n pass\n\n port = self._create_deferred_ip_port(network)\n\n # Create the subnet and update the port but specify no IPs\n with self.subnet(network=network):\n data = {'port': {\n portbindings.HOST_ID: 'fakehost',\n 'fixed_ips': []}}\n port_id = port['port']['id']\n port_req = self.new_update_request('ports', data, port_id,\n as_admin=True)\n response = port_req.get_response(self.api)\n\n self.assertEqual(webob.exc.HTTPOk.code, response.status_int)\n res = self.deserialize(self.fmt, response)\n # Since I specifically requested no IP addresses, I shouldn't get one.\n self.assertEqual(0, len(res['port']['fixed_ips']))", "def test_get_networks(self):\n pass", "def test_patch_host_subnet(self):\n pass", "def test_read_host_subnet(self):\n pass", "def test_port_create_with_segment_subnets(self):\n network, segment, subnet = self._create_test_segment_with_subnet()\n response = self._create_port(self.fmt,\n net_id=network['network']['id'],\n tenant_id=network['network']['tenant_id'])\n res = self.deserialize(self.fmt, response)\n # Don't allocate IPs in this case because we didn't give binding info\n self.assertEqual(0, len(res['port']['fixed_ips']))", "def check_routable(self, from_subnets: List[Subnet], to_subnets: List[Subnet]) -> dict:\n # check what ports from subnets allow to any to subnets\n ports = {} # port: (to_subnet, from_subnet)\n for from_subnet in from_subnets:\n for to_subnet in to_subnets:\n # check if traffic from subnet is stopped by to subnet nacl\n if from_subnet.name in to_subnet.nacls:\n if 'ICMP' not in ports:\n ports['ICMP'] = (from_subnet.cidr, to_subnet.cidr)\n if 'all' in to_subnet.nacls[from_subnet.name]['in']:\n # if all ports accepted in then set ports to all and we are done\n return {'all': (from_subnet.cidr, to_subnet.cidr)}\n elif 'None' in to_subnet.nacls[from_subnet.name]['in']:\n # If you don't have access to Enteprise network, you can't act on Operational Host\n # TODO refactor this hacky fix\n permission = self.check_for_enterprise_sessions()\n ports = {'all': (from_subnet.cidr, to_subnet.cidr)} if permission else {}\n return ports\n \n else:\n # we only add the ports in rules to our accepted ports\n for rule in to_subnet.nacls[from_subnet.name]['in']:\n if rule['PortRange'] is int and rule['PortRange'] not in ports:\n ports[rule[\"PortRange\"]] = (from_subnet.cidr, to_subnet.cidr)\n else:\n for p in range(rule[\"PortRange\"][0], rule[\"PortRange\"][1]):\n if p not in ports:\n ports[p] = (from_subnet.cidr, to_subnet.cidr)\n elif 'all' in to_subnet.nacls:\n if 'ICMP' not in ports:\n ports['ICMP'] = (from_subnet.cidr, to_subnet.cidr)\n # if all ports accepted out then use inbound rules only\n if 'all' in to_subnet.nacls['all']['in']:\n # if all ports accepted in then set ports to all and we are done\n return {'all': (from_subnet.cidr, to_subnet.cidr)}\n else:\n # we only add the ports in rules to our accepted ports\n for rule in to_subnet.nacls['all']['in']:\n if rule['PortRange'] is int and rule['PortRange'] not in 
ports:\n ports[rule[\"PortRange\"]] = (from_subnet.cidr, to_subnet.cidr)\n else:\n for p in range(rule[\"PortRange\"][0], rule[\"PortRange\"][1]):\n if p not in ports:\n ports[p] = (from_subnet.cidr, to_subnet.cidr)\n else:\n # this means that traffic cannot reach move between these 2 subnets\n continue\n\n return ports", "def test_networking_project_network_get(self):\n pass", "def ensure_mapping(self):\n if not self.host_mapping:\n self.get_interfaces()", "def check_model(self):\n layers_map = self.core.query_network(network=self.network,\n device_name=self.device)\n\n unsupported_layers = [\n l for l in self.network.layers.keys() if l not in layers_map\n ]\n\n if (unsupported_layers != []):\n sys.exit(\"Those mention layers in your model are not supported by OpenVino Inference Engine:\" \\\n \" \\n\\t\" + \"\\n\\t\".join(unsupported_layers))", "def test_network_too_small(self):\n\n self._test_find_next_subnet(\n network=\"10.0.0.0/25\",\n requests=[24],\n expected=None,\n )", "def test_network_full(self):\n\n self._test_find_next_subnet(\n network=\"10.0.0.0/24\",\n subnets=[\"10.0.0.0/24\"],\n requests=[24],\n expected=None,\n )", "def test_port_without_ip_not_deferred_no_binding(self):\n with self.network() as network:\n pass\n\n # Create a unbound port with no IP address (since there is no subnet)\n response = self._create_port_and_show(network)\n self.assertEqual([], response['port']['fixed_ips'])\n self.assertEqual(ipalloc_apidef.IP_ALLOCATION_IMMEDIATE,\n response['port'][ipalloc_apidef.IP_ALLOCATION])", "def __init_cache__(self) -> None:\n try:\n self.cache = caches[CACHE_NAME]\n logging.info(\"GeoIP2 - successfully initialised cache\")\n except InvalidCacheBackendError as ex:\n raise MiddlewareNotUsed(f\"GeoIP2 - cache configuration error: {ex}\") from ex", "def check(self) -> None:\n # validate pruning config\n super().check()\n\n assert self.config[\"TRAIN_CONFIG\"][\"MODEL_NAME\"] in {\n \"densenet\",\n \"quant_densenet\",\n \"simplenet\",\n \"quant_simplenet\",\n }, f\"{self.config['TRAIN_CONFIG']['MODEL_NAME']} is not supported\"", "def _validate_ens_net_portsecurity(self, net_data):\n pass", "def test_port_create_with_no_fixed_ips_no_ipam(self):\n with self.network() as network:\n with self.subnet(network=network):\n pass\n\n # Create an unbound port requesting no IP addresses\n response = self._create_port_and_show(network, fixed_ips=[])\n\n self.assertEqual([], response['port']['fixed_ips'])\n self.assertEqual(ipalloc_apidef.IP_ALLOCATION_NONE,\n response['port'][ipalloc_apidef.IP_ALLOCATION])", "def test_update_network_external_ports(self):\n policies_ports = [(self.qos_policies[0].id, {self.ports[0].id})]\n self.ports[2].qos_policy_id = self.qos_policies[0].id\n self.ports[2].update()\n port_obj.PortBinding(self.ctx, port_id=self.ports[1].id, host='host',\n profile={}, vif_type='',\n vnic_type=portbindings_api.VNIC_DIRECT).create()\n with mock.patch.object(self.qos_driver._driver._nb_idl,\n 'get_lswitch_port') as mock_lsp:\n mock_lsp.side_effect = [\n mock.Mock(type=ovn_const.LSP_TYPE_LOCALNET),\n mock.Mock(type=ovn_const.LSP_TYPE_EXTERNAL)]\n for qos_policy_id, reference_ports in policies_ports:\n self.networks[0].qos_policy_id = qos_policy_id\n self.networks[0].update()\n original_network = {'qos_policy_id': self.qos_policies[0]}\n reviewed_port_ids, _, _ = self.qos_driver.update_network(\n mock.ANY, self.networks[0], original_network, reset=True)\n self.assertEqual(reference_ports, reviewed_port_ids)\n calls = [mock.call(\n mock.ANY, self.ports[0].id, 
self.ports[0].network_id,\n qos_policy_id, None)]\n self.mock_rules.assert_has_calls(calls)\n self.mock_rules.reset_mock()" ]
[ "0.5711651", "0.5685266", "0.5486034", "0.5470297", "0.53793174", "0.53505814", "0.52206916", "0.520188", "0.50624466", "0.50617975", "0.5060805", "0.5031961", "0.5029634", "0.50271976", "0.5011506", "0.5004499", "0.5002426", "0.49931684", "0.49833202", "0.4974392", "0.4967064", "0.49566424", "0.49401653", "0.49341184", "0.49323916", "0.4928432", "0.4922822", "0.49191308", "0.49160984", "0.49144766" ]
0.7904054
0
Start/stop/restart Dnsmasq for NETWORK_ID.
def _update_dnsmasq(self, network_id): # Check whether we should really do the following processing. if self.suppress_dnsmasq_updates: LOG.debug("Don't update dnsmasq yet;" " must be processing a snapshot") self.dirty_networks.add(network_id) return self.dnsmasq_updater.update_network(network_id)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def startServices():\n # dnsmasq\n out_dnsmasq = subprocess.run([\"systemctl\", \"restart\", \"dnsmasq\"], stdout=subprocess.PIPE)\n if out_dnsmasq.returncode == 0:\n logging.info(\"dnsmasq service started/restarted successfully\")\n else:\n logging.error(\"dnsmasq service start restart error\")\n # 3proxy\n out_3proxy = subprocess.run([\"systemctl\", \"restart\", \"3proxy\"], stdout=subprocess.PIPE)\n if out_3proxy.returncode == 0:\n logging.info(\"3proxy service started/restarted successfully\")\n else:\n logging.error(\"3proxy service start restart error\")", "def launch (no_flow = False,\n network = \"192.168.0.0/24\", # Address range\n first = 1, last = None, count = None, # Address range\n ip = \"192.168.0.254\",\n router = (), # Auto\n dns = (), # Auto\n dpid = None, # All\n ports = None, # All\n __INSTANCE__ = None):\n def fixint (i):\n i = str(i)\n if i.lower() == \"none\": return None\n if i.lower() == \"true\": return None\n return int(i)\n def fix (i):\n i = str(i)\n if i.lower() == \"none\": return None\n if i.lower() == \"true\": return None\n if i == '()': return ()\n return i\n first,last,count = map(fixint,(first,last,count))\n router,dns = map(fix,(router,dns))\n\n if ports is not None:\n ports = ports.split(\",\")\n ports = set(int(p) if p.isdigit() else p for p in ports)\n\n pool = SimpleAddressPool(network = network, first = first, last = last,\n count = count)\n\n inst = DHCPD(install_flow = not no_flow, pool = pool,\n ip_address = ip, router_address = router,\n dns_address = dns, dpid = dpid, ports = ports)\n\n if __INSTANCE__[0] == 0:\n # First or only instance\n core.register(inst)\n\n log.debug(\"DHCP serving a%s\", str(pool)[2:-1])", "def enable_dhcp_helper(self, network_id):\n try:\n network = self.plugin_rpc.get_network_info(network_id)\n except:\n self.needs_resync = True\n LOG.exception(_('Network %s RPC info call failed.') % network_id)\n return\n\n if not network.admin_state_up:\n return\n\n for subnet in network.subnets:\n if subnet.enable_dhcp:\n if self.call_driver('enable', network):\n self.cache.put(network)\n break", "def startservers():\n try:\n dns = subprocess.Popen(['python', FAKE_LOC, '-c', DNS_LOC])\n except IOError:\n sys.exit('>> Unable to locate FakeDns')\n\n try:\n httpd = MyTCPServer(('', 80), MyHandler)\n except socket.error:\n dns.kill()\n sys.exit('>> Port 80 already in use')\n try:\n print '>> Starting HTTP Server...'\n httpd.serve_forever()\n except KeyboardInterrupt:\n httpd.shutdown()\n httpd.server_close()\n dns.kill()\n sys.exit()", "def restartHTTPd(htconf):\n parentpid = pidHTTPd(htconf)\n if parentpid <= 1:\n return\n# hopefulle killing the parent proc. will do the trick\n print >> FileKeyUtils.WMSlog, 'restartHTTPd> kill parentpid:', parentpid\n os.system('kill -TERM '+repr(parentpid))\n apache = '/devstore/apache2/bin/httpd -f /devstore/apache2/conf/' + htconf\n print >> FileKeyUtils.WMSlog, 'restartHTTPd> via:', apache\n time.sleep(0.5) # give it time to complete proc. 
termination\n os.system('/devstore/apache2/bin/httpd -f /devstore/apache2/conf/' + htconf)", "def start_srv(start, process):\n if not \"conf_option\" in world.cfg:\n world.cfg[\"conf_option\"] = \"\"\n\n world.cfg['log_file'] = build_log_path()\n fabric_sudo_command('cat /dev/null >' + world.cfg['log_file'])\n world.cfg[\"dhcp_log_file\"] = world.cfg['log_file']\n\n log = \"local7\"\n if world.f_cfg.isc_dhcp_log_facility != \"\":\n log = world.f_cfg.isc_dhcp_log_facility\n\n world.cfg['log_facility'] = '''\\nlog-facility {log};\\n'''.format(**locals())\n\n add_defaults()\n cfg_write()\n log.debug(\"Start ISC-DHCP with generated config:\")\n convert_cfg_file(world.cfg[\"cfg_file\"])\n fabric_send_file(world.cfg[\"cfg_file\"] + '_processed', world.cfg[\"cfg_file\"] + '_processed')\n copy_configuration_file(world.cfg[\"cfg_file\"] + '_processed')\n remove_local_file(world.cfg[\"cfg_file\"])\n #set_ethernet_interface()\n stop_srv()\n\n world.cfg['leases'] = build_leases_path()\n\n #fabric_sudo_command('echo y |rm ' + world.cfg['leases'])\n fabric_sudo_command('touch ' + world.cfg['leases'])\n\n result = fabric_sudo_command('(' + os.path.join(world.f_cfg.software_install_path, 'sbin/dhcpd') + ' -cf server.cfg_processed'\n + ' -lf ' + world.cfg['leases']\n + '&); sleep ' + str(world.f_cfg.sleep_time_1) + ';')\n\n check_process_result(start, result, process)\n\n # clear configs in case we would like make couple configs in one test\n world.cfg[\"conf_time\"] = \"\"\n world.cfg[\"log_facility\"] = \"\"\n world.cfg[\"custom_lines\"] = \"\"\n world.cfg[\"conf_option\"] = \"\"\n world.cfg[\"conf_vendor\"] = \"\"", "def start_inetsim(self):\n\t\tif os.path.exists(\"/var/run/inetsim.pid\"):\n\t\t\tos.remove(\"/var/run/inetsim.pid\")\n\t\tcmd = [\"/usr/bin/pkill\",\"inetsim\"]\n\t\toutput = self.check_output_safe(cmd)\n\t\tself.log.info(output)\n\t\tcmd = [\"/usr/bin/inetsim\",\"--bind-address\",self.cfg.net_eth1,\"--config\",self.cfg.inetsim_cfg_path,\"--log-dir\",self.cfg.inetsim_log_dir,\"--data-dir\",self.cfg.inetsim_data_dir,\"--report-dir\",self.cfg.inetsim_log_report_dir]\n\t\tself.p_inetsim = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\t\tself.log.info(\"inetsim starts, pid: %d\",self.p_inetsim.pid)", "def refresh_dhcp_helper(self, network_id):\n old_network = self.cache.get_network_by_id(network_id)\n if not old_network:\n # DHCP current not running for network.\n return self.enable_dhcp_helper(network_id)\n\n try:\n network = self.plugin_rpc.get_network_info(network_id)\n except:\n self.needs_resync = True\n LOG.exception(_('Network %s RPC info call failed.') % network_id)\n return\n\n old_cidrs = set(s.cidr for s in old_network.subnets if s.enable_dhcp)\n new_cidrs = set(s.cidr for s in network.subnets if s.enable_dhcp)\n\n if new_cidrs and old_cidrs == new_cidrs:\n self.call_driver('reload_allocations', network)\n self.cache.put(network)\n elif new_cidrs:\n if self.call_driver('restart', network):\n self.cache.put(network)\n else:\n self.disable_dhcp_helper(network.id)", "def start(self):\n if self.isRunning():\n raise Exception('DhcpClientAlreadyStarted')\n cmd = ['sudo', self._slave_dhcp_client_path, '-i', self._ifname, '-A', '-S']\n if self._logger is not None:\n self._logger.debug('Running command ' + str(cmd))\n #self._slave_dhcp_client_proc = robot.libraries.Process.Process()\n #self._slave_dhcp_client_proc.start_process('sudo', self._slave_dhcp_client_path, '-i', self._ifname, '-A', '-S')\n self._slave_dhcp_client_proc = subprocess.Popen(cmd)#, 
stdout=open(os.devnull, 'wb'), stderr=subprocess.STDOUT)\n self._slave_dhcp_client_pid = self._slave_dhcp_client_proc.pid\n self.addSlavePid(self._slave_dhcp_client_proc.pid) # Add the PID of the child to the list of subprocesses (note: we get sudo's PID here, not the slave PID, that we will get later on via D-Bus (see RemoteDhcpClientControl.getPid())", "def run(self):\n try:\n self.ssh_connection.connect()\n dns_response = self.query_dns_server()\n result = self.process_dns_response(dns_response)\n self.handle_result(result)\n\n except Exception as e:\n print(f\"Error: {str(e)}\")\n sys.exit(2)", "def sync_dns(self,):\n\n for server_name, server_ip in self.get_instances():\n self.dnsmanager.ensure_a_record(server_name, server_ip)", "def restart(self, cleanup=False): \n params = {'command':'restartNetwork',\n 'id':self.id,\n 'cleanup':cleanup}\n\n name = self.name\n self.logger.debug('Restart network %s' % name)\n\n try:\n response = self.send_request(params)\n res = json.loads(response)\n clsk_job_id = res['restartnetworkresponse']['jobid']\n self.logger.debug('Start job over %s.%s - %s: %s' % (\n self._obj_type, self.name, \n 'restartNetwork', res))\n return clsk_job_id\n except KeyError as ex :\n self.logger.error('Error parsing json data: %s' % ex)\n raise ClskError('Error parsing json data: %s' % ex)\n except ApiError as ex:\n self.logger.error(ex)\n raise ClskError(ex)", "def configureDHCP():\n dhcpStart = config.get(\"hotspot\", \"dhcpstart\")\n dhcpEnd = config.get(\"hotspot\", \"dhcpend\")\n dnsmasqConfig = f\"\"\"#PI Hotspot config\ndomain-needed\nbogus-priv\ndhcp-option=option:dns-server\ndhcp-authoritative\ndhcp-range={dhcpStart},{dhcpEnd},1h\n\"\"\"\n confFile = open(\"/etc/dnsmasq.conf\", \"w\")\n confFile.write(dnsmasqConfig)\n confFile.close()", "def network_create_end(self, payload):\n network_id = payload['network']['id']\n self.enable_dhcp_helper(network_id)", "def set_dhcp_conn(nic):\n nic.EnableDHCP()\n # After static DNS servers are specified to start using Dynamic Host\n # Configuration Protocol (DHCP) instead of static DNS servers,\n # you can call the method without supplying \"in\" parameters.\n nic.SetDNSServerSearchOrder()", "def _restart(self):\n\n daemon_prefix = ConfigUtil().get_prefix_for_daemon_id(daemon_id=self._daemon_id, conf_dict=self._pyswitchlib_conf)\n\n if daemon_prefix:\n if self._daemon_id in self._pyswitchlib_conf:\n daemon_prefixes = self._pyswitchlib_conf[self._daemon_id].split(':')\n\n if len(daemon_prefixes) > 1:\n daemon_prefixes.remove(daemon_prefix)\n daemon_prefixes.insert(0, daemon_prefix)\n\n self._pyswitchlib_conf[self._daemon_id] = ':'.join(daemon_prefixes)\n ConfigFileUtil().write(filename=pyswitchlib_conf_file, conf_dict=self._pyswitchlib_conf)\n\n super(PySwitchLibApiDaemonRunner, self)._restart()", "def disable_dhcp_helper(self, network_id):\n network = self.cache.get_network_by_id(network_id)\n if network:\n if self.call_driver('disable', network):\n self.cache.remove(network)", "def _RunDHCPClient(self, dhclient_script_path=None, **kwargs):\n del kwargs\n PID_FILE = os.path.join(self._tmp_dir, 'dhclient.pid')\n clear_ifconfig_command = 'ifconfig {interface} 0.0.0.0'.format(\n interface=self.interface)\n dhcp_command = ('echo \"\" | ' # dhclient expects STDIN for some reason\n 'dhclient -4 ' # only run on IPv4\n '-nw ' # immediately daemonize\n '-pf {pid_file} '\n '-sf {dhclient_script} '\n '-lf /dev/null ' # don't keep a leases file\n '-v {interface}'.format(\n pid_file=PID_FILE,\n dhclient_script=dhclient_script_path,\n 
interface=self.interface))\n kill_command = 'cat {pid_file} | xargs -r kill; rm {pid_file}'.format(\n pid_file=PID_FILE)\n force_kill_command = 'pgrep dhclient | xargs -r kill -9'\n\n logging.info('Killing any existing dhclient processes...')\n self._device.Call(force_kill_command)\n\n logging.info('Clearing any existing ifconfig networks...')\n self._device.Call(clear_ifconfig_command)\n\n logging.info('Starting dhclient...')\n self._device.CheckCall(dhcp_command)\n\n logging.info('Waiting to lease an IP...')\n ip = sync_utils.WaitFor(self._LeasedIP, self._dhcp_timeout)\n if not ip:\n self._device.Call(kill_command)\n raise WiFiError('DHCP bind failed')\n logging.info('Success: bound to IP %s', ip)\n\n yield ip # We have bound an IP; yield back to the caller.\n\n logging.info('Stopping dhclient...')\n self._device.Call(kill_command)\n self._device.Call(force_kill_command)\n self._device.Call(clear_ifconfig_command)\n\n yield # We have released the IP.", "def _set_nameserver(self, instance):\n ctxt = context.get_admin_context()\n ip = db.instance_get_fixed_address(ctxt, instance['id'])\n network = db.fixed_ip_get_network(ctxt, ip)\n\n try:\n _, err = utils.execute('sudo', 'vzctl', 'set', instance['id'],\n '--save', '--nameserver', network['dns'])\n if err:\n LOG.error(err)\n except Exception as err:\n LOG.error(err)\n raise exception.Error('Unable to set nameserver for %s' %\n instance['id'])", "def set_static_conn(nic, ip_addr, subnet_mask, default_gateway, dns_servers):\n if isinstance(ip_addr, str):\n ip_addr = [ip_addr,]\n if isinstance(subnet_mask, str):\n subnet_mask = [subnet_mask,]\n if isinstance(default_gateway, str):\n default_gateway = [default_gateway, ]\n\n # set defult gateway. return value:\n # 0: success & no reboot required, \n # 1: sucess & reboot required\n ret = nic.SetGateways(default_gateway)\n print 'Default Gateway updated (status %d)' % ret\n\n # Set IP adrress & subnet mask. 
return value:\n # 0: success & no reboot required, \n # 1: sucess & reboot required\n ret = nic.EnableStatic(IPAddress=ip_addr, SubnetMask=subnet_mask)\n print 'IP Address / Subnet Mask updated (status %d)' % ret\n\n # set dns servers\n if dns_servers:\n #assert 0 == nic.EnableDNS(DNSServerSearchOrder=dns_servers)\n # or \n ret = nic.SetDNSServerSearchOrder(dns_servers)\n print 'DNS Server updated (status %d)' % ret", "def start_maintenance(ServerName=None):\n pass", "def launch_vrouter_instance(self):\n # Add code to start vrouter\n if (self.vr_args['vtest_only']):\n self.logger.info(\"VTEST_ONLY_RETURN \" +\n str(self.vr_args['vtest_only']))\n return\n cpid = os.fork()\n if cpid == 0:\n vrouter_cmd_args = [\"taskset\", self.vr_args['taskset'],\n self.vr_args['vrouter_path'], \"--no-daemon\",\n \"--no-huge\", \"--vr_packet_sz\", \"2048\"]\n if self.vr_args['dpdk_args']:\n for dpdk_arg in self.vr_args['dpdk_args'].split(' '):\n vrouter_cmd_args.append(dpdk_arg)\n vrouter_cmd_args.extend([\"--vr_socket_dir\",\n self.vr_args['socket_dir']])\n os.execvp(\"taskset\", vrouter_cmd_args)\n else:\n self.logger.info(\n \"Running cmd - taskset %s %s --no-daemon --no-huge \"\n \"--vr_packet_sz 2048 --vr_socket_dir %s\" %\n (self.vr_args['taskset'],\n self.vr_args['vrouter_path'],\n self.vr_args['socket_dir']))\n self.logger.info(\"pid = \" + str(cpid))\n self.pid = cpid\n count = 0\n ret = 0\n while (count < 10):\n cmd = \"lsof \" + self.vr_args['socket_dir'] +\\\n \"/dpdk_netlink | wc -l\"\n self.logger.info(\"Running cmd - {}\".format(cmd))\n try:\n ret = subprocess.check_output(cmd, shell=True)\n # check if the netlink is up using the ret value\n if (ret == \"2\\n\"):\n break\n else:\n time.sleep(1)\n count += 1\n except Exception as e:\n self.logger.error(e)\n time.sleep(1)\n count += 1\n if (ret != \"2\\n\"):\n self.logger.error(\"Failed to bringup vrouter\")\n return -1\n else:\n return 0", "def Restart(self, udp=False):\n self.Stop()\n self.Start(udp)", "def start_ddos_wall():\n\n if Setup.parse_options()['setup'] or Setup.parse_options()['reset']:\n Setup.write_firewall_script()\n httpd = SocketServer.ThreadingTCPServer(('', Setup.parse_options()['port']), Proxy)\n print('Proxy is running on port ', Setup.parse_options()['port'])\n monitor = Monitoring()\n monitor.start()\n httpd.serve_forever()", "def run(self):\n factory = FoghornDNSServerFactory(\n clients=[self.foghorn, client.Resolver(resolv='/etc/resolv.conf')]\n )\n protocol = dns.DNSDatagramProtocol(controller=factory)\n\n # Pylint can't seem to find these methods.\n # pylint: disable=E1101\n reactor.listenUDP(self.settings.dns_port, protocol)\n reactor.listenTCP(self.settings.dns_port, factory)\n reactor.run()\n self.foghorn.save_state()", "def restart_nginx():\n run_command_on_selected_server(_restart_nginx)", "def test_snat_with_docker_restart(self):\n client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,\n ip_fabric_snat=True,\n ip_fabric_forwarding=True)\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)\n self.inputs.restart_service(service_name = \"containerd\",\n host_ips = self.inputs.k8s_slave_ips)\n time.sleep(60) # Wait timer for all contrail service to come up.\n self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,\n client3, client4)", "def restart_local(drain=False):\n if _TRAFFICCTL:\n cmd = _traffic_ctl(\"server\", \"restart\", \"--manager\")\n else:\n cmd = _traffic_line(\"-L\")\n\n if drain:\n cmd 
= cmd + [\"--drain\"]\n\n return _subprocess(cmd)", "def start(self, ifname = None):\n \n if not self._slave_dhcp_process is None:\n raise Exception('DhcpClientAlreadyStarted')\n \n if not ifname is None:\n self._ifname = ifname\n \n if self._ifname is None:\n raise Exception('NoInterfaceProvided')\n \n self._slave_dhcp_process = SlaveDhcpClientProcess(dhcp_client_daemon_exec_path=self._dhcp_client_daemon_exec_path, ifname=self._ifname, logger=logger)\n self._slave_dhcp_process.start()\n self._new_lease_event.clear()\n self._dhcp_client_ctrl = RemoteDhcpClientControl(ifname=self._ifname) # Create a RemoteDhcpClientControl object that symbolizes the control on the remote process (over D-Bus)\n self._dhcp_client_ctrl.notifyNewLease(self._got_new_lease) # Ask underlying RemoteDhcpClientControl object to call self._new_lease_retrieved() as soon as we get a new lease \n logger.debug('DHCP client started on ' + self._ifname)\n slave_pid = self._dhcp_client_ctrl.getRemotePid()\n if slave_pid is None:\n logger.warn('Could not get remote process PID')\n raise('RemoteCommunicationError')\n else:\n logger.debug('Slave has PID ' + str(slave_pid)) \n self._slave_dhcp_process.addSlavePid(slave_pid)\n\n self._dhcp_client_ctrl.sendDiscover()", "def enable_dns_management(self):\n self._request({\"enable-dns-management\": True})" ]
[ "0.57506925", "0.5736145", "0.5705236", "0.5628683", "0.56053996", "0.55890465", "0.5588804", "0.5274913", "0.5227108", "0.5199843", "0.5196082", "0.5161446", "0.51592255", "0.51375973", "0.51361805", "0.51020324", "0.50229216", "0.49852252", "0.49827528", "0.49735984", "0.49720192", "0.49456844", "0.49299705", "0.49244526", "0.48847145", "0.48569202", "0.4825394", "0.48033556", "0.48005086", "0.47935653" ]
0.6410348
0
Fix NetworkCache before removing or replacing a network. neutron.agent.dhcp.agent is bugged in that it adds the DHCP port into the cache without updating the cache's port_lookup dict, but then NetworkCache.remove() barfs if there is a port in network.ports but not in that dict... NetworkCache.put() implicitly does a remove() first if there is already a NetModel in the cache with the same ID. So a put() to update or replace a network also hits this problem. This method avoids that problem by ensuring that all of a network's ports are in the port_lookup dict. A caller should call this immediately before a remove() or a put().
def _fix_network_cache_port_lookup(agent, network_id): # If there is an existing NetModel for this network ID, ensure that all # its ports are in the port_lookup dict. if network_id in agent.cache.cache: for port in agent.cache.cache[network_id].ports: agent.cache.port_lookup[port.id] = network_id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _ensure_net_and_subnets(self, port):\n\n # Gather the subnet IDs that we need for this port, and get the\n # NetModel if we already have it in the cache.\n needed_subnet_ids = set()\n net = None\n for fixed_ip in port['fixed_ips']:\n subnet_id = fixed_ip.get('subnet_id')\n if subnet_id:\n needed_subnet_ids.add(subnet_id)\n if not net:\n net = self.agent.cache.get_network_by_subnet_id(subnet_id)\n LOG.debug(\"Needed subnet IDs: %s\", needed_subnet_ids)\n LOG.debug(\"Existing network model by subnet ID: %s\", net)\n\n # For each subnet that we need, get its data from SubnetWatcher and\n # hold for adding into the cache.\n new_subnets = {}\n for subnet_id in needed_subnet_ids:\n # Get data for this subnet from the SubnetWatchers.\n subnet = (self.subnet_watcher.get_subnet(subnet_id) or\n self.v1_subnet_watcher.get_subnet(subnet_id))\n if subnet is None:\n LOG.warning(\"No data for subnet %s\", subnet_id)\n raise SubnetIDNotFound()\n new_subnets[subnet_id] = subnet\n\n if not net:\n # We don't already have a NetModel, so look for a cached NetModel\n # with the right network ID. (In this case we must have new\n # subnets to add into the cache, and the cached NetModel must have\n # subnets other than the ones that we're adding in this iteration;\n # otherwise we would have already found it when searching by\n # subnet_id above.)\n assert new_subnets\n network_id = list(new_subnets.values())[0]['network_id']\n net = self.agent.cache.get_network_by_id(network_id)\n LOG.debug(\"Existing network model by network ID: %s\", net)\n\n if not net:\n # We still have no NetModel for the relevant network ID, so create\n # a new one. In this case we _must_ be adding new subnets.\n assert new_subnets\n net = empty_network(network_id)\n LOG.debug(\"New network %s\", net)\n elif new_subnets:\n # We have a NetModel that was already in the cache and are about to\n # modify it. 
Cache replacement only works if the new NetModel is a\n # distinct object from the existing one, so make a copy here.\n net = copy_network(net)\n LOG.debug(\"Copied network %s\", net)\n\n if new_subnets:\n # Add the new subnets into the NetModel.\n assert net\n net.subnets = [s for s in net.subnets\n if s.id not in new_subnets]\n net.subnets += list(new_subnets.values())\n\n # Add (or update) the NetModel in the cache.\n LOG.debug(\"Net: %s\", net)\n _fix_network_cache_port_lookup(self.agent, net.id)\n self.agent.cache.put(net)\n\n return net.id", "def update_host_routes(self, config, cache):\n db = cache.get_or_create('host_routes', lambda: {})\n for net in config.networks:\n\n # For each subnet...\n for subnet in net.subnets:\n cidr = str(subnet.cidr)\n\n # determine the set of previously written routes for this cidr\n if cidr not in db:\n db[cidr] = set()\n\n current = db[cidr]\n\n # build a set of new routes for this cidr\n latest = set()\n for r in subnet.host_routes:\n latest.add((r.destination, r.next_hop))\n\n # If the set of previously written routes contains routes that\n # aren't defined in the new config, run commands to delete them\n for x in current - latest:\n if self._alter_route(net.interface.ifname, 'del', *x):\n current.remove(x)\n\n # If the new config contains routes that aren't defined in the\n # set of previously written routes, run commands to add them\n for x in latest - current:\n if self._alter_route(net.interface.ifname, 'add', *x):\n current.add(x)\n\n if not current:\n del db[cidr]\n\n cache.set('host_routes', db)", "def _update_network_config(port_config, allow_multiple=False):\n # Get network id from port config\n network_id = port_config.get('network_id')\n\n # Get the network id from relationship if any\n rel_network_ids = find_openstack_ids_of_connected_nodes_by_openstack_type(\n ctx, NETWORK_OPENSTACK_TYPE)\n\n rel_network_id = rel_network_ids[0] if rel_network_ids else None\n # Check if network config comes from two sources or not\n if network_id and rel_network_id and not allow_multiple:\n raise NonRecoverableError('Port can\\'t both have the '\n '\"network_id\" property and be '\n 'connected to a network via a '\n 'relationship at the same time')\n\n port_config['network_id'] = network_id or rel_network_id", "def update_network(self, dbnetwork, qipinfo):\n\n # We don't want to add the plenary to self.plenaries if we aren't going\n # to change anything\n plenary = Plenary.get_plenary(dbnetwork)\n updated = False\n\n if dbnetwork.name != qipinfo.name:\n self.logger.client_info(\"Setting network {0!s} name to {1}\"\n .format(dbnetwork, qipinfo.name))\n dbnetwork.name = qipinfo.name\n if dbnetwork.network_type != qipinfo.network_type:\n self.logger.client_info(\"Setting network {0!s} type to {1}\"\n .format(dbnetwork, qipinfo.network_type))\n dbnetwork.network_type = qipinfo.network_type\n if dbnetwork.location != qipinfo.location:\n self.logger.client_info(\"Setting network {0!s} location to {1:l}\"\n .format(dbnetwork, qipinfo.location))\n dbnetwork.location = qipinfo.location\n if dbnetwork.side != qipinfo.side:\n self.logger.client_info(\"Setting network {0!s} side to {1}\"\n .format(dbnetwork, qipinfo.side))\n dbnetwork.side = qipinfo.side\n if dbnetwork.network_compartment != qipinfo.compartment:\n self.logger.client_info(\"Setting network {0!s} compartment to {1!s}\"\n .format(dbnetwork, qipinfo.compartment))\n dbnetwork.network_compartment = qipinfo.compartment\n\n if dbnetwork in self.session.dirty:\n updated = True\n\n old_rtrs = 
set(dbnetwork.router_ips)\n new_rtrs = set(qipinfo.routers)\n\n del_routers = []\n for router in dbnetwork.routers:\n if router.ip in old_rtrs - new_rtrs:\n del_routers.append(router)\n\n for router in del_routers:\n self.logger.client_info(\"Removing router {0:s} from \"\n \"{1:l}\".format(router.ip, dbnetwork))\n for dns_rec in router.dns_records:\n if dns_rec.is_unused:\n delete_dns_record(dns_rec)\n dbnetwork.routers.remove(router)\n updated = True\n\n for ip in new_rtrs - old_rtrs:\n self.add_router(dbnetwork, ip)\n updated = True\n\n if updated:\n self.plenaries.append(plenary)\n\n # TODO: add support for updating router locations\n\n return dbnetwork.netmask == qipinfo.address.netmask", "def update_network(**kwargs):\n\n ip_addr = kwargs.get('ip_addr')\n is_private = kwargs.get('is_private')\n name = kwargs.get('name')\n dns_names = kwargs.get('dns_names')\n is_scanning = kwargs.get('is_scanning', False)\n network_id = make_shortuuid(name)\n\n network = {\n 'dns_names': dns_names,\n 'ip_addr': ip_addr,\n 'is_private' : is_private,\n 'name': name,\n 'id': network_id,\n 'is_scanning': is_scanning,\n 'updated_count': 0\n\n }\n\n network_exists = r.table(\"networks\").insert([network], conflict=\"update\")\n\n return network_exists.run(conn)", "def test_update_network_no_policy_change(self):\n for qos_policy_id in (self.qos_policies[0].id, None):\n self.networks[0].qos_policy_id = qos_policy_id\n self.networks[0].update()\n original_network = {'qos_policy_id': qos_policy_id}\n port_ids, fip_ids, router_ids = self.qos_driver.update_network(\n mock.ANY, self.networks[0], original_network)\n self.assertEqual(set([]), port_ids)\n self.assertEqual(set([]), fip_ids)\n self.assertEqual(set([]), router_ids)\n self.mock_rules.assert_not_called()", "def refresh_dhcp_helper(self, network_id):\n old_network = self.cache.get_network_by_id(network_id)\n if not old_network:\n # DHCP current not running for network.\n return self.enable_dhcp_helper(network_id)\n\n try:\n network = self.plugin_rpc.get_network_info(network_id)\n except:\n self.needs_resync = True\n LOG.exception(_('Network %s RPC info call failed.') % network_id)\n return\n\n old_cidrs = set(s.cidr for s in old_network.subnets if s.enable_dhcp)\n new_cidrs = set(s.cidr for s in network.subnets if s.enable_dhcp)\n\n if new_cidrs and old_cidrs == new_cidrs:\n self.call_driver('reload_allocations', network)\n self.cache.put(network)\n elif new_cidrs:\n if self.call_driver('restart', network):\n self.cache.put(network)\n else:\n self.disable_dhcp_helper(network.id)", "def update_network(self, context, net_id, network):\n\n LOG.debug(_(\"QuantumRestProxyV2.update_network() called\"))\n\n # Validate Args\n if network[\"network\"].get(\"admin_state_up\"):\n if network[\"network\"][\"admin_state_up\"] is False:\n LOG.warning(_(\"Network with admin_state_up=False are not yet \"\n \"supported by this plugin. 
Ignoring setting for \"\n \"network %s\", net_name))\n\n # update DB\n orig_net = super(QuantumRestProxyV2, self).get_network(context, net_id)\n tenant_id = orig_net[\"tenant_id\"]\n new_net = super(QuantumRestProxyV2, self).update_network(\n context, net_id, network)\n\n # update network on network controller\n if new_net[\"name\"] != orig_net[\"name\"]:\n try:\n resource = NETWORKS_PATH % (tenant_id, net_id)\n data = {\n \"network\": new_net,\n }\n ret = self.servers.put(resource, data)\n if not self.servers.action_success(ret):\n raise RemoteRestError(ret[2])\n except RemoteRestError as e:\n LOG.error(_(\"QuantumRestProxyV2: Unable to update remote \"\n \"network: %s\"), e.message)\n # reset network to original state\n super(QuantumRestProxyV2, self).update_network(\n context, id, orig_net)\n raise\n\n # return updated network\n return new_net", "def network_refresh(self, kwargs=None):\n scode, hosts = Rest.get('Host')\n filter = {}\n n = 1\n e = {}\n data = []\n for host in hosts:\n os.environ[\"DOCKER_HOST\"] = host['Ip'] + \":\" + str(host['Port'])\n filter['Ip'] = os.environ[\"DOCKER_HOST\"].split(':')[0]\n self.client = docker.from_env()\n try:\n networks = self.client.networks.list(**kwargs)\n except docker.errors.APIError as e:\n Console.error(e.explanation)\n continue\n\n if len(networks) == 0:\n Console.info(\"No network exist\" + host['Ip'])\n continue\n\n for networkm in networks:\n network = networkm.__dict__['attrs']\n network['Ip'] = os.environ[\"DOCKER_HOST\"].split(':')[0]\n data.append(network)\n d = {}\n d['Ip'] = os.environ[\"DOCKER_HOST\"].split(':')[0]\n d['Id'] = network['Id']\n d['Name'] = network['Name']\n d['Containers'] = network['Containers']\n e[n] = d\n n = n + 1\n r = Rest.delete('Network', filter)\n r = Rest.post('Network', data)\n Console.ok(str(Printer.dict_table(e, order=['Ip', 'Id', 'Name', 'Containers'])))", "def test_port_update_deferred_allocation_no_ipam(self):\n with self.network() as network:\n with self.subnet(network=network):\n pass\n\n response = self._create_port(self.fmt,\n net_id=network['network']['id'],\n tenant_id=network['network']['tenant_id'],\n fixed_ips=[],\n is_admin=True)\n port = self.deserialize(self.fmt, response)\n ips = port['port']['fixed_ips']\n self.assertEqual(0, len(ips))\n\n # Create the subnet and try to update the port to get an IP\n data = {'port': {portbindings.HOST_ID: 'fakehost'}}\n port_id = port['port']['id']\n port_req = self.new_update_request('ports', data, port_id,\n as_admin=True)\n response = port_req.get_response(self.api)\n\n self.assertEqual(webob.exc.HTTPOk.code, response.status_int)\n res = self.deserialize(self.fmt, response)\n self.assertEqual(0, len(res['port']['fixed_ips']))", "def port_update_end(self, payload):\n port = DictModel(payload['port'])\n network = self.cache.get_network_by_id(port.network_id)\n if network:\n self.cache.put_port(port)\n self.call_driver('reload_allocations', network)", "def test_update_network(self):\n policies_ports = [\n (None, {self.ports[0].id}),\n (self.qos_policies[1].id, {self.ports[0].id})]\n\n self.ports[1].qos_policy_id = self.qos_policies[0].id\n self.ports[1].update()\n self.ports[2].qos_policy_id = self.qos_policies[1].id\n self.ports[2].update()\n for qos_policy_id, reference_ports in policies_ports:\n self.networks[0].qos_policy_id = qos_policy_id\n self.networks[0].update()\n original_network = {'qos_policy_id': self.qos_policies[0]}\n reviewed_port_ids, _, _ = self.qos_driver.update_network(\n mock.ANY, self.networks[0], original_network)\n 
self.assertEqual(reference_ports, reviewed_port_ids)\n calls = [mock.call(mock.ANY, self.ports[0].id,\n self.ports[0].network_id, qos_policy_id,\n None)]\n self.mock_rules.assert_has_calls(calls)\n self.mock_rules.reset_mock()", "def _internal_network_removed(self, ri, port, ex_gw_port):\n itfc_deleted = False\n driver = self.driver_manager.get_driver(ri.id)\n vrf_name = driver._get_vrf_name(ri)\n network_name = ex_gw_port['hosting_info'].get('network_name')\n if self._router_ids_by_vrf_and_ext_net.get(\n vrf_name, {}).get(network_name) and (\n ri.router['id'] in\n self._router_ids_by_vrf_and_ext_net[vrf_name][network_name]):\n # If this is the last port for this neutron router,\n # then remove this router from the list\n if len(ri.internal_ports) == 1 and port in ri.internal_ports:\n self._router_ids_by_vrf_and_ext_net[\n vrf_name][network_name].remove(ri.router['id'])\n\n # Check if any other routers in this VRF have this network,\n # and if not, set the flag to remove the interface\n if not self._router_ids_by_vrf_and_ext_net[vrf_name].get(\n network_name):\n LOG.debug(\"++ REMOVING NETWORK %s\" % network_name)\n itfc_deleted = True\n del self._router_ids_by_vrf_and_ext_net[\n vrf_name][network_name]\n if not self._router_ids_by_vrf_and_ext_net.get(vrf_name):\n del self._router_ids_by_vrf_and_ext_net[vrf_name]\n\n driver.internal_network_removed(ri, port,\n itfc_deleted=itfc_deleted)\n if ri.snat_enabled and ex_gw_port:\n driver.disable_internal_network_NAT(ri, port, ex_gw_port,\n itfc_deleted=itfc_deleted)", "def test_port_update_deferred_allocation_no_segments(self):\n with self.network() as network:\n pass\n\n port = self._create_deferred_ip_port(network)\n\n # Create the subnet and try to update the port to get an IP\n with self.subnet(network=network):\n data = {'port': {portbindings.HOST_ID: 'fakehost'}}\n port_id = port['port']['id']\n port_req = self.new_update_request('ports', data, port_id,\n as_admin=True)\n response = port_req.get_response(self.api)\n\n self.assertEqual(webob.exc.HTTPOk.code, response.status_int)\n res = self.deserialize(self.fmt, response)\n self.assertEqual(0, len(res['port']['fixed_ips']))", "def test_update_network_external_ports(self):\n policies_ports = [(self.qos_policies[0].id, {self.ports[0].id})]\n self.ports[2].qos_policy_id = self.qos_policies[0].id\n self.ports[2].update()\n port_obj.PortBinding(self.ctx, port_id=self.ports[1].id, host='host',\n profile={}, vif_type='',\n vnic_type=portbindings_api.VNIC_DIRECT).create()\n with mock.patch.object(self.qos_driver._driver._nb_idl,\n 'get_lswitch_port') as mock_lsp:\n mock_lsp.side_effect = [\n mock.Mock(type=ovn_const.LSP_TYPE_LOCALNET),\n mock.Mock(type=ovn_const.LSP_TYPE_EXTERNAL)]\n for qos_policy_id, reference_ports in policies_ports:\n self.networks[0].qos_policy_id = qos_policy_id\n self.networks[0].update()\n original_network = {'qos_policy_id': self.qos_policies[0]}\n reviewed_port_ids, _, _ = self.qos_driver.update_network(\n mock.ANY, self.networks[0], original_network, reset=True)\n self.assertEqual(reference_ports, reviewed_port_ids)\n calls = [mock.call(\n mock.ANY, self.ports[0].id, self.ports[0].network_id,\n qos_policy_id, None)]\n self.mock_rules.assert_has_calls(calls)\n self.mock_rules.reset_mock()", "def update_network_postcommit(self, context):\n if self.rpc_handler is None:\n return\n network = self._get_network_info(context._network)\n for _, _network in network.items():\n network_type = _network.get('network_type', '')\n if network_type not in 
CentecConstant.SUPPORTED_NETWORK_TYPES and len(CentecConstant.SUPPORTED_NETWORK_TYPES) > 0:\n return\n if network is not None:\n try:\n self.rpc_handler.update_network(network)\n except:\n pass", "def reset_cache(self):\n if self.cache_address is not None:\n for add in self.cache:\n os.remove(add + \".cd\")\n os.remove(add + \".cl\")\n self.cache = [None] * len(self)", "def __fillCache(self):\n assert (not self.__modelCache)\n\n # Assemble a list of model IDs to look up\n numModelIDs = len(self.__modelIDs) if self.__modelIDs else 0\n\n if self.__nextIndex >= numModelIDs:\n return\n\n idRange = self.__nextIndex + self.__CACHE_LIMIT\n if idRange > numModelIDs:\n idRange = numModelIDs\n\n lookupIDs = self.__modelIDs[self.__nextIndex:idRange]\n\n self.__nextIndex += (idRange - self.__nextIndex)\n\n # Query Nupic for model info of all models in the look-up list\n # NOTE: the order of results may not be the same as lookupIDs\n infoList = _clientJobsDB().modelsInfo(lookupIDs)\n assert len(infoList) == len(lookupIDs), \\\n \"modelsInfo returned %s elements; expected %s.\" % \\\n (len(infoList), len(lookupIDs))\n\n # Create _NupicModelInfo instances and add them to cache\n for rawInfo in infoList:\n modelInfo = _NupicModelInfo(rawInfo=rawInfo)\n self.__modelCache.append(modelInfo)\n\n assert len(self.__modelCache) == len(lookupIDs), \\\n \"Added %s elements to modelCache; expected %s.\" % \\\n (len(self.__modelCache), len(lookupIDs))\n\n if self.debug:\n _emit(Verbosity.DEBUG,\n \"MODELITERATOR: Leaving __fillCache(); modelCacheLen=%s\" % \\\n (len(self.__modelCache),))", "def sync_state(self):\n LOG.info(_('Synchronizing state'))\n known_networks = set(self.cache.get_network_ids())\n\n try:\n active_networks = set(self.plugin_rpc.get_active_networks())\n for deleted_id in known_networks - active_networks:\n self.disable_dhcp_helper(deleted_id)\n\n for network_id in active_networks:\n self.refresh_dhcp_helper(network_id)\n except:\n self.needs_resync = True\n LOG.exception(_('Unable to sync network state.'))", "def test_port_update_with_fixed_ips_ok_if_no_binding_host(self):\n with self.network() as network:\n segment = self._test_create_segment(\n network_id=network['network']['id'],\n physical_network='physnet',\n network_type=constants.TYPE_VLAN)\n\n # Create a port with no IP address (since there is no subnet)\n port = self._create_deferred_ip_port(network)\n\n # Create the subnet and try to update the port to get an IP\n with self.subnet(network=network,\n segment_id=segment['segment']['id']) as subnet:\n # Try requesting an IP (but the only subnet is on a segment)\n data = {'port': {\n 'fixed_ips': [{'subnet_id': subnet['subnet']['id']}]}}\n port_id = port['port']['id']\n port_req = self.new_update_request('ports', data, port_id)\n response = port_req.get_response(self.api)\n\n # The IP is allocated since there is no binding host info any\n # subnet can be used for allocation.\n self.assertEqual(webob.exc.HTTPOk.code, response.status_int)", "def test_update_external_network(self):\n network_policies = [(self.qos_policies[1].id,\n {self.fips[1].id},\n {self.router_fips.id}),\n (None,\n {self.fips[1].id},\n {self.router_fips.id})]\n\n self.fips[0].qos_policy_id = self.qos_policies[0].id\n self.fips[0].update()\n for qos_policy_id, ref_fips, ref_routers in network_policies:\n self.fips_network.qos_policy_id = qos_policy_id\n self.fips_network.update()\n original_network = {'qos_policy_id': self.qos_policies[0]}\n _, reviewed_fips_ids, reviewed_router_ids = (\n self.qos_driver.update_network(\n 
mock.Mock(), self.fips_network, original_network))\n self.assertEqual(ref_fips, reviewed_fips_ids)\n self.assertEqual(ref_routers, reviewed_router_ids)", "def updateNetwork(self, session: Session, network: Network) -> Network:\n try:\n return NetworkManager().updateNetwork(session, network)\n except TortugaException as ex:\n raise\n except Exception as ex:\n self._logger.exception(str(ex))\n raise TortugaException(exception=ex)", "def dcnm_network_delete_event(self, network_info):\n seg_id = network_info.get('segmentation_id')\n if not seg_id:\n LOG.error(_LE('Failed to delete network. Invalid network '\n 'info %s.'), network_info)\n query_net = self.get_network_by_segid(seg_id)\n if not query_net:\n LOG.info(_LI('dcnm_network_delete_event: network %(segid)s '\n 'does not exist.'), {'segid': seg_id})\n return\n if self.fw_api.is_network_source_fw(query_net, query_net.name):\n LOG.info(_LI(\"Service network %s, returning\"), query_net.name)\n return\n # Send network delete request to neutron\n try:\n del_net = self.network.pop(query_net.network_id)\n self.neutronclient.delete_network(query_net.network_id)\n self.delete_network_db(query_net.network_id)\n except Exception as exc:\n # Failed to delete network.\n # Put back the entry to the local cache???\n self.network[query_net.network_id] = del_net\n LOG.exception(_LE('dcnm_network_delete_event: Failed to delete '\n '%(network)s. Reason %(err)s.'),\n {'network': query_net.name, 'err': str(exc)})", "def fusion_api_edit_network_set(self, body=None, uri=None, api=None, headers=None):\n return self.network_set.update(body, uri, api, headers)", "def test_port_update_deferred_allocation_no_host_mapping(self):\n network, segment, subnet = self._create_test_segment_with_subnet()\n\n port = self._create_deferred_ip_port(network)\n self._validate_deferred_ip_allocation(port['port']['id'])\n\n # Try requesting an IP (but the only subnet is on a segment)\n data = {'port': {portbindings.HOST_ID: 'fakehost'}}\n port_id = port['port']['id']\n port_req = self.new_update_request('ports', data, port_id,\n as_admin=True)\n response = port_req.get_response(self.api)\n res = self.deserialize(self.fmt, response)\n\n # Gets conflict because it can't map the host to a segment\n self.assertEqual(webob.exc.HTTPConflict.code, response.status_int)\n self.assertEqual(segment_exc.HostNotConnectedToAnySegment.__name__,\n res['NeutronError']['type'])", "def alter_network(self, add=[], remove=[]):\n\n self.network.edges.add_many(add)\n self.network.edges.remove_many(remove)\n return self.score_network()", "def before_update(self, introspection_data, node_info, **kwargs):\n inventory = utils.get_inventory(introspection_data)\n\n ironic_ports = node_info.ports()\n\n for iface in inventory['interfaces']:\n if iface['name'] not in introspection_data['all_interfaces']:\n continue\n\n mac_address = iface['mac_address']\n port = ironic_ports.get(mac_address)\n if not port:\n LOG.debug(\"Skipping physical network processing for interface \"\n \"%s, matching port not found in Ironic.\",\n mac_address,\n node_info=node_info, data=introspection_data)\n continue\n\n # Determine the physical network for this port.\n # Port not touched in here.\n physnet = self.get_physnet(port, iface['name'], introspection_data)\n if physnet is None:\n LOG.debug(\"Skipping physical network processing for interface \"\n \"%s, no physical network mapping\",\n mac_address,\n node_info=node_info, data=introspection_data)\n continue\n\n patch = self._get_physnet_patch(physnet, port)\n if patch is None:\n 
LOG.debug(\"Skipping physical network processing for interface \"\n \"%s, no update required\",\n mac_address,\n node_info=node_info, data=introspection_data)\n continue\n\n try:\n node_info.patch_port(port, [patch])\n except exceptions.BadRequestException as e:\n LOG.warning(\"Failed to update port %(uuid)s: %(error)s\",\n {'uuid': port.id, 'error': e},\n node_info=node_info)", "def _update_fixed_ips_config(port_config):\n fixed_ip_prop = ctx.node.properties.get('fixed_ip')\n if not (port_config.get('fixed_ips') or fixed_ip_prop):\n return\n\n elif not port_config.get('fixed_ips'):\n port_config['fixed_ips'] = []\n\n if fixed_ip_prop:\n for item in port_config['fixed_ips']:\n if item.get('ip_address') and item['ip_address'] == fixed_ip_prop:\n break\n else:\n port_config['fixed_ips'].append({'ip_address': fixed_ip_prop})", "def reset_cache(self):\n self.cache = [None] * self.n_layers\n self.offset = 0\n logger.debug('Reset cache.')", "def test_networking_project_network_update(self):\n pass" ]
[ "0.6243899", "0.5750837", "0.55582297", "0.5493871", "0.54649335", "0.53545606", "0.5274179", "0.52181244", "0.51568764", "0.51559347", "0.5134361", "0.51047474", "0.5044345", "0.5022658", "0.50088584", "0.50081223", "0.50078607", "0.49672914", "0.4941016", "0.49056178", "0.49003807", "0.48882106", "0.48862943", "0.48803425", "0.48673484", "0.48665342", "0.4862084", "0.48397473", "0.48308828", "0.4825883" ]
0.7541457
0
Run the EtcdWatcher loop.
def run(self): self.etcd.start()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run(self):\n watcher = self._watcher(self.on_recv)\n watcher.loop()", "def run(self):\n logging.basicConfig(level=logging.INFO,\n format='%(asctime)s - %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S')\n\n event_handler = PatternMatchingEventHandler(\n self.patterns,\n self.ignore_patterns,\n self.ignore_directories,\n self.case_sensitive\n )\n\n # event_handler.on_deleted = self.on_deleted\n # event_handler.on_modified = self.on_modified\n event_handler.on_moved = self.on_moved\n event_handler.on_created = self.on_created\n\n go_recursively = True\n\n observer = Observer()\n observer.schedule(event_handler, self.path, recursive=go_recursively)\n observer.start()\n\n try:\n while True:\n time.sleep(1)\n except KeyboardInterrupt:\n observer.stop()\n observer.join()", "def run(self):\n self.logger.info(\"starting Dashi consumer\")\n while not self.shutdown:\n self.rpc.listen()", "def run_inner(self):\n for event in self.inotify.event_gen():\n self.process_inotify_event(event)", "def start(self):\n self.watcher.start()\n self._asyncio_loop.run_forever()", "def run(self):\n while True:\n time.sleep(RTM_READ_DELAY)\n for event in self._slack_client.rtm_read():\n self.handle_event(event)", "def _main_loop(self):\n observer = Observer()\n observer.schedule(self.changes_event_handler, path=self.base_dir, recursive=False)\n observer.start()\n while True:\n if os.path.exists(self.todo_local_file):\n with open(self.todo_local_file, 'rb') as f:\n obj_list = pickle.load(f)\n\n today_todo_list = [i for i in obj_list if self.is_today_todo(i['time'])]\n self.solve_one_day_todo_events(todo_items_list=today_todo_list)\n else:\n time.sleep(60)\n pass", "def run(self):\n if not self.running:\n self.loop.run_forever()", "def event_loop(self):\n while self.ack is False:\n gevent.sleep(self.loop_interval)\n output_service = self.get_directory_service_proxy().get_service(\"mock-output-service\")\n output_service.put(\"test-worker-work-result\")\n self.ack = True", "def run(self): \n #\n\n \n # forever loop\n while True: \n \n for app in self.app_list:\n self.check(app) \n #print(\"check\")\n \n gevent.sleep(SLEEP_SECONDS)", "def listen(self):\n self.init_delete_batch_processing()\n self.init_file_batch_processing()\n self.init_symlink_batch_processing()\n\n self.loop.create_task(self.start_watching_roots())\n\n self.revisit_cond = asyncio.Condition()\n self.loop.create_task(self.start_polling_revisits())\n\n self.start_polling_changes()\n self.loop.run_forever()\n self.stop_polling_changes()", "def run_forever(self):\n reactor.run()", "def run_forever(self):\n reactor.run()", "def run(self):\n # for running indefinitely if 'watch' is passed\n if self._arguments.watch:\n while True:\n self.watch(self.main(), int(self._arguments.watch))\n else:\n self.main()", "def main() -> None:\n runner()\n asyncio.get_event_loop().run_forever()", "def main() -> None:\n runner()\n asyncio.get_event_loop().run_forever()", "def run(self):\n self.connect()\n self.run_forever()", "def main():\n channel_watcher = ChannelWatcher()\n channel_watcher.create_threads()\n for thread in channel_watcher.threads:\n thread.join()\n return", "def run(self):\n self.logger.info(f'Running {self.__class__.__name__}')\n while True:\n last_check = time.time()\n self.collect_new_events()\n while time.time() - last_check < self._check_for_new_events_interval:\n self.logger.debug('Waiting for new events collection: new collection in {}s'.format(\n self._check_for_new_events_interval - (time.time() - last_check)))\n time.sleep(1)", "def run(self):\n 
self.poller = select.epoll()\n self.pollmask = select.EPOLLIN | select.EPOLLHUP | select.EPOLLERR\n self.poller.register(self.server,self.pollmask)\n self.timeout = float(self.webconfig.parameters[\"timeout\"])\n lastSweep = time.time()\n\n while True:\n # poll sockets\n\n if (time.time() - lastSweep) > .5: #sweet through every half second\n self.socketCheck()\n lastSweep = time.time()\n try:\n fds = self.poller.poll(timeout=1.0)\n except:\n return\n fd = 0\n for (fd,event) in fds:\n # handle errors\n if event & (select.POLLHUP | select.POLLERR):\n self.handleError(fd)\n continue\n # handle the server socket\n if fd == self.server.fileno():\n self.handleServer()\n continue\n # handle client socket\n result = self.handleClient(fd)", "def run(self):\n self.cmdloop()", "def run(self):\n\n self._daemon_thread.start()\n\n while True:\n time.sleep(5)", "def _event_loop(self):\n while True:\n self.scheduler.run(blocking=True)\n time.sleep(1)", "def run(self):\n while True:\n try:\n if not self._read_new_entries(False):\n time.sleep(0.1)\n self._update_all_tasks()\n except KeyboardInterrupt:\n break", "def main(dir_to_watch):\n event_handler = AudioCreatedHandler()\n observer = Observer()\n observer.schedule(event_handler, dir_to_watch, recursive=True)\n observer.start()\n try:\n while True:\n time.sleep(1) \n except KeyboardInterrupt:\n print \"Stopping...\"\n observer.stop()\n observer.join()", "def run(self):\n # get the active node before we start anything...\n self.active_node_ip_port = self.get_active_node()\n if self.active_node_ip_port is None:\n logger.critical(\"ERROR: Could not get active vault node from \"\n \"Consul. Exiting.\")\n raise SystemExit(3)\n logger.warning(\"Initial Vault active node: %s\",\n self.active_node_ip_port)\n site = Site(VaultRedirectorSite(self))\n # setup our HTTP(S) listener\n if self.tls_factory is not None:\n self.listentls(site)\n else:\n self.listentcp(site)\n # setup the update_active_node poll every POLL_INTERVAL seconds\n self.add_update_loop()\n logger.warning('Starting Twisted reactor (event loop)')\n self.run_reactor()", "def run(self):\n while self.running:\n self.handle_request()", "def run(self):\n try:\n self.eventloop.run_forever()\n except KeyboardInterrupt:\n self.logger.info(\"Interrupt received, shutting down.\")\n except Exception:\n self.logger.exception(\"Unhandled exception raised, shutting down.\")\n finally:\n self._shutdown()\n self.logger.debug(\"Closing event loop\")\n self.eventloop.close()\n if self._restarting:\n self.logger.info(f\"Restarting with command line: {sys.argv}\")\n os.execl(sys.executable, sys.executable, *sys.argv)", "def loop(self):\n keys.mode = 'main'\n for line in client.readlines('/event'):\n if not self.alive:\n break\n self.dispatch(*line.split(' ', 1))\n self.alive = False", "def _run_loop(self):\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n\n self._server = websockets.serve(self._log_message, self._host, self._port)\n\n loop.run_until_complete(self._server)\n loop.run_forever()" ]
[ "0.60920537", "0.60423386", "0.6004358", "0.59458005", "0.5935371", "0.5914737", "0.5896926", "0.58915985", "0.58893985", "0.5884968", "0.58668745", "0.5844063", "0.5844063", "0.58165354", "0.58041525", "0.58041525", "0.5799248", "0.57745546", "0.57614356", "0.5740617", "0.57405484", "0.57402986", "0.57360905", "0.5727484", "0.5718508", "0.5704776", "0.5692617", "0.5688287", "0.5687442", "0.5672505" ]
0.6893604
0
Compiles robot from given file and returns class object
def compile_robot(file_name, module_name = "contestant_module"): global counter_module module_name += str(counter_module) counter_module += 1 mod = importCode(file_name, module_name) compiled_class = None for symbol in dir(mod): if hasattr(getattr(mod, symbol), "act") and getattr(mod, symbol).__name__ != "RobotController": compiled_class = getattr(mod, symbol) print compiled_class globals()[compiled_class.__name__] = compiled_class if compiled_class is None: raise KrakrobotException("Not found class with act() function named different than RobotController in provided .py") return compiled_class, mod
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def prepareRobot(self):\n f = StringIO.StringIO()\n f.write(self.zipfile)\n\n zip = zipfile.ZipFile(f)\n \n #modules of the form {\"robot\" : \"from sr import *...\", ...}\n modules = dict([(os.path.splitext(z.filename)[0], zip.open(z.filename).read())\n for z in zip. infolist() \\\n if os.path.splitext(z.filename)[1] == \".py\"])\n\n class Loader:\n \"\"\"\n An object capable of bringing the python in the contents string\n to life. This works as part of the import hooks structure.\n \"\"\"\n def __init__(self, fullname, contents):\n self.fullname = fullname\n self.contents = contents\n\n def load_module(self, fullname):\n if fullname in sys.modules:\n return sys.modules[fullname]\n\n mod = sys.modules.setdefault(fullname, imp.new_module(fullname))\n mod.__file__ = \"<memory/%s>\" % fullname\n mod.__loader__ = self\n\n code = compile(self.contents, mod.__file__, \"exec\")\n\n exec code in mod.__dict__\n return mod\n\n class Finder:\n \"\"\"\n An object to provide loaders for modules present as strings in the\n modules dict.\n \"\"\"\n def __init__(self, modules):\n self.modules = modules\n\n def find_module(self, fullname, path=None):\n if (fullname in self.modules) and (path == None):\n return Loader(fullname, self.modules[fullname])\n\n return None\n\n #Register the finder with the system\n sys.meta_path.append(Finder(modules))", "def createInstanceSource(pcol, path, nr_robots, smallest_robot_id):\n\n # prevent alphabet related bugs by including e and f objects in alphabet\n if (\"e\" not in pcol.A):\n pcol.A.append(\"e\")\n if (\"f\" not in pcol.A):\n pcol.A.append(\"f\")\n\n with open(path + \".c\", \"w\") as fout:\n fout.write(\"\"\"#include \"%s.h\"\n\n#ifdef NEEDING_WILDCARD_EXPANSION\n #include \"wild_expand.h\"\n#endif\n\n#ifdef PCOL_SIM\"\"\" % path.split(\"/\")[-1]) #only filename\n\n fout.write(\"\"\"\\n char* objectNames[] = {[NO_OBJECT] = \"no_object\", \"\"\")\n for obj in pcol.A:\n fout.write(\"\"\"[OBJECT_ID_%s] = \"%s\", \"\"\" % (obj.upper(), obj))\n\n fout.write(\"\"\"};\n char* agentNames[] = {\"\"\")\n for ag_name in pcol.B:\n fout.write(\"\"\"[AGENT_%s] = \"%s\", \"\"\" % (ag_name.upper(), ag_name))\n fout.write(\"\"\"};\n#endif\n\n//the smallest kilo_uid from the swarm\nconst uint16_t smallest_robot_uid = %d;\n//the number of robots that make up the swarm\nconst uint16_t nr_swarm_robots = %d;\n\nvoid lulu_init(Pcolony_t *pcol) {\"\"\" % (smallest_robot_id, nr_robots) )\n\n # call initPcolony()\n fout.write(\"\"\"\\n //init Pcolony with alphabet size = %d, nr of agents = %d, capacity = %d\n initPcolony(pcol, %d, %d, %d);\"\"\" % (len(pcol.A), len(pcol.B), pcol.n, len(pcol.A), len(pcol.B), pcol.n))\n fout.write(\"\"\"\\n //Pcolony.alphabet = %s\"\"\" % pcol.A)\n\n # init environment\n fout.write(\"\"\"\\n\\n //init environment\"\"\")\n counter = 0;\n for obj, nr in pcol.env.items():\n #replace %id and * with $id and $ respectively\n\n fout.write(\"\"\"\\n pcol->env.items[%d].id = OBJECT_ID_%s;\"\"\" % (counter, obj.upper()))\n fout.write(\"\"\"\\n pcol->env.items[%d].nr = %d;\\n\"\"\" % (counter, nr))\n counter += 1\n fout.write(\"\"\"\\n //end init environment\"\"\")\n\n fout.write(\"\"\"\\n\\n //init global pswarm environment\"\"\")\n if (pcol.parentSwarm == None or len(pcol.parentSwarm.global_env) == 0):\n fout.write(\"\"\"\\n pcol->pswarm.global_env.items[0].id = OBJECT_ID_E;\"\"\")\n fout.write(\"\"\"\\n pcol->pswarm.global_env.items[0].nr = 1;\"\"\")\n else:\n counter = 0\n for obj, nr in pcol.parentSwarm.global_env.items():\n #replace %id and * with $id and 
$ respectively\n\n fout.write(\"\"\"\\n pcol->pswarm.global_env.items[%d].id = OBJECT_ID_%s;\"\"\" % (counter, obj.upper()))\n fout.write(\"\"\"\\n pcol->pswarm.global_env.items[%d].nr = %d;\"\"\" % (counter, nr))\n counter += 1\n fout.write(\"\"\"\\n //end init global pswarm environment\"\"\")\n\n fout.write(\"\"\"\\n\\n //init INPUT global pswarm environment\"\"\")\n if (pcol.parentSwarm == None or len(pcol.parentSwarm.in_global_env) == 0):\n fout.write(\"\"\"\\n pcol->pswarm.in_global_env.items[0].id = OBJECT_ID_E;\"\"\")\n fout.write(\"\"\"\\n pcol->pswarm.in_global_env.items[0].nr = 1;\"\"\")\n else:\n counter = 0\n for obj, nr in pcol.parentSwarm.in_global_env.items():\n #replace %id and * with $id and $ respectively\n\n fout.write(\"\"\"\\n pcol->pswarm.in_global_env.items[%d].id = OBJECT_ID_%s;\"\"\" % (counter, obj.upper()))\n fout.write(\"\"\"\\n pcol->pswarm.in_global_env.items[%d].nr = %d;\"\"\" % (counter, nr))\n counter += 1\n fout.write(\"\"\"\\n //end init INPUT global pswarm environment\"\"\")\n\n fout.write(\"\"\"\\n\\n //init OUTPUT global pswarm environment\"\"\")\n if (pcol.parentSwarm == None or len(pcol.parentSwarm.out_global_env) == 0):\n fout.write(\"\"\"\\n pcol->pswarm.out_global_env.items[0].id = OBJECT_ID_E;\"\"\")\n fout.write(\"\"\"\\n pcol->pswarm.out_global_env.items[0].nr = 1;\"\"\")\n else:\n counter = 0\n for obj, nr in pcol.parentSwarm.out_global_env.items():\n #replace %id and * with $id and $ respectively\n\n fout.write(\"\"\"\\n pcol->pswarm.out_global_env.items[%d].id = OBJECT_ID_%s;\"\"\" % (counter, obj.upper()))\n fout.write(\"\"\"\\n pcol->pswarm.out_global_env.items[%d].nr = %d;\"\"\" % (counter, nr))\n counter += 1\n fout.write(\"\"\"\\n //end init OUTPUT global pswarm environment\"\"\")\n\n for ag_name in pcol.B:\n fout.write(\"\"\"\\n\\n //init agent %s\"\"\" % ag_name)\n #fout.write(\"\"\"\\n\\n initAgent(&pcol->agents[AGENT_%s], pcol, %d);\"\"\" % (ag_name.upper(), len(pcol.agents[ag_name].programs)))\n fout.write(\"\"\"\\n\\n initAgent(&pcol->agents[AGENT_%s], pcol, %d);\"\"\" % (ag_name.upper(), getNrOfProgramsAfterExpansion(pcol.agents[ag_name], nr_robots- 1)))\n\n fout.write(\"\"\"\\n //init obj multiset\"\"\")\n counter = 0;\n for obj, nr in pcol.agents[ag_name].obj.items():\n #replace %id and * with $id and $ respectively\n\n for i in range(nr):\n fout.write(\"\"\"\\n pcol->agents[AGENT_%s].obj.items[%d] = OBJECT_ID_%s;\"\"\" % (ag_name.upper(), counter, obj.upper()))\n counter += 1\n\n fout.write(\"\"\"\\n\\n //init programs\"\"\")\n for prg_nr, prg in enumerate(pcol.agents[ag_name].programs):\n fout.write(\"\"\"\\n\\n initProgram(&pcol->agents[AGENT_%s].programs[%d], %d);\"\"\" % (ag_name.upper(), prg_nr, getNrOfRulesWithoutRepetitions(prg)))\n fout.write(\"\"\"\\n //init program %d: < %s >\"\"\" % (prg_nr, prg.print()))\n\n rule_index = 0\n for rule_nr, rule in enumerate(prg):\n # skip rules that contain identical operands and thus have no effect\n if (rule.lhs == rule.rhs and rule.lhs == 'e' and rule.main_type != sim.RuleType.conditional):\n continue\n\n fout.write(\"\"\"\\n //init rule %d: %s\"\"\" % (rule_nr, rule.print(toString=True)) )\n if (rule.main_type != sim.RuleType.conditional):\n fout.write(\"\"\"\\n initRule(&pcol->agents[AGENT_%s].programs[%d].rules[%d], RULE_TYPE_%s, OBJECT_ID_%s, OBJECT_ID_%s, NO_OBJECT, NO_OBJECT);\"\"\" % (ag_name.upper(), prg_nr, rule_index, rule.type.name.upper(), rule.lhs.upper(), rule.rhs.upper()))\n else:\n fout.write(\"\"\"\\n initRule(&pcol->agents[AGENT_%s].programs[%d].rules[%d], 
RULE_TYPE_CONDITIONAL_%s_%s, OBJECT_ID_%s, OBJECT_ID_%s, OBJECT_ID_%s, OBJECT_ID_%s);\"\"\" % (ag_name.upper(), prg_nr, rule_index, rule.type.name.upper(), rule.alt_type.name.upper(), rule.lhs.upper(), rule.rhs.upper(), rule.alt_lhs.upper(), rule.alt_rhs.upper()))\n\n #increase rule_index\n rule_index += 1\n fout.write(\"\"\"\\n //end init program %d\n pcol->agents[AGENT_%s].init_program_nr++;\"\"\" % (prg_nr, ag_name.upper()))\n fout.write(\"\"\"\\n //end init programs\"\"\")\n\n fout.write(\"\"\"\\n //end init agent %s\"\"\" % ag_name)\n\n fout.write(\"\"\"\\n}\"\"\")\n fout.write(\"\"\"\\n\\nvoid lulu_destroy(Pcolony_t *pcol) {\n //destroys all of the subcomponents\n destroyPcolony(pcol);\n}\"\"\")\n fout.write(\"\"\"\\n\n#ifdef NEEDING_WILDCARD_EXPANSION\nuint16_t expand_pcolony(Pcolony_t *pcol, uint16_t my_id) {\n //used for a cleaner iteration through the P colony\n //instead of using agents[i] all of the time, we use just agent\n Agent_t *agent;\n\"\"\")\n\n fout.write(\"\"\"\\n uint8_t obj_with_id[] = {\"\"\")\n obj_with_id_size = 0\n for obj in pcol.A:\n if (\"_W_ID\" in obj):\n fout.write(\"OBJECT_ID_%s, \" % obj.upper())\n obj_with_id_size += 1\n fout.write(\"\"\"};\n uint8_t obj_with_id_size = %d;\"\"\" % (obj_with_id_size))\n\n fout.write(\"\"\"\\n uint8_t obj_with_any[] = {\"\"\")\n obj_with_any_size = 0\n is_obj_with_any_followed_by_id = []\n for i, obj in enumerate(pcol.A):\n if (obj.endswith(\"_W_ALL\")):\n fout.write(\"OBJECT_ID_%s, \" % obj.upper())\n # if we are at least 2 objects before the end of the list\n if (i < len(pcol.A) - 1):\n # check if this _$ wildcarded object is followed by a _$id object\n if (\"_W_ID\" in pcol.A[i+1]):\n is_obj_with_any_followed_by_id.append(1)\n else:\n is_obj_with_any_followed_by_id.append(0)\n else:\n # this (_$) object is the last one in the list\n is_obj_with_any_followed_by_id.append(0)\n obj_with_any_size += 1\n fout.write(\"\"\"};\n uint8_t obj_with_any_size = %d;\n uint8_t is_obj_with_any_followed_by_id[] = {%s};\"\"\" % (obj_with_any_size,\n str(is_obj_with_any_followed_by_id).replace(\"[\", \"\").replace(\"]\", \"\")))\n\n fout.write(\"\"\"\\n\\n uint16_t my_symbolic_id = my_id - smallest_robot_uid;\n\n //replace W_ID wildcarded objects with the object corresponding to the symbolic id\n // e.g.: B_W_ID -> B_0 for my_symbolic_id = 0\n replacePcolonyWildID(pcol, obj_with_id, obj_with_id_size, my_symbolic_id);\n\n //expand each obj_with_any[] element into nr_swarm_robots objects except my_symbolic id.\n // e.g.: B_W_ALL -> B_0, B_2 for nr_swarm_robots = 3 and my_symbolic_id = 1\n expandPcolonyWildAny(pcol, obj_with_any, is_obj_with_any_followed_by_id, obj_with_any_size, my_symbolic_id, nr_swarm_robots);\n\n return my_symbolic_id;\n}\n#endif\"\"\")", "def _compile(self, source: str, filename: str) -> CodeType:\n return compile(source, filename, \"exec\") # type: ignore", "def __init__(self, file):\n self.file = file # maybe should be a folder?\n self.file = self.file.rsplit('.')[0] + \".asm\"\n self.name = None\n self.func_name = None\n self.if_count = 0\n self.call_count = 0", "def compile_contract(file: str, class_call: str) -> str:\n print(f\"Compiling {file}.py ....\")\n exit_code = os.system(\n f\"~/smartpy-cli/SmartPy.sh compile contract/contracts/{file}.py \\\"{class_call}\\\" contract/build\")\n if exit_code != 0:\n raise Exception(f\"Failed to compile Contract : {file}.py\")", "def Build(self, out_file):\n raise NotImplementedError", "def __init__(self, code=\"\", lang=\"\", input=\"\", id=0):\n self.code = code\n 
self.lang = lang\n self.input = input\n self.id = str(id)\n self.output = \"\"\n self.status = \"\"\n self.create_file()\n if(self.lang == \"PYTHON\"):\n self.compile_python()\n elif(self.lang == \"C\"):\n self.compile_c()\n elif(self.lang == \"CPP\"):\n self.compile_cpp()\n elif(self.lang == \"JAVA\"): # For Java File \n self.compile_java()\n elif(self.lang==\"JS\"):\n self.compile_js()\n self.delete_file()", "def default_robot(): #py:default_robot\n class Robot(UsedRobot):\n def __init__(self):\n self.body = RUR._default_robot_body_()\n return Robot()", "def NewRobot(self,module,s,d,x,y,xT,yT,rgb): #Add robot\n\t\tsys.path.insert(0, './studentRobots')\n\t\tmname = module[:-3]\n\t\t\n\t\tspec = importlib.util.find_spec(mname)\n\t\tif spec is None:\n\t\t\tprint(\"can't find the module\")\n\t\telse:\n\t\t\t# the actual import ...\n\t\t\tmodule = importlib.util.module_from_spec(spec)\n\t\t\tspec.loader.exec_module(module)\n\t\tprint(mname,module)\n\t\trbot = module.s1Robot(s,d,x,y,xT,yT,rgb)\n\t\tself.__robotList.append(rbot)", "def compile(cls, node, filename):\n compiler = cls(filename)\n compiler.visit(node)\n code_ops = compiler.code_ops\n code = Code(\n code_ops, [], ['identifiers', 'toolkit'], False, False, True, \n node.name, filename, node.lineno, node.doc,\n )\n return code", "def compile_class(self):\n\t\t\n\t\txml = '<class>\\n' + self.tokenizer.keyword() + self.tokenizer.identifier() + self.tokenizer.symbol()\n\n\t\tself.outfile.write(xml)", "def main():\n parser = argparse.ArgumentParser(description='REA Robot')\n parser.add_argument('--c', metavar='FILE', type=str, required=False, help='File with commands to execute. One command per line')\n args = parser.parse_args()\n\n # Get list of commands to execute\n commands = load_command_list(args.c)\n if len(commands) == 0:\n commands = read_commands_from_console()\n\n logger.debug('List of commands to execute: {}'.format(commands))\n\n # Run the Robot\n robot = Robot()\n cmd_parser = CommandsParser(commands)\n while True:\n cmd_and_args = cmd_parser.get_next_command()\n if cmd_and_args:\n cmd_and_args[0].run(robot, cmd_and_args[1])\n else:\n break", "def compile(path_to_src, path_to_dest, connections, tunables, file_type=None):\n\n # if not provided a file type, infer from file extension\n if file_type == None:\n file_type = path_to_src.split(\".\")[-1]\n\n assert file_type in tokenizers\n tokenizer = tokenizers[file_type]\n\n graph = build_graph(connections)\n\n with open(path_to_src, 'r') as file:\n src = file.read()\n\n tokens = tokenizer.tokenize(src)\n\n tokens = first_pass(tokens, graph)\n\n #tokens = second_pass(tokens, gates)\n\n #tokens = third_pass(tokens, gates)\n\n compiled = tokenizer.detokenize(tokens)\n\n with open(path_to_dest, 'w') as file:\n file.write(compiled)", "def compile_class(self):\r\n self.tokenizer.advance() # ignore 'class' keyword\r\n self.class_name = self.tokenizer.identifier()\r\n self.tokenizer.advance()\r\n self.tokenizer.advance() # ignore '{' symbol\r\n while self.tokenizer.curtok < len(self.tokenizer.tokens) - 1:\r\n dec = self.tokenizer.key_word()\r\n if dec == \"field\" or dec == \"static\":\r\n self.compile_var_dec()\r\n else:\r\n self.compile_subroutine()\r\n self.tokenizer.advance()", "def compile(expression: str) -> Compiled:\r\n e = Compiled(expression)\r\n e.tokenize()\r\n return e", "def __init__(self, xml_name, recompile_cpp=False, rendering=True):\n if recompile_cpp:\n self._update_wrapper()\n\n if sys.platform.startswith('darwin'):\n cdll_path = 
os.path.join(os.path.dirname(__file__), \"libsimenv.dylib\")\n elif sys.platform.startswith('linux'):\n cdll_path = os.path.join(os.path.dirname(__file__), \"libsimenv.so\")\n elif sys.platform.startswith('win32'):\n cdll_path = os.path.join(os.path.dirname(__file__), \"libsimenv.dll\")\n else:\n raise EnvironmentError(\"Unknown operating system found.\")\n\n model_path = os.path.join(pathlib.Path(__file__).parent, \"mujoco_model/\", xml_name).encode('utf-8')\n self.rendering = rendering\n\n # C++ control engine.\n self.wrapper = ctypes.CDLL(cdll_path)\n self.instance = self.wrapper.get_instance(ctypes.c_char_p(model_path), ctypes.c_bool(rendering))\n\n # Indices of the object bodies.\n self.obstacle_body_index = self.get_body_index(\"obstacle\")\n self.agent_body_index = self.get_body_index(\"agent\")\n\n # Indices of the joints.\n self.obstacle_jnt_index = self.get_jnt_index(\"slider:obstacle\")\n self.agent_jnt_x_index = self.get_jnt_index(\"slider:agent-obstacle_x\")\n self.agent_jnt_y_index = self.get_jnt_index(\"slider:agent-y\")\n\n # Initial positions from the configuration.\n self.obstacle_pos = self.get_body_ini_pos(self.obstacle_body_index)\n self.agent_pos = self.get_body_ini_pos(self.agent_body_index)", "def PrepareCompile(file):\n global oilcc_I,oilcc_o,oilcc_S,oilcc_target\n fp = open(file,'r')\n # some flags\n item = ''; #one item is minimum object such as TASK,ALARM ...\n barcenum = 0;\n flag = False; #has \" { \" encountered or not\n start = False #has match an obj start or not\n for line in fp.readlines():\n #firstly, filter out the comment on this line\n el = DropComment(line);\n if(start == False):\n #{\n item = ''; \n barcenum = 0;\n flag = False;\n if(IsIt('osekObj',el)):\n start = True;\n item += el;\n if(el.count('{') > 0): #so at comment should not include '{}'\n flag = True;\n barcenum += el.count('{');\n if(el.count('}') > 0):\n barcenum -= el.count('}');\n if((flag == True) and (barcenum == 0)): #in one line\n #filter out the multi-line comment\n item = DropComment(item)\n oilcc_texts.append(item);\n start = False\n else: # special process for include\n inc = GetIt('include',el)\n if(inc != None): #include file\n flag_inc = False\n for I in oilcc_I:\n finc = I + '/' + inc[0]\n if(os.path.exists(finc)):\n print 'INFO:parse include file <%s> in the path <%s>'%(inc[0],I)\n PrepareCompile(finc);\n flag_inc = True;\n if(flag_inc == False):\n print 'ERROR:cann\\'t find out the file %s!'%(inc[0])\n sys.exit(-1)\n #}\n else:\n #{\n if(el.count('{') > 0): #so at comment should not include '{}'\n flag = True;\n barcenum += el.count('{');\n if(el.count('}') > 0):\n barcenum -= el.count('}');\n item += el;\n if((flag == True) and (barcenum == 0)):\n #filter out the multi-line comment\n item = DropComment(item)\n oilcc_texts.append(item);\n start = False\n #}\n fp.close()", "def __init__(self):\n BuildSystemBase.__init__(self, \"makefile\")", "def load_crawl():\n\n\tmodule_path = dirname(__file__)\n\twith open(sys.argv[1]) as csv_file:\n\t\tdata_file = csv.reader(csv_file)\n\t\ttemp = next(data_file)\n\t\tn_samples = int(temp[0])\n\t\tn_features = int(temp[1])\n\t\ttarget_names = np.array(temp[2:4])\n\t\tdata = np.empty((n_samples, n_features))\n\t\ttarget = np.empty((n_samples,), dtype=np.int)\n\n\t\tfor count, value in enumerate(data_file):\n\t\t\tif len(value)<28:\n\t\t\t\tcontinue\n\t\t\ttry:\n\t\t\t\tdata[count] = np.asarray(value[:28], dtype=np.float)\n\t\t\t\ttarget[count] = np.asarray(value[28], dtype=np.int)\t\n\t\t\texcept:\n\t\t\t\tcontinue\n\t\t\t#print \"data 
is \" + str(data[count])\n\t\t\t#print \"target is \" + str(target[count])\n\n\twith open(join(module_path, 'descr', 'crawl.rst')) as rst_file:\n\t\tfdescr = rst_file.read()\n\n\treturn Bunch(data=data, target=target,\n\t\t\t target_names=target_names,\n\t\t\t DESCR=fdescr,\n\t\t\t feature_names = ['evalCount', 'setInterval', 'setTimeout', 'link', \n\t\t\t\t\t\t\t 'search', 'exec','escape', 'unescape', 'ratio', \n\t\t\t\t\t\t\t 'emtropyAvg', 'entropyScript', 'longStrings', \n\t\t\t\t\t\t\t 'maxEntropy', 'stringAvg', 'maxLength', 'longVarFunc', \n\t\t\t\t\t\t\t 'stringAssignments', 'stringModFuncsCount', 'eventFuncsCount', \n\t\t\t\t\t\t\t 'domModFuncsCounter', 'suspStrings', 'whiteSpaceRatio', \n\t\t\t\t\t\t\t 'hexaStrings', 'maxNonPrintableCharactersinString', 'lineAvg', \n\t\t\t\t\t\t\t 'iframeCount', 'malTagCount', 'jsLength'])", "def compile(c_file: File) -> File:\n os.system(\"gcc -c {c_file}\".format(c_file=c_file.path))\n return File(c_file.path.replace('.c', '.o'))", "def makeTestProcessor(test_processor_path):\r\n\r\n className = splitext(basename(test_processor_path))[0]\r\n\r\n with open(test_processor_path, 'w') as f:\r\n f.write(\"\"\"\\\r\n'''\r\nTest processor class - should be deleted upon completion of test\r\n'''\r\n\r\n'''___Built-In Modules___'''\r\nimport sys\r\nfrom os.path import dirname\r\n\r\n'''___Third-Party Modules___'''\r\n\r\n'''___NPL Modules___'''\r\ndataProcessing_directory = dirname(dirname(__file__))\r\nsys.path.append(dataProcessing_directory)\r\nfrom AbstractProcessor import AbstractProcessor\r\n\r\nclass %s(AbstractProcessor):\r\n processor_directory = dirname(__file__)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n pass\r\n\"\"\" % (className))\r\n\r\n return 0", "def _get_codeobj(pyfile):\n from imp import PY_COMPILED, PY_SOURCE\n\n result, fileobj, fullpath = _check_if_pyc(pyfile)\n\n # WARNING:\n # fp.read() can blowup if the module is extremely large file.\n # Lookout for overflow errors.\n try:\n data = fileobj.read()\n finally:\n fileobj.close()\n\n # This is a .pyc file. Treat accordingly.\n if result is PY_COMPILED:\n # .pyc format is as follows:\n # 0 - 4 bytes: Magic number, which changes with each create of .pyc file.\n # First 2 bytes change with each marshal of .pyc file. 
Last 2 bytes is \"\\r\\n\".\n # 4 - 8 bytes: Datetime value, when the .py was last changed.\n # 8 - EOF: Marshalled code object data.\n # So to get code object, just read the 8th byte onwards till EOF, and\n # UN-marshal it.\n import marshal\n code_obj = marshal.loads(data[8:])\n\n elif result is PY_SOURCE:\n # This is a .py file.\n code_obj = compile(data, fullpath, 'exec')\n\n else:\n # Unsupported extension\n raise Exception(\"Input file is unknown format: {0}\".format(fullpath))\n\n # Return code object\n return code_obj", "def get_codecoolers_from_file(cls, file_name):\n constructors = cls.load_data_from_file(file_name)\n\n for constructor in constructors:\n name, surname, login, password, email = constructor\n\n cls(name, surname, login, password, email)", "def compile(self, workdir):\n with open(workdir) as f:\n ast = self.parser.generate_ast(f.read())\n f.close()\n\n return None", "def compile(self,\n compile_dir: pathlib.Path) -> str:\n\n self.compile_dir = pathlib.Path(compile_dir).absolute()\n\n self.modules = subprocess.run('module list', shell=True, stderr=subprocess.PIPE).stderr\n\n # check compile directory.\n if not self.compile_dir.is_dir():\n warnings.warn(str(self.compile_dir.absolute()) + ' directory does not exist, creating')\n self.compile_dir.mkdir(parents=True)\n\n # Remove run directory if it exists in the source_dir\n source_compile_dir = self.source_dir.joinpath('Run')\n if source_compile_dir.is_dir():\n shutil.rmtree(str(source_compile_dir.absolute()))\n\n # Get directory for setEnvar\n compile_options_file = self.source_dir.joinpath('compile_options.sh')\n\n # Write setEnvar file\n with compile_options_file.open(mode='w') as file:\n for option, value in self.compile_options.items():\n file.write(\"export {}={}\\n\".format(option, value))\n\n # Compile\n # Create compile command for machine spec\n compile_cmd = '/bin/bash -c \"'\n if self.pre_compile_cmd is not None:\n compile_cmd += self.pre_compile_cmd + '; '\n compile_cmd += './configure ' + self.compiler + '; '\n compile_cmd += './compile_offline_NoahMP.sh '\n compile_cmd += str(compile_options_file.absolute())\n compile_cmd += '\"'\n compile_cmd = shlex.split(compile_cmd)\n\n self.compile_log = subprocess.run(\n compile_cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n cwd=str(self.source_dir.absolute())\n )\n\n # Add in unique ID file to match this object to prevent assosciating\n # this directory with another object\n self.object_id = str(uuid.uuid4())\n\n with self.compile_dir.joinpath('.uid').open(mode='w') as f:\n f.write(self.object_id)\n\n if self.compile_log.returncode == 0:\n # Open permissions on compiled files\n subprocess.run(['chmod', '-R', '755', str(self.source_dir.joinpath('Run'))])\n\n # Wrf hydro always puts files in source directory under a new directory called 'Run'\n # Copy files to the specified simulation directory if its not the same as the\n # source code directory\n if len(self.table_files) == 0:\n self.table_files = list(self.source_dir.joinpath('Run').glob('*.TBL'))\n\n shutil.copyfile(str(self.source_dir.joinpath('Run').joinpath('wrf_hydro.exe')),\n str(self.compile_dir.joinpath('wrf_hydro.exe')))\n\n # Remove old files\n # shutil.rmtree(str(self.source_dir.joinpath('Run')))\n\n # Open permissions on copied compiled files\n subprocess.run(['chmod', '-R', '755', str(self.compile_dir)])\n\n # Get file lists as attributes\n # Get list of table file paths\n\n # Get wrf_hydro.exe file path\n self.wrf_hydro_exe = self.compile_dir.joinpath('wrf_hydro.exe')\n\n # Save the 
object out to the compile directory\n with self.compile_dir.joinpath('WrfHydroModel.pkl').open(mode='wb') as f:\n pickle.dump(self, f, 2)\n\n print('Model successfully compiled into ' + str(self.compile_dir.absolute()))\n else:\n # Save the object out to the compile directory\n with self.compile_dir.joinpath('WrfHydroModel.pkl').open(mode='wb') as f:\n pickle.dump(self, f, 2)\n raise ValueError('Model did not successfully compile.' +\n self.compile_log.stderr.decode('utf-8'))", "def createClassFile( p ):\n create_modules( p[\"package\"] )\n name = p[\"protocol\"][\"name\"]\n name.lower()\n path = os.path.join( *p[\"package\"].split( \".\" ) )\n with open( \"./%s/%s.py\" % ( path, name ), \"w\" ) as f:\n for i in p[\"imports\"]:\n createClassFile( i )\n\n c = Klass( package=p[\"package\"], includes=p[\"imports\"], **p[\"protocol\"] )\t\n\n f.write( c.generate() )", "def compile(self):\n\n\t\twhile(self.tokenizer.has_more_tokens()):\n\n\t\t\tif self.tokenizer.get_token() == 'class':\n\t\t\t\tself.compile_class()\n\t\t\telif self.tokenizer.get_token() in ['field','static']:\n\t\t\t\tself.compile_class_var_dec()\n\t\t\telif self.tokenizer.get_token() in ['function', 'method', 'constructor']:\n\t\t\t\tself.compile_subroutine()\n\n\t\tself.outfile.write('<symbol> } </symbol>\\n' + '</class>')\n\t\tself.outfile.close()", "def from_stan_file(\n cls,\n stan_file: str,\n model_data: Optional[str] = None,\n *,\n stanc_args: List[str] = [],\n make_args: List[str] = [],\n seed: int = 1234,\n capture_stan_prints: bool = True,\n ):\n result = compile_model(stan_file, stanc_args=stanc_args, make_args=make_args)\n return cls(\n str(result), model_data, seed=seed, capture_stan_prints=capture_stan_prints\n )", "def compile(self):\n raise NotImplementedError()", "def FromFile(cls, filename):\n test_run = cls()\n test_run.FromDejaGnuOutput(filename)\n test_run.CleanUpTestResults()\n return test_run" ]
[ "0.5830989", "0.55374384", "0.55307627", "0.54860145", "0.54444677", "0.5422501", "0.5409071", "0.5406872", "0.5393358", "0.5390054", "0.5387643", "0.52916986", "0.52400696", "0.5232065", "0.522042", "0.5217083", "0.52124226", "0.52053887", "0.5201384", "0.5178366", "0.5175187", "0.51626575", "0.5162059", "0.5149698", "0.514757", "0.5133447", "0.51303416", "0.51298493", "0.51174104", "0.5113116" ]
0.7441334
0
Prepare normalized image and label.
def _prepare_image_and_label(self, data): image = tf.io.decode_image(data['image/encoded'], channels=3) label = tf.io.decode_image(data['image/segmentation/class/encoded'], channels=1) height = data['image/height'] width = data['image/width'] image = tf.reshape(image, (height, width, 3)) label = tf.reshape(label, (1, height, width)) label = tf.cast(label, tf.float32) # Normalizes image with mean and std pixel values. image = input_utils.normalize_image(image) return image, label
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def normalize(image, label):\n image -= settings.DATASET_MEAN\n image /= settings.DATASET_STD\n\n return image, label", "def img_normalize(image, label):\n mean, std = ds_stats\n image -= tf.constant(mean, shape=[1, 1, num_channels], dtype=image.dtype)\n image /= tf.constant(std, shape=[1, 1, num_channels], dtype=image.dtype)\n return image, label", "def prep_image_data(arg_dict):\n cat_df = pd.read_csv(arg_dict['category_file'],\n skiprows=1,\n sep='\\s+')\n bbox_df = pd.read_csv(arg_dict['bbox_file'],\n skiprows=1,\n sep='\\s+')\n img_dir = arg_dict['image_dir']\n\n combo_df = pd.merge(cat_df, bbox_df, how='outer', on='image_name')\n combo_df['image_name'] = combo_df['image_name'].apply(\n lambda x: x[len('img'):-len('.jpg')])\n labels = Labels(combo_df, img_dir, n_images_loaded=-1)\n labels.set_data_target('raw_image', chunksize=3000)\n return labels", "def imagenet_preprocess(image, label):\n i = image\n i = tf.cast(i, tf.float32)\n i = tf.image.resize_with_crop_or_pad(i, 224, 224)\n if model_name == 'ResNet50' or model_name == 'ResNet152':\n i = tf.keras.applications.resnet.preprocess_input(i)\n else:\n i = tf.keras.applications.densenet.preprocess_input(i)\n return (i, label)", "def preprocess_image(image, label, is_training):\n if is_training:\n # Randomly scale the image and label.\n image, label = preprocessing.random_rescale_image_and_label(\n image, label, _MIN_SCALE, _MAX_SCALE)\n\n # Randomly crop or pad a [_HEIGHT, _WIDTH] section of the image and label.\n image, label = preprocessing.random_crop_or_pad_image_and_label(\n image, label, _HEIGHT, _WIDTH, _IGNORE_LABEL)\n\n # Randomly flip the image and label horizontally.\n image, label = preprocessing.random_flip_left_right_image_and_label(\n image, label)\n\n image.set_shape([_HEIGHT, _WIDTH, 3])\n label.set_shape([_HEIGHT, _WIDTH, 1])\n print(\"seg11111111111\",image,label)\n image = preprocessing.mean_image_subtraction(image)\n\n return image, label", "def norm_input(image, label):\n cropped_image = tf.image.resize_image_with_crop_or_pad(image, FLAGS.image_size, FLAGS.image_size)\n\n norm_image = tf.image.per_image_standardization(cropped_image)\n\n return norm_image, label", "def preprocess(path, path2 , scale):\n image = imread(path)\n label_ = imread(path2)\n\n #label_ = modcrop(label, scale)\n\n # Must be normalized\n input_ = image / 255.\n label_ = label_ / 255.\n\n #input_ = scipy.ndimage.interpolation.zoom(label_, (1./scale), prefilter=False)\n #input_ = scipy.ndimage.interpolation.zoom(input_, (scale/1.), prefilter=False)\n\n return input_, label_", "def test_normalize(dummy_input):\n # Test the 2D image: H, W, C\n image, label = dummy_input(image_size=(512, 512, 3),\n label_size=(512, 512, 1))\n transform = Normalize(means=None, stds=None)\n _image, _label = transform(image, label, normalize_tags=[True, False])\n assert not (image == _image).all()\n assert (label == _label).all()\n\n # Test the 3D image: H, W, D, C\n image, label = dummy_input(image_size=(512, 512, 20, 3),\n label_size=(512, 512, 20, 1))\n transform = Normalize(means=None, stds=None)\n _image, _label = transform(image, label, normalize_tags=[True, False])\n assert not (image == _image).all()\n assert (label == _label).all()\n assert np.abs(np.mean(_image)-0) < 1e-8\n assert np.abs(np.std(_image)-1) < 1e-8", "def preprocess_data():\n le = preprocessing.LabelEncoder()\n # Reshape and normalize pixel values to be between 0 and 1\n train_images_reshaped = train_images.reshape(len(train_images), 1024, 1024, 1)/255.\n test_images_reshaped = 
test_images.reshape(len(test_images), 1024, 1024, 1)/255.\n\n return train_images_reshaped, test_images_reshaped, le.fit_transform(train_labels), le.fit_transform(test_labels)", "def __call__(self, src, label):\r\n # img = mx.nd.image.to_tensor(src)\r\n # img = mx.nd.image.normalize(img, mean=self._mean, std=self._std)\r\n src = mx.nd.array(src)\r\n img = mx.nd.image.to_tensor(src)\r\n img = mx.nd.image.normalize(img, mean=self._mean, std=self._std)\r\n return img, mx.nd.array(label, dtype=img.dtype)", "def _preprocess_image(self, sample):\n image = sample[common.IMAGE]\n label = sample[common.LABELS_CLASS]\n\n original_image, image, label = input_preprocess.preprocess_image_and_label(\n image=image,\n label=label,\n crop_height=self.crop_size[0],\n crop_width=self.crop_size[1],\n min_resize_value=self.min_resize_value,\n max_resize_value=self.max_resize_value,\n resize_factor=self.resize_factor,\n min_scale_factor=self.min_scale_factor,\n max_scale_factor=self.max_scale_factor,\n scale_factor_step_size=self.scale_factor_step_size,\n ignore_label=self.ignore_label,\n is_training=self.is_training,\n model_variant=self.model_variant)\n\n sample[common.IMAGE] = image\n\n if not self.is_training:\n # Original image is only used during visualization.\n sample[common.ORIGINAL_IMAGE] = original_image\n\n if label is not None:\n sample[common.LABEL] = label\n\n # Remove common.LABEL_CLASS key in the sample since it is only used to\n # derive label and not used in training and evaluation.\n sample.pop(common.LABELS_CLASS, None)\n\n return sample", "def _preprocess_image(self, input_data):\n image = self.preprocessor.preprocess(input_data.images)\n return InputData(images=image, labels=input_data.labels)", "def preprocess(img):\n \n scaler=StandardScaler() ## scaler object to perform preprocessing\n img=scaler.fit_transform(img) ## zero-center and normalize\n \n return img", "def normalise(image):", "def preprocess_image(self, inputs):\n raise NotImplementedError('preprocess_image method not implemented.')", "def prepare_data(rawimage, rawlabel, mapping, params):\n # rawimage: TF tensor: H x W x 3, tf.uint8\n # rawlabel: TF tensor: H x W, tf.uint8/16, [0,tf.uint8/16-1]\n # images: TF tensor: Nb x hf x wf x 3, tf.float32 in [0,1)\n # labels: TF tensor: Nb x hf x wf (in case of upsampling), tf.int32, [0, Nclasses] (in case of extra void class)\n\n image = tf.image.convert_image_dtype(rawimage, dtype=tf.float32)\n # resize to learnable system's dimensions\n image = tf.image.resize_images(image, [params.height_network, params.width_network])\n\n label_for_resize = tf.to_int32(rawlabel[tf.newaxis, ..., tf.newaxis])\n label = tf.image.resize_nearest_neighbor(label_for_resize, [params.height_network, params.width_network])\n label = tf.squeeze(label, axis=[0, 3])\n\n label = _lids2cids(mapping, label)\n\n return image, label", "def preprocess(self):\n \n file_name_list = os.listdir(self.image_dir)\n random.seed(1234)\n random.shuffle(file_name_list)\n \n for i,d in enumerate(self.domains):\n self.attr2idx[d]=i \n\n for i, file_name in enumerate(file_name_list):\n if (file_name.startswith('X_')):\n continue\n \n parts = file_name.split(\"-\")\n label = int(parts[0])\n if label not in self.domains:\n continue\n img_name = file_name\n\n count=self.get_sample_count(label)\n if count<self.valid_set_size:\n # create holdout set on the fly\n utils.copy_file(self.image_dir,self.valid_set_dir,img_name)\n else:\n self.dataset.append([img_name, self.attr2idx[label]])\n \n self.increment_sample_count(label)\n\n 
print(\"Sample count per domain: \"+str(self.sample_count)+\" (including holdout set, holdout size per domain is: \"+str(self.valid_set_size)+\")\")\n print('Finished preprocessing the dataset...')", "def preprocess(path, scale=3):\n image = imread(path, is_grayscale=True)\n label_ = modcrop(image, scale)\n\n # Must be normalized\n \n label_ = label_ / 255.\n \n\n\n input_ = scipy.ndimage.interpolation.zoom(label_, (1. / scale), prefilter=False)\n input_ = scipy.ndimage.interpolation.zoom(input_, (scale / 1.), prefilter=False)\n\n return input_, label_", "def prep_data(labels, image_root):\n labels = split_description(labels)\n labels = convert_plastics(labels)\n\n # Encoding shape and color data\n labels['Shape'] = encode_column(labels[['Shape']])\n labels['Color'] = encode_column(labels[['Color']])\n labels['isPlastic'] = encode_column(labels[['isPlastic']])\n labels = add_filenames(labels, image_root)\n labels = labels.dropna().reset_index()\n\n return labels", "def process_images(image, label):\n # Normalize images to have a mean of 0 and standard deviation of 1\n # per_image_standardization is preferred, which normalize the entire image to mean zero and std 1.\n # It also make learning fast.\n image = tf.image.per_image_standardization(image)\n # Resize images from 32x32 to 277x277\n image = tf.image.resize(image, (227,227))\n return image, label", "def preprocessing(image_data, max_height, max_width):\n img = image_data[\"image\"]\n img = resize_image(img, max_height, max_width)\n gt_boxes = image_data[\"objects\"][\"bbox\"]\n gt_labels = image_data[\"objects\"][\"label\"]\n return img, gt_boxes, gt_labels", "def _prepare_im(self, im):\n # Train and test setups differ\n train_size = cfg.TRAIN.IM_SIZE\n if \"train\" in self._split:\n # Scale and aspect ratio then horizontal flip\n im = transforms.random_sized_crop(im=im, size=train_size, area_frac=0.08)\n im = transforms.horizontal_flip(im=im, p=0.5, order=\"HWC\")\n else:\n # Scale and center crop\n im = transforms.scale(cfg.TEST.IM_SIZE, im)\n im = transforms.center_crop(train_size, im)\n # HWC -> CHW\n im = im.transpose([2, 0, 1])\n # [0, 255] -> [0, 1]\n im = im / 255.0\n # PCA jitter\n if \"train\" in self._split:\n im = transforms.lighting(im, 0.1, _EIG_VALS, _EIG_VECS)\n # Color normalization\n im = transforms.color_norm(im, _MEAN, _SD)\n return im", "def normalize_labels(self):\n self.y_mean, self.y_std = du.get_mean_std(self.y_train)\n self.y_train = du.normalize(self.y_train, self.y_mean, self.y_std)\n if self.x_test is not None and self.y_test is not None:\n self.y_test = du.normalize(self.y_test, self.y_mean, self.y_std)", "def normalize_image(img):\n arr = np.array(img)\n new_img = Image.fromarray(normalize(arr).astype('uint8'),'L')\n return new_img", "def preprocess(self):\n meta_file_path = os.path.join(database_directory, 'data.txt')\n meta = pd.read_csv(meta_file_path, delimiter=' ', header=None)\n meta = meta[meta[0] != '45567.jpg'] # Corrupt image.\n meta.to_pickle(os.path.join(database_directory, 'meta.pkl'))\n for file_name in meta.iloc[:, 0].values:\n if file_name.endswith('.jpg'):\n file_path = os.path.join(database_directory, file_name)\n image = imageio.imread(file_path).astype(np.uint8)\n image = transform.resize(image, (self.preprocessed_image_size, self.preprocessed_image_size),\n preserve_range=True)\n image = image.transpose((2, 0, 1))\n np.save(file_path.replace('.jpg', '.npy'), image)", "def __multilabel_processing(self):\n # read the raw dataset\n self.data['image_name'] = 
self.data['image_name'].map(lambda x: '{}.{}'.format(x, img_format))\n self.data['tags'] = self.data['tags'].map(lambda x: x.split())\n\n # create a df with the same number of rows as the dataset filled with the name of the unique values in tags\n label_names = self.data['tags'].explode().unique().tolist()\n label_df = pd.DataFrame([label_names] * self.data.shape[0], columns=label_names)\n\n # binarize the labels according to if they exist for each image or not\n self.data = pd.concat([self.data, label_df], axis=1)\n self.data[['image_name'] + label_names] = self.data.apply(lambda x: pd.Series([x[0]] + [1 if label in x[1] else 0 for label in x[2:]]), axis=1)", "def __call__(self, src, label):\n # resize shorter side but keep in max_size\n h, w, _ = src.shape\n img = timage.resize_short_within(src, self._short, self._max_size, interp=1)\n # no scaling ground-truth, return image scaling ratio instead\n bbox = tbbox.resize(label, (w, h), (img.shape[1], img.shape[0]))\n im_scale = h / float(img.shape[0])\n\n img = mx.nd.image.to_tensor(img)\n img = mx.nd.image.normalize(img, mean=self._mean, std=self._std)\n return img, bbox.astype('float32'), mx.nd.array([im_scale])", "def __init__(self, image_root, label_root, img_x, img_y):\n self.images_path = image_root\n self.labels_path = label_root\n self.data_len = 0\n self.images = []\n self.labels = open(self.labels_path, \"r\").readlines()\n self.transform = transforms.Compose([\n transforms.Resize((img_x, img_y)), \n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n\n for file in self.labels:\n self.data_len += 1\n tem = file.split(\" \")[0]\n temp = tem.split(\"-\")\n self.images.append(self.images_path + temp[0] + '/' + temp[0] + \"-\" + temp[1] + \"/\" + tem + \".png\")", "def preprocess(self, img):\n img_ = image.load_img(img, target_size=(299, 299))\n img_ = image.img_to_array(img_)\n img_ = np.expand_dims(img_, axis=0)\n img_ = preprocess_input(img_)\n return img_", "def imageprepare(self, argv):\n im = Image.open(argv).convert('L')\n img = im.resize((28, 28), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)\n tv = list(img.getdata()) \n \n # normalize pixels to 0 and 1. 0 is pure white, 1 is pure black.\n tva = [(255 - x) * 1.0 / 255.0 for x in tv]\n return tva" ]
[ "0.70676106", "0.68583", "0.66364515", "0.65736663", "0.6464647", "0.64554685", "0.6352765", "0.6297984", "0.625139", "0.62383085", "0.62295324", "0.62290204", "0.62271994", "0.6218579", "0.61705977", "0.6143832", "0.61042327", "0.60809606", "0.60745084", "0.60027677", "0.5987943", "0.596762", "0.596578", "0.5960089", "0.59530795", "0.59527105", "0.594513", "0.5929658", "0.5923151", "0.58968425" ]
0.72255284
0
Parses data for prediction.
def _parse_predict_data(self, data): image, labels = self._parse_eval_data(data) return { 'images': image, 'labels': labels }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_dataset(self, data):\n pass", "def predict(self, datafile):", "def predict(self, data: List):", "def predict(self, data):\n\t\traise NotImplementedError", "def postprocess(self, data):\n all_predictions, all_nbest_json, scores_diff_json = predictions(self._dev_dataset,\n data,\n self._tokenizer)\n\n if len(all_nbest_json) == 0 or len(all_nbest_json[0]) == 0:\n return [{'predicted': '',\n 'confidence': 0}]\n\n return [{'predicted': all_nbest_json[0][0]['text'],\n 'confidence': all_nbest_json[0][0]['probability']}]", "def predict(self, data):\n\t\tres = self.neuralNetworks.inference(self.dataCenter.process_inference_data(data))\n\t\tprint(res)", "def process(self, data):\n return self.estimator.predict(data)", "def _parse_fit_and_predict_result(result):\n if len(result) > 1 and result[1] and not isinstance(result[1], str):\n # Scores object does not resemble a label prediction (always string)\n y = result[0]\n scores = result[1]\n else:\n y = result\n scores = None\n return y, scores", "def prediction():\r\n\r\n\r\n\tpredictVal = []\r\n\taccuracy = 0.0\r\n\r\n\t# Calculate accuracy for each class in testData\r\n\tfor item in testData:\r\n\t\tclass0Prediction = posProb / 100\r\n\t\tclass1Prediction = negProb / 100\r\n\t\t\r\n\t\t# Multiply the prior probablities for negative and positive reviews by their feature likelihoods \r\n\t\tfor word in item[2]:\r\n\t\t\tclass0Prediction *= class0Dict[word]\r\n\t\t\tclass1Prediction *= class1Dict[word]\r\n\r\n\t\t# Give every item in testData a predicted value\r\n\t\tif(class0Prediction > class1Prediction):\r\n\t\t\tpredictVal.append('0')\r\n\t\telse:\r\n\t\t\tpredictVal.append('1')\r\n\r\n\tfor i in range(len(testData)):\r\n\t\tif(testData[i][1] == predictVal[i]):\r\n\t\t\taccuracy += 1\r\n\r\n\t\t\t\r\n\taccuracy = 100 * (accuracy / len(testData))\r\n\treturn(predictVal, accuracy)", "def predict(self, data_in):\n pass", "def predictSet(self, testData=\"\"):\n rawTestDataDump = self._read_file(testData)\n formattedTestData = [line.split(' ') for line in rawTestDataDump.split('\\n')]\n for test in formattedTestData:\n self._predictions.append(self.predict(test))\n return self._predictions", "def parseArray(self, data):\n self.title = data[0]\n self.director = data[1]\n self.cast = data[2]\n self.producer = data[3]\n self.writer = data[4]\n self.country = data[5]\n self.language = data[6]\n self.year = data[7]\n self.genres = data[8]\n self.votes = data[9]\n self.rating = float(data[10])\n self.runtime = data[11]\n self.plot = data[12]\n self.coverUrl = data[13]", "def data_parser(data):\n\n with open(data, 'r') as inp:\n\n # take every sample\n # the last line in the text file is empty, so reading until -1\n samples = inp.read().split('\\n')[:-1]\n\n vec = []\n labels = []\n for sample in samples:\n # file is tab delimited\n split_samples = sample.split('\\t')\n # last column contains the label\n labels.append(int(split_samples[-1]))\n\n features = []\n for feature in split_samples[:-1]:\n features.append(float(feature))\n vec.append(features)\n\n # make the features and labels as a numpy array\n vec = np.array(vec)\n labels = np.array(labels)\n return vec, labels", "def _parse_raw_predictions(self, raw_predictions):\n result = []\n for line in raw_predictions.split(\"\\n\"):\n line_parts = line.split(\"|\")\n type = line_parts[0]\n assert type.lstrip(\"*\") in (\n \"FP\", \"TP\", \"TN\", \"FN\"), 'Expected {} to be in (FP, TP, TN, FN), {}'.format(line[0], line)\n\n docid = line_parts[1]\n start_end = line_parts[2]\n entity_name = 
line_parts[3]\n alt_gene = None\n alt_gene_start_end = None\n\n if type.lstrip(\"*\") == \"TP\":\n start_end = line_parts[3]\n entity_name = line_parts[2]\n alt_gene = line_parts[4]\n alt_gene_start_end = line_parts[5]\n\n result.append({\n \"type\": type,\n \"docid\": docid,\n \"start_end\": start_end,\n \"entity_name\": entity_name,\n \"alt_gene\": alt_gene,\n \"alt_gene_start_end\": alt_gene_start_end,\n })\n return result", "def parse_prediction(self, predictions):\n\t\tusers = list()\n\t\tprint(predictions)\n\t\tfor prediction in predictions:\n\t\t\tfor email in prediction:\n\t\t\t\tusers.append(email)\n\t\t\t\t\n\t\treturn users", "def parse(cls, data):\n raise NotImplementedError", "def predict(self, data):\n return self.result.predict(data)", "def load_data(self):\r\n self.logger.log(self.log_file, 'Loading prediction data!')\r\n try:\r\n prediction_data = self.aws_operations.read_csv(self.prediction_file_path)\r\n if prediction_data is None:\r\n return None\r\n self.logger.log(self.log_file, 'Prediction data loaded successfully!')\r\n return prediction_data\r\n except Exception as e:\r\n self.logger.log(self.log_file, 'Error occurred while loading prediction data: %s' % e)\r\n raise e", "def post(self):\n result = {'status': 'error'}\n\n args = input_parser.parse_args()\n input_data = args['image'].read()\n image = self.model_wrapper._read_image(input_data)\n preds = self.model_wrapper._predict(image)\n\n # Modify this code if the schema is changed\n label_preds = [{'label_id': p[0], 'label': p[1], 'probability': p[2]} for p in [x for x in preds]]\n result['predictions'] = label_preds\n result['status'] = 'ok'\n\n return result", "def parse(self, data):\n raise NotImplementedError", "def predict(self, x):\n \n\n return predictions", "def postprocess(self, data):\n if self.error is not None:\n return [self.error]\n\n # Iterating over inference results to render the normalized probabilities\n response = []\n for inference_result in data:\n softmax_result = inference_result.softmax().asnumpy()\n for idx, label in enumerate(self.labels):\n response.append({label: float(softmax_result[0][idx])})\n return [response]", "def _predict(self, data):\n # make sure we're talking about arrays\n data = N.asarray(data)\n\n # checks only in debug mode\n if __debug__:\n if not data.ndim == 2:\n raise ValueError, \"Data array must be two-dimensional.\"\n\n if not data.shape[1] == self.__data.nfeatures:\n raise ValueError, \"Length of data samples (features) does \" \\\n \"not match the classifier.\"\n\n # compute the distance matrix between training and test data with\n # distances stored row-wise, ie. distances between test sample [0]\n # and all training samples will end up in row 0\n dists = self.__dfx(self.__data.samples, data).T\n\n # determine the k nearest neighbors per test sample\n knns = dists.argsort(axis=1)[:, :self.__k]\n\n # predicted class labels will go here\n predicted = []\n\n if self.__voting == 'majority':\n vfx = self.getMajorityVote\n elif self.__voting == 'weighted':\n vfx = self.getWeightedVote\n else:\n raise ValueError, \"kNN told to perform unknown voting '%s'.\" \\\n % self.__voting\n\n # perform voting\n results = [vfx(knn) for knn in knns]\n\n # extract predictions\n predicted = [r[0] for r in results]\n\n # store the predictions in the state. 
Relies on State._setitem to do\n # nothing if the relevant state member is not enabled\n self.predictions = predicted\n self.values = [r[1] for r in results]\n\n return predicted", "def _predict_all(self, data):\n preds = np.zeros(len(data))\n for row in data.itertuples():\n index, item, _, user = row\n preds[index] = self.predict(user, item)\n return preds", "def validate(self, validate_data):\n with open(validate_data, 'r') as validate_data:\n true_positive = 0\n true_negative = 0\n false_positive = 0\n false_negative = 0\n result = {}\n for type in self.label_type_map:\n result[type] = []\n while True:\n tokens = validate_data.readline().split()\n pos = validate_data.readline().split()\n labels = validate_data.readline().split()\n if not tokens or not pos or not labels:\n break\n # Classify all named entities in a sentence 85\n curr_results = self.viterbi(tokens)\n for i in range(0, len(labels)):\n if curr_results[i] != 'O':\n if labels[i] == 'O':\n false_positive += 1 # Not 'O', but should be 'O'\n else:\n if self.label_type_map[labels[i]] == self.label_type_map[curr_results[i]]:\n true_positive += 1 # Correct prediction\n else:\n if labels[i] == 'O':\n true_negative += 1 # Correct prediction of 'O'\n else:\n false_negative += 1 # Predicted 'O', not 'O'\n # Calculate precision - TP / (TP + FP)\n precision = float(true_positive) / float(true_positive + false_positive)\n # Calculate recall - TP / (TP + FN)\n recall = float(true_positive) / float(true_positive + false_negative)\n # Calculate F-Score - 2 * P * R / (P + R)\n f_score = float(2*precision * recall) / float(precision + recall)\n print \"Precision: \" + str(precision)\n print \"Recall: \" + str(recall)\n print \"F-score: \" + str(f_score)", "def predict(self, data):\n data['predicted'] = self.sentiment_classifier.predict_estimator(data)\n return data", "def parse_data(self):\n vehicle_data = self.data.get('vehicles')\n for vehicle_datum in vehicle_data:\n self.vehicles.append(Vehicle(**vehicle_datum))\n\n job_data = self.data.get('jobs')\n for job_datum in job_data:\n self.jobs.append(Job(**job_datum))\n\n self.matrix = self.add_dummy_location_to_matrix(self.data.get('matrix'))", "def analyze_data(data_path):\n # Check whether this data is a prediction file or not\n is_prediction, is_not_prediction = is_prediction_file(data_path)\n assert (is_prediction or is_not_prediction) and not (is_prediction and is_not_prediction), \\\n \"The file should be either a prediction file or not, i.e. 
it should contain either 2 or 3 columns\"\n sequence_pairs = []\n # if prediction, recover the original data and also compute accuracy\n per_instance_accuracy = -1\n per_sequence_accuracy = -1\n if is_prediction:\n per_instance_accuracy, per_sequence_accuracy = recover_original_data(data_path, sequence_pairs)\n # Construct sequence data\n data = SequenceData(sequence_pairs) if is_prediction else SequenceData(data_path)\n if is_prediction:\n print(\"A prediction data file:\", data_path)\n else:\n print(\"A non-prediction data file:\", data_path)\n print(\"{0} sequences (average length: {1:.1f})\".format(\n len(data.sequence_pairs), data.get_sequence_average_length()))\n print(\"{0} words\".format(data.num_of_words))\n print(\"{0} labeled words\".format(data.num_labeled_words))\n print(\"{0} word types\".format(len(data.word_count)))\n print(\"{0} label types\".format(len(data.label_count)))\n if is_prediction:\n print(\"Per-instance accuracy: {0:.3f}%\".format(per_instance_accuracy))\n print(\"Per-sequence accuracy: {0:.3f}%\".format(per_sequence_accuracy))", "def predict(self,data):\n results = []\n predict_instances = np.shape(data)[0]\n stored_instances = np.shape(self.data)[0]\n for predict_index in range(predict_instances):\n neighbors = [] # dist, label\n for stored_index in range(stored_instances):\n neighbors.append((self._distance(self.data[stored_index], data[predict_index]), self.data_labels[stored_index][0], data[predict_index]))\n neighbors = sorted(neighbors, key=lambda x: x[0])[:self.k]\n results.append(self._analyze_neighbors(neighbors))", "def predict(self, predPoints=None):" ]
[ "0.70179003", "0.68369985", "0.67741996", "0.6700655", "0.665754", "0.6550395", "0.6539271", "0.6422481", "0.6383678", "0.63100535", "0.6299874", "0.62833273", "0.6269634", "0.6264631", "0.6253759", "0.62460303", "0.62442476", "0.621106", "0.6204033", "0.6197395", "0.61885244", "0.61729884", "0.6166981", "0.6099921", "0.60890776", "0.6082593", "0.60725206", "0.6065796", "0.60638803", "0.606002" ]
0.7601515
0
If an iteriter_op is given an iterator as input, no exception should be thrown, and we should return the wrapped function's output.
def test_iteriter_op_1(): @ops.iteriter_op def f(x): return iter([4, 5, 6]) result = f(iter([1, 2, 3])) # Passing in an iterator, as expected assert(isinstance(result, collections.abc.Iterator)), f"{result}" assert(list(result) == [4, 5, 6])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_iteriter_op_3():\n\n @ops.iteriter_op\n def f(x):\n return [4, 5, 6] # Returning a list instead of an iterator\n\n with pytest.raises(ValueError):\n result = f(iter([1, 2, 3]))", "def test_iteriter_op_2():\n\n @ops.iteriter_op\n def f(x):\n return iter([4, 5, 6])\n\n with pytest.raises(ValueError):\n f([1, 2, 3]) # Passing in a list instead of an iterator", "def test_listiter_op_2():\n\n @ops.listiter_op\n def f(x):\n return iter([4, 5, 6])\n\n with pytest.raises(ValueError):\n f(iter([1, 2, 3])) # Passing in an iterator instead of a list", "def test_iterlist_op_3():\n\n @ops.iterlist_op\n def f(x):\n return iter([4, 5, 6]) # Returning an iterator instead of a list\n\n with pytest.raises(ValueError):\n result = f(iter([1, 2, 3]))", "def test_listiter_op_1():\n\n @ops.listiter_op\n def f(x):\n return iter([4, 5, 6])\n\n result = f([1, 2, 3]) # Passing in a list, as expected\n\n assert(isinstance(result, collections.abc.Iterator)), f\"{result}\"\n assert(list(result) == [4, 5, 6])", "def test_listiter_op_3():\n\n @ops.listiter_op\n def f(x):\n return [4, 5, 6] # Returning a list instead of an iterator\n\n with pytest.raises(ValueError):\n result = f([1, 2, 3])", "def test_iterlist_op_1():\n\n @ops.iterlist_op\n def f(x):\n return [4, 5, 6]\n\n result = f(iter([1, 2, 3])) # Passing in an iterator, as expected\n\n assert(isinstance(result, list)), f\"{result}\"\n assert(result == [4, 5, 6])", "def test_iterlist_op_2():\n\n @ops.iterlist_op\n def f(x):\n return [4, 5, 6]\n\n with pytest.raises(ValueError):\n f([1, 2, 3]) # Passing in a list instead of an iterator", "def chek_iter_obj(func):\n\n def wrapper(self, lst):\n try:\n iter(lst)\n return func(self, lst)\n except TypeError:\n raise TypeError(f\"{lst} isn't iterable object\")\n\n return wrapper", "def test_listlist_op_3():\n\n @ops.listlist_op\n def f(x):\n return iter([4, 5, 6]) # Returning an iterator instead of an list\n\n with pytest.raises(ValueError):\n result = f([1, 2, 3])", "def iter(space, w_collection_or_callable, w_sentinel=None):\n if w_sentinel is None:\n return space.iter(w_collection_or_callable)\n else:\n return iter_sentinel(space, w_collection_or_callable, w_sentinel)", "def test_listlist_op_2():\n\n @ops.listlist_op\n def f(x):\n return [4, 5, 6]\n\n with pytest.raises(ValueError):\n f(iter([1, 2, 3])) # Passing in an iterator instead of an list", "def __iter__(self):\n return iter(())", "def generator(func):\n\n @fn\n @wraps(func)\n def gen(*args, **kwargs):\n return Iter(func(*args, **kwargs))\n\n return gen", "def next(space, w_iterator, w_default=None):\n try:\n return space.next(w_iterator)\n except OperationError as e:\n if w_default is not None and e.match(space, space.w_StopIteration):\n return w_default\n raise", "def __iter__(self):\r\n return self._iterate()", "def safe_iterator(i):\n return i or []", "def ireduce(f, it):\n acc = it.next()\n yield acc\n for x in it:\n acc = f(acc, x)\n yield acc", "def toiter(x):\n if iterable(x):\n return x\n else:\n return [x]", "def _iterator_codegen(resty):\n\n def codegen(context, builder, sig, args):\n [d] = args\n [td] = sig.args\n iterhelper = context.make_helper(builder, resty)\n iterhelper.parent = d\n iterhelper.state = iterhelper.state.type(None)\n return impl_ret_borrowed(\n context,\n builder,\n resty,\n iterhelper._getvalue(),\n )\n\n return codegen", "def iter_except(function, exception):\r\n try:\r\n while True:\r\n yield function()\r\n except exception:\r\n return", "def intercept(iterable, function):\n\n def intercepting(iterable_):\n for item 
in iterable_:\n function(item)\n yield item\n\n return intercepting(iterable)", "def iter_except(function, exception):\n try:\n while True:\n yield function()\n except exception:\n return", "def next(self, in_op):\n raise NotImplementedError", "def cotakewhile(function, iterator):\n results = []\n\n def checkTake(shouldTake, item):\n if shouldTake == True:\n results.append(item)\n return item\n\n def dotake(item):\n d = maybeDeferred(function, item)\n d.addCallback(checkTake, item)\n return d\n\n def dostop(takeResult):\n return takeResult is None\n\n cfc = _CoFunCaller(resultCollector=dotake, stopFunction=dostop)\n return cfc.coiterate(iterator).addCallback(lambda _: results)", "def process_list(_func, iterator, *args, **kwargs):\n return [_func(i, *args, **kwargs) for i in iterator]", "def iter_except(func, exception):\n try:\n while True:\n yield func()\n except exception:\n pass", "def wrap_generator(generator, wrapper_function):\n for item in generator:\n yield wrapper_function(item)", "def generator_wrapper(iterable):\n\n num_items = len(iterable)\n for idx in range(num_items):\n yield iterable[idx]", "def get_next_as_optional(iterator):\n return iterator.get_next_as_optional()" ]
[ "0.734651", "0.7288327", "0.6928596", "0.6799886", "0.66857606", "0.6591529", "0.6551025", "0.6511721", "0.63353145", "0.59754914", "0.5973058", "0.59268594", "0.5852119", "0.58467436", "0.5843037", "0.583689", "0.57858115", "0.5740564", "0.5712443", "0.56730723", "0.5633042", "0.55738294", "0.55536985", "0.55025804", "0.54791933", "0.5469948", "0.545959", "0.54455984", "0.5414505", "0.5411123" ]
0.74068475
0
If an iteriter_op is given something besides an iterator as input, raise a ValueError.
def test_iteriter_op_2(): @ops.iteriter_op def f(x): return iter([4, 5, 6]) with pytest.raises(ValueError): f([1, 2, 3]) # Passing in a list instead of an iterator
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_iteriter_op_3():\n\n @ops.iteriter_op\n def f(x):\n return [4, 5, 6] # Returning a list instead of an iterator\n\n with pytest.raises(ValueError):\n result = f(iter([1, 2, 3]))", "def test_listiter_op_2():\n\n @ops.listiter_op\n def f(x):\n return iter([4, 5, 6])\n\n with pytest.raises(ValueError):\n f(iter([1, 2, 3])) # Passing in an iterator instead of a list", "def test_iterlist_op_3():\n\n @ops.iterlist_op\n def f(x):\n return iter([4, 5, 6]) # Returning an iterator instead of a list\n\n with pytest.raises(ValueError):\n result = f(iter([1, 2, 3]))", "def test_iterlist_op_2():\n\n @ops.iterlist_op\n def f(x):\n return [4, 5, 6]\n\n with pytest.raises(ValueError):\n f([1, 2, 3]) # Passing in a list instead of an iterator", "def test_listiter_op_3():\n\n @ops.listiter_op\n def f(x):\n return [4, 5, 6] # Returning a list instead of an iterator\n\n with pytest.raises(ValueError):\n result = f([1, 2, 3])", "def test_iteriter_op_1():\n\n @ops.iteriter_op\n def f(x):\n return iter([4, 5, 6])\n\n result = f(iter([1, 2, 3])) # Passing in an iterator, as expected\n\n assert(isinstance(result, collections.abc.Iterator)), f\"{result}\"\n assert(list(result) == [4, 5, 6])", "def test_listlist_op_2():\n\n @ops.listlist_op\n def f(x):\n return [4, 5, 6]\n\n with pytest.raises(ValueError):\n f(iter([1, 2, 3])) # Passing in an iterator instead of an list", "def test_listlist_op_3():\n\n @ops.listlist_op\n def f(x):\n return iter([4, 5, 6]) # Returning an iterator instead of an list\n\n with pytest.raises(ValueError):\n result = f([1, 2, 3])", "def test_listiter_op_1():\n\n @ops.listiter_op\n def f(x):\n return iter([4, 5, 6])\n\n result = f([1, 2, 3]) # Passing in a list, as expected\n\n assert(isinstance(result, collections.abc.Iterator)), f\"{result}\"\n assert(list(result) == [4, 5, 6])", "def test_generator_input_with_no_iterable_len_raises(self):\n for chunk_size, n_splits, n_jobs in product([None, 1, 3], [None, 1, 3], [None, 1, 3]):\n with self.subTest(chunk_size=chunk_size, n_splits=n_splits, n_jobs=n_jobs), self.assertRaises(ValueError):\n get_n_chunks(iter(self.test_data), iterable_len=None, chunk_size=chunk_size, n_splits=n_splits,\n n_jobs=n_jobs)", "def test_iterlist_op_1():\n\n @ops.iterlist_op\n def f(x):\n return [4, 5, 6]\n\n result = f(iter([1, 2, 3])) # Passing in an iterator, as expected\n\n assert(isinstance(result, list)), f\"{result}\"\n assert(result == [4, 5, 6])", "def _assert_valid_value_and_cast(self, value):\n if not hasattr(value, '__iter__'):\n raise AssertionError(f\"{value} is not a valid iterable type\")\n return value", "def test_generator_without_iterable_len(self):\n with self.assertRaises(ValueError):\n next(chunk_tasks(iter([]), n_splits=1))", "def isiterable(x):\n try:\n iter(x)\n except TypeError:\n return False\n return True", "def _data_validation(data):\n if isinstance(data, (list, tuple, type(None))) is not True:\n raise ValueError(f\"data must be tuple, list, or None, \"\n f\"data type is '{type(data).__name__}'. 
\"\n f\"Iterable data cannot be empty.\")", "def is_iterable(thing):\n\n try:\n iter(thing)\n except TypeError:\n return False\n return True", "def _is_iterable(items):\n return isinstance(items, (list, tuple, set, np.ndarray))", "def isiterable(x):\n try:\n iter(x)\n return True\n except TypeError:\n return False", "def iterable(x):\n try:\n iter(x)\n return True\n except TypeError:\n return False", "def test_iter(\n self, start: Result[int, int], exp: t.Tuple[int, ...]\n ) -> None:\n assert tuple(start.iter()) == exp", "def _is_iterable(value):\n return isinstance(value, list) or isinstance(value, tuple)", "def isIterable(value):\n try:\n iter(value)\n return True\n except:\n return False", "def test_assert_iterator(self):\n iterator = iter([1,2,3,4])\n # Should pass\n self.assert_iterator(iterator,\n count=4,\n assert_item_function=lambda i: i>0)", "def _raise_if(predicate, *args):\n if predicate:\n raise InvalidChunk(*args)", "def assert_is_iter(ext):\n if not parallel.is_iter(ext):\n ext = [ext]\n return ext", "def safe_iterator(i):\n return i or []", "def only(it: Union[Iterator[_T], Iterable[_T]]) -> _T:\n if hasattr(it, \"__next__\"):\n # noinspection PyTypeHints\n iterator: Iterator[_T] = it # type: ignore\n else:\n iterator = iter(it)\n\n try:\n ret = next(iterator)\n except StopIteration:\n raise ValueError(\"Expected only a single element in an iterable, but got none\")\n\n second_element = next(iterator, _SENTINEL)\n if second_element != _SENTINEL:\n raise ValueError(\"Expected only a single element in iterable, but got at least 2\")\n return ret", "def test_Validator_iter_errors_two_arguments(self):\n\n validator = validators.Draft7Validator({})\n with self.assertWarns(DeprecationWarning) as w:\n error, = validator.iter_errors(\"foo\", {\"type\": \"number\"})\n\n self.assertEqual(error.validator, \"type\")\n self.assertEqual(w.filename, __file__)\n self.assertTrue(\n str(w.warning).startswith(\n \"Passing a schema to Validator.iter_errors is deprecated \",\n ),\n )", "def EOF_or_raise(f):\n try:\n f.next()\n except StopIteration:\n return\n else:\n raise Exception(str(f))", "def is_iterator(x):\n if sys.version_info >= (2, 7):\n return isinstance(x, collections.Iterator)\n return isinstance(x, collections.Iterator) and hasattr(x, '__iter__')" ]
[ "0.7217243", "0.7193178", "0.6957775", "0.68198645", "0.6498388", "0.6485671", "0.63076574", "0.6105469", "0.58576494", "0.5822482", "0.57532716", "0.57530195", "0.5725821", "0.56677824", "0.5644975", "0.5638798", "0.5572163", "0.5566992", "0.5561028", "0.5558914", "0.5509184", "0.54428643", "0.54236645", "0.54004943", "0.5400248", "0.536194", "0.53423", "0.5340234", "0.5337365", "0.5331542" ]
0.75408363
0
If an iteriter_op returns something besides an iterator as output, raise a ValueError.
def test_iteriter_op_3(): @ops.iteriter_op def f(x): return [4, 5, 6] # Returning a list instead of an iterator with pytest.raises(ValueError): result = f(iter([1, 2, 3]))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_iteriter_op_2():\n\n @ops.iteriter_op\n def f(x):\n return iter([4, 5, 6])\n\n with pytest.raises(ValueError):\n f([1, 2, 3]) # Passing in a list instead of an iterator", "def test_listiter_op_2():\n\n @ops.listiter_op\n def f(x):\n return iter([4, 5, 6])\n\n with pytest.raises(ValueError):\n f(iter([1, 2, 3])) # Passing in an iterator instead of a list", "def test_iterlist_op_3():\n\n @ops.iterlist_op\n def f(x):\n return iter([4, 5, 6]) # Returning an iterator instead of a list\n\n with pytest.raises(ValueError):\n result = f(iter([1, 2, 3]))", "def test_iterlist_op_2():\n\n @ops.iterlist_op\n def f(x):\n return [4, 5, 6]\n\n with pytest.raises(ValueError):\n f([1, 2, 3]) # Passing in a list instead of an iterator", "def test_iteriter_op_1():\n\n @ops.iteriter_op\n def f(x):\n return iter([4, 5, 6])\n\n result = f(iter([1, 2, 3])) # Passing in an iterator, as expected\n\n assert(isinstance(result, collections.abc.Iterator)), f\"{result}\"\n assert(list(result) == [4, 5, 6])", "def test_listiter_op_3():\n\n @ops.listiter_op\n def f(x):\n return [4, 5, 6] # Returning a list instead of an iterator\n\n with pytest.raises(ValueError):\n result = f([1, 2, 3])", "def test_listlist_op_2():\n\n @ops.listlist_op\n def f(x):\n return [4, 5, 6]\n\n with pytest.raises(ValueError):\n f(iter([1, 2, 3])) # Passing in an iterator instead of an list", "def test_listlist_op_3():\n\n @ops.listlist_op\n def f(x):\n return iter([4, 5, 6]) # Returning an iterator instead of an list\n\n with pytest.raises(ValueError):\n result = f([1, 2, 3])", "def test_listiter_op_1():\n\n @ops.listiter_op\n def f(x):\n return iter([4, 5, 6])\n\n result = f([1, 2, 3]) # Passing in a list, as expected\n\n assert(isinstance(result, collections.abc.Iterator)), f\"{result}\"\n assert(list(result) == [4, 5, 6])", "def _assert_valid_value_and_cast(self, value):\n if not hasattr(value, '__iter__'):\n raise AssertionError(f\"{value} is not a valid iterable type\")\n return value", "def test_iterlist_op_1():\n\n @ops.iterlist_op\n def f(x):\n return [4, 5, 6]\n\n result = f(iter([1, 2, 3])) # Passing in an iterator, as expected\n\n assert(isinstance(result, list)), f\"{result}\"\n assert(result == [4, 5, 6])", "def test_generator_without_iterable_len(self):\n with self.assertRaises(ValueError):\n next(chunk_tasks(iter([]), n_splits=1))", "def test_iter(\n self, start: Result[int, int], exp: t.Tuple[int, ...]\n ) -> None:\n assert tuple(start.iter()) == exp", "def test_assert_iterator(self):\n iterator = iter([1,2,3,4])\n # Should pass\n self.assert_iterator(iterator,\n count=4,\n assert_item_function=lambda i: i>0)", "def test_generator_input_with_no_iterable_len_raises(self):\n for chunk_size, n_splits, n_jobs in product([None, 1, 3], [None, 1, 3], [None, 1, 3]):\n with self.subTest(chunk_size=chunk_size, n_splits=n_splits, n_jobs=n_jobs), self.assertRaises(ValueError):\n get_n_chunks(iter(self.test_data), iterable_len=None, chunk_size=chunk_size, n_splits=n_splits,\n n_jobs=n_jobs)", "def safe_iterator(i):\n return i or []", "def _iterator_unknown_size(self) -> Iterator[int]:\n raise NotImplementedError", "def iterable(x):\n try:\n iter(x)\n return True\n except TypeError:\n return False", "def isiterable(x):\n try:\n iter(x)\n except TypeError:\n return False\n return True", "def isIterable(value):\n try:\n iter(value)\n return True\n except:\n return False", "def only(it: Union[Iterator[_T], Iterable[_T]]) -> _T:\n if hasattr(it, \"__next__\"):\n # noinspection PyTypeHints\n iterator: Iterator[_T] = it # type: ignore\n 
else:\n iterator = iter(it)\n\n try:\n ret = next(iterator)\n except StopIteration:\n raise ValueError(\"Expected only a single element in an iterable, but got none\")\n\n second_element = next(iterator, _SENTINEL)\n if second_element != _SENTINEL:\n raise ValueError(\"Expected only a single element in iterable, but got at least 2\")\n return ret", "def is_iterable(thing):\n\n try:\n iter(thing)\n except TypeError:\n return False\n return True", "def isiterable(x):\n try:\n iter(x)\n return True\n except TypeError:\n return False", "def __iter__(self):\n return NotImplemented", "def _is_iterable(value):\n return isinstance(value, list) or isinstance(value, tuple)", "def assert_is_iter(ext):\n if not parallel.is_iter(ext):\n ext = [ext]\n return ext", "def is_iterator(x):\n if sys.version_info >= (2, 7):\n return isinstance(x, collections.Iterator)\n return isinstance(x, collections.Iterator) and hasattr(x, '__iter__')", "def testIterWithException(self):\n\t\tc = Controller()\n\t\tx = c.mock()\n\t\tx.__iter__()\n\t\tc.setReturn(1)\n\t\tc.setException(Exception)\n\t\tc.replay()\n\t\ti = x.__iter__()\n\t\tself.failUnless(i.next() == 1)\n\t\tself.failUnlessRaises(Exception, i.next)", "def _is_iterable(items):\n return isinstance(items, (list, tuple, set, np.ndarray))", "def _next_exhausted(self):\n\n raise StopIteration() from None" ]
[ "0.76426315", "0.73116994", "0.7197692", "0.69156325", "0.6845044", "0.6789451", "0.63753885", "0.6361283", "0.62555766", "0.6095181", "0.6085253", "0.59735376", "0.5929287", "0.5857033", "0.58470154", "0.58267", "0.57885355", "0.57856303", "0.57807213", "0.576038", "0.5737208", "0.57153285", "0.5705049", "0.56767434", "0.5663816", "0.5636932", "0.5633784", "0.5604108", "0.5601878", "0.5542627" ]
0.7515847
1
If a listlist_op is given a list as input, no exception should be thrown, and we should return the wrapped function's output.
def test_listlist_op_1(): @ops.listlist_op def f(x): return [4, 5, 6] result = f([1, 2, 3]) # Passing in a list, as expected assert(isinstance(result, list)), f"{result}" assert(result == [4, 5, 6])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_listlist_op_3():\n\n @ops.listlist_op\n def f(x):\n return iter([4, 5, 6]) # Returning an iterator instead of an list\n\n with pytest.raises(ValueError):\n result = f([1, 2, 3])", "def test_listlist_op_2():\n\n @ops.listlist_op\n def f(x):\n return [4, 5, 6]\n\n with pytest.raises(ValueError):\n f(iter([1, 2, 3])) # Passing in an iterator instead of an list", "def Listor(fun):\n @functools.wraps(fun)\n def inside(*args, **kwargs):\n return list(fun(*args, **kwargs))\n return inside", "def test_iterlist_op_1():\n\n @ops.iterlist_op\n def f(x):\n return [4, 5, 6]\n\n result = f(iter([1, 2, 3])) # Passing in an iterator, as expected\n\n assert(isinstance(result, list)), f\"{result}\"\n assert(result == [4, 5, 6])", "def to_list(f):\n @functools.wraps(f)\n def wrapper(*args, **kwargs):\n return list(f(*args, **kwargs))\n return wrapper", "def test_listiter_op_3():\n\n @ops.listiter_op\n def f(x):\n return [4, 5, 6] # Returning a list instead of an iterator\n\n with pytest.raises(ValueError):\n result = f([1, 2, 3])", "def list_generalizer(f):\n @functools.wraps(f)\n def wrapped(data, *args, **kwargs):\n if type(data) == list:\n return [f(d, *args, **kwargs) for d in data]\n else:\n return f(data, *args, **kwargs)\n\n return wrapped", "def listify(fn=None, wrapper=list):\n\n def listify_return(fn):\n @functools.wraps(fn)\n def listify_helper(*args, **kw):\n return wrapper(fn(*args, **kw))\n\n return listify_helper\n\n if fn is None:\n return listify_return\n return listify_return(fn)", "def visit_List(self, node):\n self.generic_visit(node)\n if isinstance(node.ctx, ast.Load):\n return to_call(to_attribute(self.operator, '__list__'), node.elts)\n return node", "def controlled_list(input_list):\n output_list = input_list\n\n if not isinstance(input_list, list):\n\n dummy_list = []\n dummy_list.append(input_list)\n output_list = dummy_list\n print('Converting')\n print('Before return')\n print(output_list)\n return output_list", "def test_iterlist_op_2():\n\n @ops.iterlist_op\n def f(x):\n return [4, 5, 6]\n\n with pytest.raises(ValueError):\n f([1, 2, 3]) # Passing in a list instead of an iterator", "def test_listiter_op_1():\n\n @ops.listiter_op\n def f(x):\n return iter([4, 5, 6])\n\n result = f([1, 2, 3]) # Passing in a list, as expected\n\n assert(isinstance(result, collections.abc.Iterator)), f\"{result}\"\n assert(list(result) == [4, 5, 6])", "def test_iterlist_op_3():\n\n @ops.iterlist_op\n def f(x):\n return iter([4, 5, 6]) # Returning an iterator instead of a list\n\n with pytest.raises(ValueError):\n result = f(iter([1, 2, 3]))", "def test_listiter_op_2():\n\n @ops.listiter_op\n def f(x):\n return iter([4, 5, 6])\n\n with pytest.raises(ValueError):\n f(iter([1, 2, 3])) # Passing in an iterator instead of a list", "def handle_list(list_name, list, args):\n if not args:\n return list\n else:\n len(args) == 1 or syntax_error(\"Wrong number of args for list expression.\")\n try:\n return list[int(args[0])]\n except ValueError:\n syntax_error(\"Invald index value: '%s'\" % args[0])\n except IndexError:\n syntax_error(\"Index out of range in '%s': %d\" % (list_name, index))", "def list_func(lst: List[valueType]) -> List[valueType]:\n tmp = [] # type: List[valueType]\n for e in lst:\n if isinstance(e, (list, set, tuple)):\n tmp.append(list_func(list(e)))\n else:\n if isinstance(e, (float, int)):\n tmp.append(func(e))\n else:\n raise Exception\n return tmp", "def listify(arg):\n if isinstance(arg, list):\n return arg\n else:\n return [arg]", "def get_list_dep() -> Callable:\n args 
= []\n body = [\" r = {}\"]\n # Apply list ops as annotations\n for list_op in self.list_ops:\n args += [f\"{list_op.name}: Optional[List[str]] = Query(None)\"]\n body += [\n f\" if {list_op.name} is not None:\",\n f' r[\"{list_op.name}\"] = {list_op.name}',\n ]\n code = [f\"def inner({', '.join(args)}) -> dict:\"] + body + [\" return r\"]\n r = {\"Optional\": typing.Optional, \"List\": typing.List, \"Query\": Query}\n exec(\"\\n\".join(code), {}, r)\n return r[\"inner\"]", "def test_returns_list(self):\n metrics = ('input', 'output')\n\n @callback_return(*metrics)\n def returns_list():\n return [2, 1, 3]\n\n r = returns_list()\n self.assertEqual(len(metrics), len(r.keys()), 'Extra return values should be dropped.')\n self.assertEqual(2, r['input'])\n self.assertEqual(1, r['output'])\n self.assertNotIn('extra', r)", "def decorator(arg):\n return lambda: list(arg)", "def process_list(_func, iterator, *args, **kwargs):\n return [_func(i, *args, **kwargs) for i in iterator]", "def is_listing(op):\n return isinstance(op, (list, tuple))", "def ListMonad(*elements: List[T]) -> _List[T]: # pylint: disable=invalid-name\n\n return _List(list(elements), None)", "def test_neg_operate_list_invalid_requests(self, list):\n key = (\"test\", \"demo\", \"list_key\")\n try:\n key, _, _ = self.as_connection.operate(key, list)\n except e.OpNotApplicable as exception:\n assert exception.code == 26", "def as_list(arg):\n if _is_list(arg):\n return arg\n return [arg]", "def listify(value):\n if isinstance(value, list):\n return value\n else:\n return [value]", "def _builtin_consult_as_list(op1, op2, **kwdargs):\n # TODO make non-recursive\n check_mode((op1, op2), ['*L'], functor='consult', **kwdargs)\n _builtin_consult(op1, **kwdargs)\n if _is_list_nonempty(op2):\n _builtin_consult_as_list(op2.args[0], op2.args[1], **kwdargs)\n return True", "def lists_equal_length(func):\n # Define the wrapper function.\n def wrapper(self, *args, **kwargs):\n\n # Collect all `list` objects from `args`.\n lists_args = [arg for arg in args if isinstance(arg, list)]\n # Collecgt all `list` object from `kwargs`.\n lists_kwargs = [arg for arg in kwargs.values() if isinstance(arg, list)]\n # Concatenate the lists of `list` objects.\n lists = lists_args + lists_kwargs\n\n # Check whether all the `list` objects have the same length.\n do_have_same_length = len(set(map(len, lists))) == 1\n\n # Raise an `InvalidArgumentsError` exception if there's a length\n # mismatch.\n if not do_have_same_length:\n msg_fmt = \"The argument lists must have the same length.\"\n raise InvalidArgumentsError(msg_fmt)\n\n # Simply execute the decorated method with the provided arguments\n # and return the result.\n return func(self, *args, **kwargs)\n\n return wrapper", "def cast_to_list(position):\n\n\[email protected]\n\tdef wrapper(function, instance, args, kwargs):\n\t\tif not isinstance(args[position], list):\n\t\t\targs = list(args)\n\t\t\targs[position] = [args[position]]\n\t\t\targs = tuple(args)\n\n\t\treturn function(*args, **kwargs)\n\n\treturn wrapper", "def list_wrap(spec):\n if not isinstance(spec, list):\n spec = [spec]\n return spec" ]
[ "0.7166146", "0.71624446", "0.7014189", "0.6786135", "0.6684333", "0.6671589", "0.6536824", "0.6533272", "0.6519165", "0.64344203", "0.6429467", "0.64098346", "0.6329758", "0.6261592", "0.6214731", "0.61999166", "0.6112252", "0.6028815", "0.6003266", "0.5961093", "0.5949775", "0.5941206", "0.588483", "0.58796406", "0.5859028", "0.5828931", "0.58254296", "0.57948834", "0.5788565", "0.57205945" ]
0.77922696
0
If a listlist_op is given something besides a list as input, raise a ValueError.
def test_listlist_op_2(): @ops.listlist_op def f(x): return [4, 5, 6] with pytest.raises(ValueError): f(iter([1, 2, 3])) # Passing in an iterator instead of an list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_listlist_op_3():\n\n @ops.listlist_op\n def f(x):\n return iter([4, 5, 6]) # Returning an iterator instead of an list\n\n with pytest.raises(ValueError):\n result = f([1, 2, 3])", "def is_listing(op):\n return isinstance(op, (list, tuple))", "def test_neg_operate_list_invalid_requests(self, list):\n key = (\"test\", \"demo\", \"list_key\")\n try:\n key, _, _ = self.as_connection.operate(key, list)\n except e.OpNotApplicable as exception:\n assert exception.code == 26", "def _validate_internal_value_list(self, value: Any):\n # Make sure the value is a list\n if not isinstance(value, list):\n raise TypeError(f\"Value must be a list\")\n\n # Make sure the list has the correct number of values in it\n length = len(value)\n if isinstance(self._nargs, int):\n if self._action == \"store\":\n if length != self._nargs:\n raise ValueError(f\"Requires list of length {self._nargs}\")\n else:\n if length == 0 or length % self._nargs != 0:\n raise ValueError(f\"Requires list of length a multiple of {self._nargs}\")\n elif self._nargs == \"+\":\n if length == 0:\n raise ValueError(f\"Requires at least one item\")\n\n if any(not isinstance(element, self._type) for element in value):\n raise TypeError(f\"All values must be of the declared type {self._type.__name__}\")", "def test_listlist_op_1():\n\n @ops.listlist_op\n def f(x):\n return [4, 5, 6]\n\n result = f([1, 2, 3]) # Passing in a list, as expected\n\n assert(isinstance(result, list)), f\"{result}\"\n assert(result == [4, 5, 6])", "def list_check(*args, func=None):\n func = func or inspect.stack()[2][3]\n for var in args:\n if not isinstance(var, (list, collections.UserList, collections.abc.MutableSequence)):\n name = type(var).__name__\n raise ListError(\n 'Function {} expected list, {} got instead.'.format(func, name))", "def handle_list(list_name, list, args):\n if not args:\n return list\n else:\n len(args) == 1 or syntax_error(\"Wrong number of args for list expression.\")\n try:\n return list[int(args[0])]\n except ValueError:\n syntax_error(\"Invald index value: '%s'\" % args[0])\n except IndexError:\n syntax_error(\"Index out of range in '%s': %d\" % (list_name, index))", "def test_iterlist_op_2():\n\n @ops.iterlist_op\n def f(x):\n return [4, 5, 6]\n\n with pytest.raises(ValueError):\n f([1, 2, 3]) # Passing in a list instead of an iterator", "def test_list_increment_with_incorrect_value_type(self):\n key = (\"test\", \"demo\", \"list_key\")\n list = [{\"op\": aerospike.OP_LIST_INCREMENT, \"index\": 2, \"bin\": \"int_bin\", \"val\": \"twenty\"}]\n\n with pytest.raises(e.AerospikeError):\n self.as_connection.operate(key, list)", "def _is_list(val):\n\n return isinstance(val, list)", "def _validate_list_type(self, name, obj, *args):\n if obj is None:\n return\n if isinstance(obj, list):\n for i in obj:\n self._validate_type_not_null(name, i, *args)\n else:\n self._validate_type(name, obj, *args)", "def test_llist_no_parameter_negative(self):\n\n with pytest.raises(TypeError) as typeError:\n TestLList.llist_integer.add()\n\n assert \"Required argument 'value' (pos 1) not found\" in typeError.value", "def test_listiter_op_3():\n\n @ops.listiter_op\n def f(x):\n return [4, 5, 6] # Returning a list instead of an iterator\n\n with pytest.raises(ValueError):\n result = f([1, 2, 3])", "def list_typecheck(val, name, msg):\n if type(val) != Pair and val != Nil():\n raise SnekEvaluationError(name + \" error: \" + msg)", "def check_list(self,list_input):\n if not isinstance(list_input,list):\n raise ValueError('input is not in list type')\n 
for i in list_input:\n if isinstance(i,list) and len(i) != 0:\n for j in i:\n if not isinstance(j,(float,int)):\n print(j)\n raise ValueError('cannot convert')\n else:\n print(i)\n raise ValueError('wrong defined')", "def test_list_type(self):\n\n expected = TypeError\n input_ = []\n with self.assertRaises(expected):\n math.factorial(input_)", "def _is_list(arg):\n return isinstance(arg, collections.Sequence) and not _is_string(arg)", "def test_listiter_op_2():\n\n @ops.listiter_op\n def f(x):\n return iter([4, 5, 6])\n\n with pytest.raises(ValueError):\n f(iter([1, 2, 3])) # Passing in an iterator instead of a list", "def test_iterlist_op_3():\n\n @ops.iterlist_op\n def f(x):\n return iter([4, 5, 6]) # Returning an iterator instead of a list\n\n with pytest.raises(ValueError):\n result = f(iter([1, 2, 3]))", "def _is_list(item):\n return isinstance(item, list)", "def test_neg_operate_append_items_not_a_list(self):\n key = (\"test\", \"demo\", \"list_key\")\n\n list = [\n {\"op\": aerospike.OP_LIST_APPEND_ITEMS, \"bin\": \"int_bin\", \"val\": 7},\n ]\n\n try:\n key, _, bins = self.as_connection.operate(key, list)\n except e.ParamError as exception:\n assert exception.code == -2", "def test_neg_operate_list_operation_bin_notlist(self):\n key = (\"test\", \"demo\", 1)\n list = [{\"op\": aerospike.OP_LIST_INSERT, \"bin\": \"age\", \"index\": 2, \"val\": 9}]\n\n try:\n (key, _, _) = self.as_connection.operate(key, list)\n\n except e.BinIncompatibleType as exception:\n assert exception.code == 12", "def visit_List(self, node):\n self.generic_visit(node)\n if isinstance(node.ctx, ast.Load):\n return to_call(to_attribute(self.operator, '__list__'), node.elts)\n return node", "def is_list(value):\n return isinstance(value, list)", "def is_list(value):\n return isinstance(value, list) or None", "def ensure_list(self, x):\n return x if isinstance(x, list) else [x]", "def test_if_it_accepts_lists(self):\n with self.assertRaises(TypeError):\n prime_numbers([])", "def test_if_it_accepts_lists(self):\n with self.assertRaises(TypeError):\n prime_numbers(56.58)", "def value_error(var, _list):\n\n #if not any(r):\n if len(_list) == 2:\n divisor = \" or \"\n elif len(_list) > 2:\n divisor = \", \"\n\n print(_list)\n print(len(_list))\n raise ValueError(\"'{var_name}' must be {type}, received '{var_type}'\"\n .format(var_name=RaiseIfNot._get_name(var),\n type=divisor.join(map(\n lambda x: \"'\" + x + \"'\",\n _list)), var_type=var))", "def __allowed_values_incorrect_list(self):\n strTestName = 'Values of a list (incorrect)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'tuple')\n RxCSObject.paramAddMan('parameter2', 'list')\n\n RxCSObject.paramAllowed('parameter2', ('Allowed string #1', 'Allowed string #2', 3, 4, 11))\n RxCSObject.parameter1 = (1, 3, 4)\n RxCSObject.parameter2 = [11, 3, 'Allowed string #1', 'Allowed string #11']\n\n self.__parametersCheck_error(RxCSObject, AllowedValuesError, strTestName)" ]
[ "0.6896146", "0.6843723", "0.6687192", "0.66674805", "0.6584861", "0.650589", "0.64047855", "0.6385669", "0.6318132", "0.62996775", "0.6293289", "0.62533414", "0.6251247", "0.62425464", "0.62239265", "0.62199765", "0.6166311", "0.61608076", "0.6119108", "0.6056942", "0.60092473", "0.5972202", "0.5963308", "0.59515876", "0.5946925", "0.5931063", "0.59263426", "0.5862697", "0.58481324", "0.58423495" ]
0.71428686
0
If a listlist_op returns something besides a list as output, raise a ValueError.
def test_listlist_op_3(): @ops.listlist_op def f(x): return iter([4, 5, 6]) # Returning an iterator instead of an list with pytest.raises(ValueError): result = f([1, 2, 3])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_listlist_op_2():\n\n @ops.listlist_op\n def f(x):\n return [4, 5, 6]\n\n with pytest.raises(ValueError):\n f(iter([1, 2, 3])) # Passing in an iterator instead of an list", "def test_listlist_op_1():\n\n @ops.listlist_op\n def f(x):\n return [4, 5, 6]\n\n result = f([1, 2, 3]) # Passing in a list, as expected\n\n assert(isinstance(result, list)), f\"{result}\"\n assert(result == [4, 5, 6])", "def is_listing(op):\n return isinstance(op, (list, tuple))", "def test_listiter_op_3():\n\n @ops.listiter_op\n def f(x):\n return [4, 5, 6] # Returning a list instead of an iterator\n\n with pytest.raises(ValueError):\n result = f([1, 2, 3])", "def ensure_list(self, x):\n return x if isinstance(x, list) else [x]", "def _is_list(val):\n\n return isinstance(val, list)", "def test_iterlist_op_2():\n\n @ops.iterlist_op\n def f(x):\n return [4, 5, 6]\n\n with pytest.raises(ValueError):\n f([1, 2, 3]) # Passing in a list instead of an iterator", "def is_list(value):\n return isinstance(value, list) or None", "def test_iterlist_op_3():\n\n @ops.iterlist_op\n def f(x):\n return iter([4, 5, 6]) # Returning an iterator instead of a list\n\n with pytest.raises(ValueError):\n result = f(iter([1, 2, 3]))", "def _validate_internal_value_list(self, value: Any):\n # Make sure the value is a list\n if not isinstance(value, list):\n raise TypeError(f\"Value must be a list\")\n\n # Make sure the list has the correct number of values in it\n length = len(value)\n if isinstance(self._nargs, int):\n if self._action == \"store\":\n if length != self._nargs:\n raise ValueError(f\"Requires list of length {self._nargs}\")\n else:\n if length == 0 or length % self._nargs != 0:\n raise ValueError(f\"Requires list of length a multiple of {self._nargs}\")\n elif self._nargs == \"+\":\n if length == 0:\n raise ValueError(f\"Requires at least one item\")\n\n if any(not isinstance(element, self._type) for element in value):\n raise TypeError(f\"All values must be of the declared type {self._type.__name__}\")", "def _is_list(item):\n return isinstance(item, list)", "def test_listiter_op_2():\n\n @ops.listiter_op\n def f(x):\n return iter([4, 5, 6])\n\n with pytest.raises(ValueError):\n f(iter([1, 2, 3])) # Passing in an iterator instead of a list", "def is_list(value):\n return isinstance(value, list)", "def handle_list(list_name, list, args):\n if not args:\n return list\n else:\n len(args) == 1 or syntax_error(\"Wrong number of args for list expression.\")\n try:\n return list[int(args[0])]\n except ValueError:\n syntax_error(\"Invald index value: '%s'\" % args[0])\n except IndexError:\n syntax_error(\"Index out of range in '%s': %d\" % (list_name, index))", "def check_list(self,list_input):\n if not isinstance(list_input,list):\n raise ValueError('input is not in list type')\n for i in list_input:\n if isinstance(i,list) and len(i) != 0:\n for j in i:\n if not isinstance(j,(float,int)):\n print(j)\n raise ValueError('cannot convert')\n else:\n print(i)\n raise ValueError('wrong defined')", "def test_neg_operate_list_invalid_requests(self, list):\n key = (\"test\", \"demo\", \"list_key\")\n try:\n key, _, _ = self.as_connection.operate(key, list)\n except e.OpNotApplicable as exception:\n assert exception.code == 26", "def list_typecheck(val, name, msg):\n if type(val) != Pair and val != Nil():\n raise SnekEvaluationError(name + \" error: \" + msg)", "def _is_list(arg):\n return isinstance(arg, collections.Sequence) and not _is_string(arg)", "def test_list_increment_with_incorrect_value_type(self):\n key = (\"test\", 
\"demo\", \"list_key\")\n list = [{\"op\": aerospike.OP_LIST_INCREMENT, \"index\": 2, \"bin\": \"int_bin\", \"val\": \"twenty\"}]\n\n with pytest.raises(e.AerospikeError):\n self.as_connection.operate(key, list)", "def visit_List(self, node):\n self.generic_visit(node)\n if isinstance(node.ctx, ast.Load):\n return to_call(to_attribute(self.operator, '__list__'), node.elts)\n return node", "def aslist(something):\n return something if isinstance(something, list) else [something]", "def test_iterlist_op_1():\n\n @ops.iterlist_op\n def f(x):\n return [4, 5, 6]\n\n result = f(iter([1, 2, 3])) # Passing in an iterator, as expected\n\n assert(isinstance(result, list)), f\"{result}\"\n assert(result == [4, 5, 6])", "def list_check(*args, func=None):\n func = func or inspect.stack()[2][3]\n for var in args:\n if not isinstance(var, (list, collections.UserList, collections.abc.MutableSequence)):\n name = type(var).__name__\n raise ListError(\n 'Function {} expected list, {} got instead.'.format(func, name))", "def test_list_type(self):\n\n expected = TypeError\n input_ = []\n with self.assertRaises(expected):\n math.factorial(input_)", "def listify(value):\n if isinstance(value, list):\n return value\n else:\n return [value]", "def is_list(self) -> bool:\n return False", "def isList(memoryManager, paramsList):\n if isEmptyList(paramsList):\n return [1.0]\n A = paramsList[0]\n if validateList(A):\n return [0.0] if len(A) <= 1 else [1.0]\n return [0.0]", "def test_creation_list():\n with pytest.raises(ValueError) as __:\n value = list()\n __ = param.Integer(value=value)", "def listify(arg):\n if isinstance(arg, list):\n return arg\n else:\n return [arg]", "def _MakeList(input):\n if len(input) == 0:\n raise ValueError(\n 'input cannot be empty.')\n elif len(input) == 1:\n output = input[0]\n if not isinstance(output, list):\n output = [output]\n else:\n output = list(input)\n return output" ]
[ "0.73349303", "0.71511084", "0.6736014", "0.66537863", "0.6652018", "0.6627136", "0.65927994", "0.6493474", "0.6485327", "0.6485104", "0.64288867", "0.6416885", "0.6406958", "0.63908684", "0.63792086", "0.6378879", "0.63132125", "0.6302996", "0.6289423", "0.62221825", "0.6194371", "0.617668", "0.61411506", "0.60991126", "0.60606086", "0.6052341", "0.6048131", "0.60342896", "0.60317266", "0.6009316" ]
0.72641724
1
If a listiter_op is given a list as input, no exception should be thrown, and we should return the wrapped function's output.
def test_listiter_op_1(): @ops.listiter_op def f(x): return iter([4, 5, 6]) result = f([1, 2, 3]) # Passing in a list, as expected assert(isinstance(result, collections.abc.Iterator)), f"{result}" assert(list(result) == [4, 5, 6])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_iterlist_op_1():\n\n @ops.iterlist_op\n def f(x):\n return [4, 5, 6]\n\n result = f(iter([1, 2, 3])) # Passing in an iterator, as expected\n\n assert(isinstance(result, list)), f\"{result}\"\n assert(result == [4, 5, 6])", "def test_listiter_op_3():\n\n @ops.listiter_op\n def f(x):\n return [4, 5, 6] # Returning a list instead of an iterator\n\n with pytest.raises(ValueError):\n result = f([1, 2, 3])", "def test_listlist_op_3():\n\n @ops.listlist_op\n def f(x):\n return iter([4, 5, 6]) # Returning an iterator instead of an list\n\n with pytest.raises(ValueError):\n result = f([1, 2, 3])", "def test_listlist_op_2():\n\n @ops.listlist_op\n def f(x):\n return [4, 5, 6]\n\n with pytest.raises(ValueError):\n f(iter([1, 2, 3])) # Passing in an iterator instead of an list", "def test_listiter_op_2():\n\n @ops.listiter_op\n def f(x):\n return iter([4, 5, 6])\n\n with pytest.raises(ValueError):\n f(iter([1, 2, 3])) # Passing in an iterator instead of a list", "def test_iterlist_op_3():\n\n @ops.iterlist_op\n def f(x):\n return iter([4, 5, 6]) # Returning an iterator instead of a list\n\n with pytest.raises(ValueError):\n result = f(iter([1, 2, 3]))", "def test_iterlist_op_2():\n\n @ops.iterlist_op\n def f(x):\n return [4, 5, 6]\n\n with pytest.raises(ValueError):\n f([1, 2, 3]) # Passing in a list instead of an iterator", "def test_listlist_op_1():\n\n @ops.listlist_op\n def f(x):\n return [4, 5, 6]\n\n result = f([1, 2, 3]) # Passing in a list, as expected\n\n assert(isinstance(result, list)), f\"{result}\"\n assert(result == [4, 5, 6])", "def test_iteriter_op_3():\n\n @ops.iteriter_op\n def f(x):\n return [4, 5, 6] # Returning a list instead of an iterator\n\n with pytest.raises(ValueError):\n result = f(iter([1, 2, 3]))", "def process_list(_func, iterator, *args, **kwargs):\n return [_func(i, *args, **kwargs) for i in iterator]", "def test_iteriter_op_1():\n\n @ops.iteriter_op\n def f(x):\n return iter([4, 5, 6])\n\n result = f(iter([1, 2, 3])) # Passing in an iterator, as expected\n\n assert(isinstance(result, collections.abc.Iterator)), f\"{result}\"\n assert(list(result) == [4, 5, 6])", "def test_iteriter_op_2():\n\n @ops.iteriter_op\n def f(x):\n return iter([4, 5, 6])\n\n with pytest.raises(ValueError):\n f([1, 2, 3]) # Passing in a list instead of an iterator", "def chek_iter_obj(func):\n\n def wrapper(self, lst):\n try:\n iter(lst)\n return func(self, lst)\n except TypeError:\n raise TypeError(f\"{lst} isn't iterable object\")\n\n return wrapper", "def Listor(fun):\n @functools.wraps(fun)\n def inside(*args, **kwargs):\n return list(fun(*args, **kwargs))\n return inside", "def list_generalizer(f):\n @functools.wraps(f)\n def wrapped(data, *args, **kwargs):\n if type(data) == list:\n return [f(d, *args, **kwargs) for d in data]\n else:\n return f(data, *args, **kwargs)\n\n return wrapped", "def builtin_iterable(func):\n if sys.version_info[:1] < (3,):\n @wraps(func)\n def inner(*args, **kwargs):\n return list(func(*args, **kwargs))\n return inner\n return func", "def to_list(f):\n @functools.wraps(f)\n def wrapper(*args, **kwargs):\n return list(f(*args, **kwargs))\n return wrapper", "def listify(fn=None, wrapper=list):\n\n def listify_return(fn):\n @functools.wraps(fn)\n def listify_helper(*args, **kw):\n return wrapper(fn(*args, **kw))\n\n return listify_helper\n\n if fn is None:\n return listify_return\n return listify_return(fn)", "def _operate_recursive(\n function: Callable[..., V], iterables: RecursiveIterable[V], result: RecursiveList[V]\n) -> RecursiveList[V]:\n for 
items in zip(*iterables): # type: ignore\n if any(isinstance(item, Iterable) for item in items): # pylint: disable=W1116\n sub_result = [] # type: ignore\n _operate_recursive(function, items, sub_result)\n else:\n sub_result = function(*items) # type: ignore\n result.append(sub_result)\n return result", "def intercept(iterable, function):\n\n def intercepting(iterable_):\n for item in iterable_:\n function(item)\n yield item\n\n return intercepting(iterable)", "def decorator(arg):\n return lambda: list(arg)", "def list_func(lst: List[valueType]) -> List[valueType]:\n tmp = [] # type: List[valueType]\n for e in lst:\n if isinstance(e, (list, set, tuple)):\n tmp.append(list_func(list(e)))\n else:\n if isinstance(e, (float, int)):\n tmp.append(func(e))\n else:\n raise Exception\n return tmp", "def visit_List(self, node):\n self.generic_visit(node)\n if isinstance(node.ctx, ast.Load):\n return to_call(to_attribute(self.operator, '__list__'), node.elts)\n return node", "def toiter(x):\n if iterable(x):\n return x\n else:\n return [x]", "def operate_recursive(function: Callable[..., V], *iterables: RecursiveIterable[V]) -> RecursiveList[V]:\n return _operate_recursive(function, iterables, [])", "def controlled_list(input_list):\n output_list = input_list\n\n if not isinstance(input_list, list):\n\n dummy_list = []\n dummy_list.append(input_list)\n output_list = dummy_list\n print('Converting')\n print('Before return')\n print(output_list)\n return output_list", "def wrapped_func(ret_val, *args, **kwargs):\n val = func(*args, **kwargs)\n ret_val.append(val)", "def safe_iterator(i):\n return i or []", "def flatmap(iterable, function_to_list):\n for element in iterable:\n list_block = function_to_list(element)\n for result_value in list_block:\n yield result_value", "def is_listing(op):\n return isinstance(op, (list, tuple))" ]
[ "0.7756567", "0.75442225", "0.75318354", "0.75228345", "0.747424", "0.74187565", "0.7351864", "0.71246946", "0.6990763", "0.6881843", "0.68182045", "0.66678816", "0.65725154", "0.65193516", "0.62121695", "0.6203566", "0.59676445", "0.5919752", "0.5807262", "0.5753957", "0.56545615", "0.5622955", "0.5607902", "0.559432", "0.5534809", "0.5485539", "0.5474979", "0.54562104", "0.54493505", "0.54087555" ]
0.76254886
1
If a listiter_op is given something besides a list as input, raise a ValueError.
def test_listiter_op_2(): @ops.listiter_op def f(x): return iter([4, 5, 6]) with pytest.raises(ValueError): f(iter([1, 2, 3])) # Passing in an iterator instead of a list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_listlist_op_2():\n\n @ops.listlist_op\n def f(x):\n return [4, 5, 6]\n\n with pytest.raises(ValueError):\n f(iter([1, 2, 3])) # Passing in an iterator instead of an list", "def test_iterlist_op_2():\n\n @ops.iterlist_op\n def f(x):\n return [4, 5, 6]\n\n with pytest.raises(ValueError):\n f([1, 2, 3]) # Passing in a list instead of an iterator", "def test_listlist_op_3():\n\n @ops.listlist_op\n def f(x):\n return iter([4, 5, 6]) # Returning an iterator instead of an list\n\n with pytest.raises(ValueError):\n result = f([1, 2, 3])", "def test_iterlist_op_3():\n\n @ops.iterlist_op\n def f(x):\n return iter([4, 5, 6]) # Returning an iterator instead of a list\n\n with pytest.raises(ValueError):\n result = f(iter([1, 2, 3]))", "def test_listiter_op_3():\n\n @ops.listiter_op\n def f(x):\n return [4, 5, 6] # Returning a list instead of an iterator\n\n with pytest.raises(ValueError):\n result = f([1, 2, 3])", "def test_iteriter_op_2():\n\n @ops.iteriter_op\n def f(x):\n return iter([4, 5, 6])\n\n with pytest.raises(ValueError):\n f([1, 2, 3]) # Passing in a list instead of an iterator", "def test_iteriter_op_3():\n\n @ops.iteriter_op\n def f(x):\n return [4, 5, 6] # Returning a list instead of an iterator\n\n with pytest.raises(ValueError):\n result = f(iter([1, 2, 3]))", "def test_iterlist_op_1():\n\n @ops.iterlist_op\n def f(x):\n return [4, 5, 6]\n\n result = f(iter([1, 2, 3])) # Passing in an iterator, as expected\n\n assert(isinstance(result, list)), f\"{result}\"\n assert(result == [4, 5, 6])", "def test_listiter_op_1():\n\n @ops.listiter_op\n def f(x):\n return iter([4, 5, 6])\n\n result = f([1, 2, 3]) # Passing in a list, as expected\n\n assert(isinstance(result, collections.abc.Iterator)), f\"{result}\"\n assert(list(result) == [4, 5, 6])", "def is_listing(op):\n return isinstance(op, (list, tuple))", "def test_neg_operate_list_invalid_requests(self, list):\n key = (\"test\", \"demo\", \"list_key\")\n try:\n key, _, _ = self.as_connection.operate(key, list)\n except e.OpNotApplicable as exception:\n assert exception.code == 26", "def _validate_internal_value_list(self, value: Any):\n # Make sure the value is a list\n if not isinstance(value, list):\n raise TypeError(f\"Value must be a list\")\n\n # Make sure the list has the correct number of values in it\n length = len(value)\n if isinstance(self._nargs, int):\n if self._action == \"store\":\n if length != self._nargs:\n raise ValueError(f\"Requires list of length {self._nargs}\")\n else:\n if length == 0 or length % self._nargs != 0:\n raise ValueError(f\"Requires list of length a multiple of {self._nargs}\")\n elif self._nargs == \"+\":\n if length == 0:\n raise ValueError(f\"Requires at least one item\")\n\n if any(not isinstance(element, self._type) for element in value):\n raise TypeError(f\"All values must be of the declared type {self._type.__name__}\")", "def test_list_type(self):\n\n expected = TypeError\n input_ = []\n with self.assertRaises(expected):\n math.factorial(input_)", "def _data_validation(data):\n if isinstance(data, (list, tuple, type(None))) is not True:\n raise ValueError(f\"data must be tuple, list, or None, \"\n f\"data type is '{type(data).__name__}'. 
\"\n f\"Iterable data cannot be empty.\")", "def test_list_increment_with_incorrect_value_type(self):\n key = (\"test\", \"demo\", \"list_key\")\n list = [{\"op\": aerospike.OP_LIST_INCREMENT, \"index\": 2, \"bin\": \"int_bin\", \"val\": \"twenty\"}]\n\n with pytest.raises(e.AerospikeError):\n self.as_connection.operate(key, list)", "def test_listlist_op_1():\n\n @ops.listlist_op\n def f(x):\n return [4, 5, 6]\n\n result = f([1, 2, 3]) # Passing in a list, as expected\n\n assert(isinstance(result, list)), f\"{result}\"\n assert(result == [4, 5, 6])", "def test_if_it_accepts_lists(self):\n with self.assertRaises(TypeError):\n prime_numbers([])", "def list_typecheck(val, name, msg):\n if type(val) != Pair and val != Nil():\n raise SnekEvaluationError(name + \" error: \" + msg)", "def test_if_it_accepts_lists(self):\n with self.assertRaises(TypeError):\n prime_numbers(56.58)", "def test_iteriter_op_1():\n\n @ops.iteriter_op\n def f(x):\n return iter([4, 5, 6])\n\n result = f(iter([1, 2, 3])) # Passing in an iterator, as expected\n\n assert(isinstance(result, collections.abc.Iterator)), f\"{result}\"\n assert(list(result) == [4, 5, 6])", "def _is_iterable(value):\n return isinstance(value, list) or isinstance(value, tuple)", "def check_list(self,list_input):\n if not isinstance(list_input,list):\n raise ValueError('input is not in list type')\n for i in list_input:\n if isinstance(i,list) and len(i) != 0:\n for j in i:\n if not isinstance(j,(float,int)):\n print(j)\n raise ValueError('cannot convert')\n else:\n print(i)\n raise ValueError('wrong defined')", "def test_empty_list_error(self):\n with self.assertRaises(ValueError):\n function_inclusion_filter_builder([])", "def test_raises_typeerror_if_arg_not_list(self):\n def result():\n return num_islands({})\n\n self.assertRaises(TypeError, result)", "def list_check(*args, func=None):\n func = func or inspect.stack()[2][3]\n for var in args:\n if not isinstance(var, (list, collections.UserList, collections.abc.MutableSequence)):\n name = type(var).__name__\n raise ListError(\n 'Function {} expected list, {} got instead.'.format(func, name))", "def test_empty_list(self):\n argument = []\n with self.assertRaises(ValueError):\n find_an_even(argument)", "def _is_list(arg):\n return isinstance(arg, collections.Sequence) and not _is_string(arg)", "def test_neg_operate_append_items_not_a_list(self):\n key = (\"test\", \"demo\", \"list_key\")\n\n list = [\n {\"op\": aerospike.OP_LIST_APPEND_ITEMS, \"bin\": \"int_bin\", \"val\": 7},\n ]\n\n try:\n key, _, bins = self.as_connection.operate(key, list)\n except e.ParamError as exception:\n assert exception.code == -2", "def test_list_increment_with_missing_value(self):\n key = (\"test\", \"demo\", \"list_key\")\n list = [\n {\n \"op\": aerospike.OP_LIST_INCREMENT,\n \"bin\": \"int_bin\",\n \"index\": 2,\n }\n ]\n\n with pytest.raises(e.ParamError):\n self.as_connection.operate(key, list)", "def _assert_valid_value_and_cast(self, value):\n if not hasattr(value, '__iter__'):\n raise AssertionError(f\"{value} is not a valid iterable type\")\n return value" ]
[ "0.76405627", "0.7451966", "0.7407579", "0.73711157", "0.7272287", "0.6842472", "0.67784035", "0.6713179", "0.65655285", "0.6331", "0.62677497", "0.62505025", "0.6126729", "0.6060513", "0.60524344", "0.60413057", "0.5994478", "0.5923151", "0.5910163", "0.5815476", "0.5803793", "0.5797612", "0.5776292", "0.57018596", "0.5693806", "0.5682503", "0.56680715", "0.56610525", "0.561739", "0.56114024" ]
0.75528175
1
If an iterlist_op is given an iterator as input, no exception should be thrown, and we should return the wrapped function's output.
def test_iterlist_op_1(): @ops.iterlist_op def f(x): return [4, 5, 6] result = f(iter([1, 2, 3])) # Passing in an iterator, as expected assert(isinstance(result, list)), f"{result}" assert(result == [4, 5, 6])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_listiter_op_2():\n\n @ops.listiter_op\n def f(x):\n return iter([4, 5, 6])\n\n with pytest.raises(ValueError):\n f(iter([1, 2, 3])) # Passing in an iterator instead of a list", "def test_listiter_op_1():\n\n @ops.listiter_op\n def f(x):\n return iter([4, 5, 6])\n\n result = f([1, 2, 3]) # Passing in a list, as expected\n\n assert(isinstance(result, collections.abc.Iterator)), f\"{result}\"\n assert(list(result) == [4, 5, 6])", "def test_iterlist_op_3():\n\n @ops.iterlist_op\n def f(x):\n return iter([4, 5, 6]) # Returning an iterator instead of a list\n\n with pytest.raises(ValueError):\n result = f(iter([1, 2, 3]))", "def test_listiter_op_3():\n\n @ops.listiter_op\n def f(x):\n return [4, 5, 6] # Returning a list instead of an iterator\n\n with pytest.raises(ValueError):\n result = f([1, 2, 3])", "def test_iteriter_op_3():\n\n @ops.iteriter_op\n def f(x):\n return [4, 5, 6] # Returning a list instead of an iterator\n\n with pytest.raises(ValueError):\n result = f(iter([1, 2, 3]))", "def test_iteriter_op_1():\n\n @ops.iteriter_op\n def f(x):\n return iter([4, 5, 6])\n\n result = f(iter([1, 2, 3])) # Passing in an iterator, as expected\n\n assert(isinstance(result, collections.abc.Iterator)), f\"{result}\"\n assert(list(result) == [4, 5, 6])", "def test_iterlist_op_2():\n\n @ops.iterlist_op\n def f(x):\n return [4, 5, 6]\n\n with pytest.raises(ValueError):\n f([1, 2, 3]) # Passing in a list instead of an iterator", "def test_listlist_op_3():\n\n @ops.listlist_op\n def f(x):\n return iter([4, 5, 6]) # Returning an iterator instead of an list\n\n with pytest.raises(ValueError):\n result = f([1, 2, 3])", "def test_listlist_op_2():\n\n @ops.listlist_op\n def f(x):\n return [4, 5, 6]\n\n with pytest.raises(ValueError):\n f(iter([1, 2, 3])) # Passing in an iterator instead of an list", "def test_iteriter_op_2():\n\n @ops.iteriter_op\n def f(x):\n return iter([4, 5, 6])\n\n with pytest.raises(ValueError):\n f([1, 2, 3]) # Passing in a list instead of an iterator", "def chek_iter_obj(func):\n\n def wrapper(self, lst):\n try:\n iter(lst)\n return func(self, lst)\n except TypeError:\n raise TypeError(f\"{lst} isn't iterable object\")\n\n return wrapper", "def process_list(_func, iterator, *args, **kwargs):\n return [_func(i, *args, **kwargs) for i in iterator]", "def test_listlist_op_1():\n\n @ops.listlist_op\n def f(x):\n return [4, 5, 6]\n\n result = f([1, 2, 3]) # Passing in a list, as expected\n\n assert(isinstance(result, list)), f\"{result}\"\n assert(result == [4, 5, 6])", "def intercept(iterable, function):\n\n def intercepting(iterable_):\n for item in iterable_:\n function(item)\n yield item\n\n return intercepting(iterable)", "def safe_iterator(i):\n return i or []", "def toiter(x):\n if iterable(x):\n return x\n else:\n return [x]", "def builtin_iterable(func):\n if sys.version_info[:1] < (3,):\n @wraps(func)\n def inner(*args, **kwargs):\n return list(func(*args, **kwargs))\n return inner\n return func", "def iter(space, w_collection_or_callable, w_sentinel=None):\n if w_sentinel is None:\n return space.iter(w_collection_or_callable)\n else:\n return iter_sentinel(space, w_collection_or_callable, w_sentinel)", "def chained(func):\n def wrapper(*args, **kwargs):\n for xs in func(*args, **kwargs):\n for x in xs:\n yield x\n return wrapper", "def _operate_recursive(\n function: Callable[..., V], iterables: RecursiveIterable[V], result: RecursiveList[V]\n) -> RecursiveList[V]:\n for items in zip(*iterables): # type: ignore\n if any(isinstance(item, Iterable) for item in items): # 
pylint: disable=W1116\n sub_result = [] # type: ignore\n _operate_recursive(function, items, sub_result)\n else:\n sub_result = function(*items) # type: ignore\n result.append(sub_result)\n return result", "def Listor(fun):\n @functools.wraps(fun)\n def inside(*args, **kwargs):\n return list(fun(*args, **kwargs))\n return inside", "def ireduce(f, it):\n acc = it.next()\n yield acc\n for x in it:\n acc = f(acc, x)\n yield acc", "def wrap_generator(generator, wrapper_function):\n for item in generator:\n yield wrapper_function(item)", "def test_iter_method(self):\n ref = mock.Mock()\n ref.side_effect = [{'rows': [1,2,3]}, {'rows': []}]\n rslt = Result(ref)\n collection = [x for x in rslt]\n self.assertEqual(collection, [1,2,3])\n\n run_iter = lambda x: [y for y in x]\n\n rslt = Result(ref, skip=1000)\n self.assertRaises(CloudantArgumentError, run_iter, rslt)\n\n rslt = Result(ref, limit=1000)\n self.assertRaises(CloudantArgumentError, run_iter, rslt)", "def cotakewhile(function, iterator):\n results = []\n\n def checkTake(shouldTake, item):\n if shouldTake == True:\n results.append(item)\n return item\n\n def dotake(item):\n d = maybeDeferred(function, item)\n d.addCallback(checkTake, item)\n return d\n\n def dostop(takeResult):\n return takeResult is None\n\n cfc = _CoFunCaller(resultCollector=dotake, stopFunction=dostop)\n return cfc.coiterate(iterator).addCallback(lambda _: results)", "def mapf(f: Callable[[D_], R_], C: Iterable[D_]) -> Iterator[R_]:\n return (f(x) for x in C)", "def generator(func):\n\n @fn\n @wraps(func)\n def gen(*args, **kwargs):\n return Iter(func(*args, **kwargs))\n\n return gen", "def _wrapper(func, args):\n return func(*args)", "def __call__(self, func, *args, **kwargs):\n\n @wraps(func) # To keep its own namespace\n def wrapper(*args, **kwargs):\n gener = self.__iter__()\n return func(gener, *args, **kwargs)\n return wrapper", "def operate_recursive(function: Callable[..., V], *iterables: RecursiveIterable[V]) -> RecursiveList[V]:\n return _operate_recursive(function, iterables, [])" ]
[ "0.76466507", "0.76008874", "0.7570353", "0.7508546", "0.74234086", "0.7396815", "0.7382202", "0.73407215", "0.7291801", "0.72297966", "0.6824224", "0.666192", "0.6128793", "0.60040385", "0.5923186", "0.58794075", "0.58676404", "0.57020146", "0.56718487", "0.5595165", "0.5567609", "0.5557475", "0.55147606", "0.5493095", "0.5481294", "0.54799074", "0.5437992", "0.5430538", "0.5417626", "0.53754437" ]
0.76173055
1
If a pool of size 3 is used, the first 3 individuals in the input iterator should be collected into a list.
def test_pool(): pop = iter([ 'a', 'b', 'c', 'd', 'e' ]) pop = ops.pool(pop, size=3) assert(len(pop) == 3) assert(pop == [ 'a', 'b', 'c' ])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def lazy_groups_of(iterator: Iterator[A], group_size: int) -> Iterator[List[A]]:\n return iter(lambda: list(islice(iterator, 0, group_size)), [])", "def construct(self, x):\n results = []\n x = self.pool0(x)\n results.append(x)\n x = self.pool1(x)\n results.append(x)\n x = self.pool2(x)\n results.append(x)\n x = self.pool3(x)\n results.append(x)\n return results", "def n_wise(x: List[Any], size: Optional[int] = 2) -> Iterable:\n\n iterator = iter(x)\n\n return iter(lambda: tuple(islice(iterator, size)), ())", "def chunk(it, size):\n it = iter(it)\n return iter(lambda: list(islice(it, size)), [])", "def chunker( it, size ):\n \n # Variables\n it = iter( it )\n \n # Selecting a bunch of jobs\n while True:\n p = tuple( itertools.islice( it, size ) )\n if not p:\n break\n yield p", "def _split_iterators(iterator, n=None):\n #if n is None:\n # item, iterator = cytoolz.peek(iterator)\n # n = len(item)\n iterators = itertools.tee(iterator, n)\n #iterators = ((sample[i] for sample in iterator) for i, iterator in enumerate(iterators))\n # Above does not work?!\n\n out = list()\n out.append(s[0] for s in iterators[0])\n out.append(s[1] for s in iterators[1])\n out.append(s[2] for s in iterators[2])\n iterators = out\n return iterators", "def chunk(it, size):\n\tit = iter(it)\n\treturn iter(lambda: tuple(islice(it, size)), ())", "def batch(size, iterable):\r\n return list(xbatch(size, iterable))", "def take(num, iterable):\n return list(islice(iterable, num))", "def __init__(self, iterator):\n self.iterator = []\n while iterator.hasNext():\n self.iterator.append(iterator.next())", "def chunk(iter_list, size):\n iter_list = iter(iter_list)\n # lambda: creates a returning expression function\n # which returns slices\n # iter, with the second argument () stops creating\n # iterators when it reaches the end\n return iter(lambda: tuple(islice(iter_list, size)), ())", "def take(n, iterable):\n return list(islice(iterable, n))", "def take(n, iterable):\n return list(islice(iterable, n))", "def grouped(iterable, n):\n # https://gist.github.com/yoyonel/fb8c9d6fb06871db527492f5144b2e7b\n iterable = iter(iterable)\n return iter(lambda: list(itertools.islice(iterable, n)), [])", "def vytvorit_generator():\n mylist = range(3)\n print 'mylist = ', mylist\n for element in mylist:\n yield element", "def take(n, iterable):\n return list(itertools.islice(iterable, n))", "def take(n, iterable):\n return list(itertools.islice(iterable, n))", "def _elements(self):\n return list(islice(self.generate(), None))", "def better_grouper(inputs, n):\n iters = [iter(inputs)] * n\n return zip(*iters)", "def batch(iterable, size):\n sourceiter = iter(iterable)\n while True:\n batchiter = islice(sourceiter, size)\n yield list(chain([batchiter.next()], batchiter))", "def chunks(sequence: Iterable[T], chunk_size: int = 2) -> Iterable[List[T]]:\n lsequence = list(sequence)\n while lsequence:\n size = min(len(lsequence), chunk_size)\n yield lsequence[:size]\n lsequence = lsequence[size:]", "def iter_chunks(sequence, chunk_size) :\n res = []\n for item in sequence :\n res.append(item)\n if len(res) >= chunk_size :\n yield res\n res = []\n if res : yield res", "def batch(iterable, k=3):\n\n for i in range(0, len(iterable), k):\n yield iterable[i:i + k]", "def chunks(iterable: Iterable, size: int) -> Iterable:\n it = iter(iterable)\n item = list(itertools.islice(it, size))\n while item:\n yield item\n item = list(itertools.islice(it, size))", "def split_chunk_iter(chunk, sizes, neighbors, rng=None):\n assert len(chunk) > len(sizes), 
f\"{len(chunk)} !> {len(sizes)}\"\n if not isinstance(rng, random.Random):\n rng = random\n # start by drawing three random items\n splits = [[c] for c in rng.sample(list(chunk), len(sizes))]\n unused = set(chunk) - set(sum(splits, []))\n max_iters = max(sizes) * len(sizes) # worst case\n for j in range(max_iters):\n i = j % len(sizes)\n size = sizes[i]\n split = splits[i]\n if len(split) == size:\n continue\n # get all of the neighbors of the split\n candidates = set()\n for c in split:\n candidates |= neighbors[c]\n # filter to unused cubes\n candidates = candidates & unused\n if not candidates:\n return None\n # Pick a candidate at random and add it\n choice = rng.choice(list(candidates))\n split.append(choice)\n unused.remove(choice)\n return splits", "def split(iterator, criterion):\n a = []\n b = []\n for x in iterator:\n if criterion(x):\n a.append(x)\n else:\n b.append(x)\n\n return a, b", "def iterator_peek(iterator: Iterator[T], count: int) -> tuple[list[T], Iterator[T]]:\n\n ret = []\n for _ in range(count):\n try:\n ret.append(next(iterator))\n except StopIteration:\n break\n\n return ret, chain(ret, iterator)", "def take(iterable, n):\n return list(itertools.islice(iterable, n))", "def iter_batch(iterable, size) -> Iterable:\n source_iter = iter(iterable)\n while source_iter:\n b = list(islice(source_iter, size))\n if len(b) == 0:\n break\n yield b", "def items():\n for i in self._iter_restrict(zeros, ones):\n yield self.pcdata[i]" ]
[ "0.640328", "0.6058334", "0.58099174", "0.5747291", "0.57104117", "0.5681089", "0.56514186", "0.5616282", "0.56008047", "0.55808794", "0.5538672", "0.5503756", "0.5503756", "0.54849136", "0.5432563", "0.54290795", "0.54290795", "0.54163194", "0.54163116", "0.5399655", "0.53910416", "0.53711814", "0.53680724", "0.53597134", "0.53490454", "0.53470373", "0.5339381", "0.533715", "0.5325946", "0.52909666" ]
0.7016338
0
Converts a wave to a vector of prosodic features. offset (in ms) determines where the signal will be sampled. window_len is ignored.
def wav_to_prosodic(path, sr=16000, offset=10): sound = parselmouth.Sound(path) pitch = sound.to_pitch() #timestep, pitch_floor, pitch_ceiling intensity = sound.to_intensity() features = [] max_time = sound.get_total_duration() for time in np.arange(0, max_time, 0.001): f0 = pitch.get_value_at_time(time) f0_nan = 0 if np.isnan(f0): f0 = 0 f0_nan = 1 int_db = intensity.get_value(time) if np.isnan(int_db): int_db = 0 features.append([f0, f0_nan, int_db]) array_feats = np.array(features).T print("SHAPE OF THE FEATURES:", array_feats.shape) assert(not np.any(np.isnan(array_feats))) return array_feats, max_time
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _choose_wavelength_slice(self, offset):\n if 'WAVE' not in self.axes_wcs.wcs.ctype:\n raise cu.CubeError(2, \"Spectral dimension not present\")\n if self.data.ndim == 4:\n raise cu.CubeError(4, \"Can only work with 3D cubes\")\n\n axis = -2 if self.axes_wcs.wcs.ctype[0] in ['TIME', 'UTC'] else -1\n arr = None\n length = self.data.shape[axis]\n if isinstance(offset, int) and offset >= 0 and offset < length:\n arr = self.data.take(offset, axis=axis)\n\n if isinstance(offset, u.Quantity):\n delta = self.axes_wcs.wcs.cdelt[-1 - axis] * u.m\n wloffset = offset.to(u.m) / delta\n wloffset = int(wloffset)\n if wloffset >= 0 and wloffset < self.data.shape[axis]:\n arr = self.data.take(wloffset, axis=axis)\n\n return arr", "def get_features(filename, training=True):\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n wav, _ = librosa.load(filename, \n sr=SAMPLE_RATE, \n mono=True,\n dtype=np.float64)\n energy = np.abs(wav)\n silence_threshold = np.percentile(energy, 95)\n offsets = np.where(energy > silence_threshold)[0]\n if training:\n audio_voice_only = wav[offsets[0]:offsets[-1]]\n else:\n #avoid cutting off too abruptly\n audio_voice_only = wav[offsets[0]:offsets[-1] + 4800]\n if training:\n if len(audio_voice_only) >= 160 * NUM_FRAMES:\n start_ = np.random.randint(len(audio_voice_only) - 160 * NUM_FRAMES + 1)\n end_ = start_ + 160 * NUM_FRAMES - 1\n audio_voice_only = audio_voice_only[start_:end_]\n else:\n return [0], [0]\n wav = librosa.util.normalize(audio_voice_only)\n #deep speaker uses preemphasis here, I do not, because I want the model to correctly transform lower\n #frequencies, too. I apply preemphasis to spectrum before putting data into model embedder instead.\n wav = lfilter([1., -PREEMPH], [1.], wav)[1:]\n #f0 extraction (most time consuming operation in this function)\n f0, timeaxis = pyworld.harvest(wav, SAMPLE_RATE, frame_period=FRAME_PERIOD, f0_floor=71.0, f0_ceil=800.0)\n sp = pyworld.cheaptrick(wav, f0, timeaxis, SAMPLE_RATE, fft_size=NFFT)\n ap = pyworld.d4c(wav, f0, timeaxis, SAMPLE_RATE, fft_size=NFFT)\n mfe = sp2mfe(sp)\n lmfe = np.log(mfe)\n mean = np.mean(lmfe)\n std = np.std(lmfe)\n nmfe = (lmfe - mean) / std\n \n if training:\n return nmfe.T, f0\n else:\n out_len = len(f0) // 4 * 4\n# out_len = len(f0)\n return nmfe[:out_len].T, mean, std, sp[:out_len], f0[:out_len], ap[:out_len]", "def wav_to_features(sample_rate, clip_duration_ms, window_size_ms,\n window_stride_ms, feature_bin_count, quantize, preprocess,\n input_wav, output_c_file):\n\n # Start a new TensorFlow session.\n sess = tf.compat.v1.InteractiveSession()\n\n model_settings = models.prepare_model_settings(\n 0, sample_rate, clip_duration_ms, window_size_ms, window_stride_ms,\n feature_bin_count, preprocess)\n audio_processor = input_data.AudioProcessor(None, None, 0, 0, '', 0, 0,\n model_settings, None)\n\n results = audio_processor.get_features_for_wav(input_wav, model_settings,\n sess)\n features = results[0]\n\n variable_base = os.path.splitext(os.path.basename(input_wav).lower())[0]\n\n # Save a C source file containing the feature data as an array.\n with gfile.GFile(output_c_file, 'w') as f:\n f.write('/* File automatically created by\\n')\n f.write(' * tensorflow/examples/speech_commands/wav_to_features.py \\\\\\n')\n f.write(' * --sample_rate=%d \\\\\\n' % sample_rate)\n f.write(' * --clip_duration_ms=%d \\\\\\n' % clip_duration_ms)\n f.write(' * --window_size_ms=%d \\\\\\n' % window_size_ms)\n f.write(' * --window_stride_ms=%d \\\\\\n' % window_stride_ms)\n f.write(' * 
--feature_bin_count=%d \\\\\\n' % feature_bin_count)\n if quantize:\n f.write(' * --quantize=1 \\\\\\n')\n f.write(' * --preprocess=\"%s\" \\\\\\n' % preprocess)\n f.write(' * --input_wav=\"%s\" \\\\\\n' % input_wav)\n f.write(' * --output_c_file=\"%s\" \\\\\\n' % output_c_file)\n f.write(' */\\n\\n')\n f.write('const int g_%s_width = %d;\\n' %\n (variable_base, model_settings['fingerprint_width']))\n f.write('const int g_%s_height = %d;\\n' %\n (variable_base, model_settings['spectrogram_length']))\n if quantize:\n features_min, features_max = input_data.get_features_range(model_settings)\n f.write('const unsigned char g_%s_data[] = {' % variable_base)\n i = 0\n for value in features.flatten():\n quantized_value = int(\n round(\n (255 * (value - features_min)) / (features_max - features_min)))\n if quantized_value < 0:\n quantized_value = 0\n if quantized_value > 255:\n quantized_value = 255\n if i == 0:\n f.write('\\n ')\n f.write('%d, ' % (quantized_value))\n i = (i + 1) % 10\n else:\n f.write('const float g_%s_data[] = {\\n' % variable_base)\n i = 0\n for value in features.flatten():\n if i == 0:\n f.write('\\n ')\n f.write('%f, ' % value)\n i = (i + 1) % 10\n f.write('\\n};\\n')", "def slice_signal(file, window_size, stride, sample_rate):\n wav, sr = librosa.load(file, sr=sample_rate)\n hop = int(window_size * stride)\n slices = []\n for end_idx in range(window_size, len(wav), hop):\n start_idx = end_idx - window_size\n slice_sig = wav[start_idx:end_idx]\n #print(type(slice_sig),' ',slice_sig.shape,'begin:',start_idx,'end_idx:',end_idx)\n slices.append(slice_sig)\n\n if(len(slices)*window_size<len(wav)):\n slice_sig = np.zeros((window_size,))\n temp = wav[len(slices)*window_size:]\n slice_sig[:len(temp)] = temp\n slices.append(slice_sig)\n #print(type(slice_sig), ' ', slice_sig.shape,'begin:',0,'end_idx:',len(temp))\n\n return slices", "def extract_features(wavfile, feature, sampling_rate=16000):\n\n raw_signal, sr = librosa.core.load(wavfile,\n sampling_rate,\n mono=True,\n dtype='float'\n )\n\n\n if feature == 'MFCC':\n feat_seq = librosa.feature.mfcc(raw_signal,\n sampling_rate,\n n_fft=400,\n hop_length=160,\n n_mfcc=13,\n fmin=75,\n fmax=5999\n )\n # Numerical Stability\n #feat_seq = np.where(feat_seq == 0, np.finfo(float).eps, feat_seq)\n\n\n elif feature == 'FBANK':\n feat_seq = librosa.feature.melspectrogram(raw_signal,\n sampling_rate,\n n_fft=400,\n hop_length=160,\n n_mels=13,\n fmin=75,\n fmax=5999\n )\n\n # Numerical Stability\n feat_seq = np.where(feat_seq == 0, np.finfo(float).eps, feat_seq)\n\n # 20 * log | convert to Me-Scale\n feat_seq = 20*np.log10(feat_seq)\n\n # z-norm: feature normalization\n feat_norm = preprocessing.scale(feat_seq, axis=1)\n\n return feat_norm", "def wand_features(data, signals=EMG_SIGNALS, frame_len=EMG_FRAME_LEN,\n frame_shift=EMG_SHIFT_LEN, k=10):\n\n # samples is n_signals x n_timesteps\n samples = np.array(data[signals].T)\n phones = compute_subphones(data[\"phone\"])\n\n n_signals, n_timesteps = samples.shape[0], samples.shape[1]\n\n # Create the 17-point weighted moving average filter shown in Figure 4.2.\n ramp_filter = np.linspace(0,0.1,num=9)\n ma_filter = np.concatenate((ramp_filter[:-1], ramp_filter[::-1]))\n assert len(ma_filter) == 17\n \n n_frames = int(n_timesteps / frame_shift)\n n_feats = 5\n features = np.zeros((n_signals, n_feats, n_frames))\n frame_phones = []\n\n for i in range(n_signals):\n # Mean normalize\n x = samples[i] - np.mean(samples[i])\n\n # Apply moving average filter to compute low frequency signal w\n w = 
np.convolve(x, ma_filter, mode=\"same\")\n\n # Compute high frequency signal p\n p = x - w\n\n # Compute rectified signal r\n r = abs(p)\n\n # Ignore any frames that are incomplete (i.e. if n_timesteps is 2500 but \n # n_frames is 416 and frame_shift is 6, count up to 416*6 = 2496 rather\n # than 2500 timesteps, so we don't end up with a unit in the features that\n # is made up of an incomplete set of samples)\n for frame_id, t in enumerate(range(0, n_frames*frame_shift, frame_shift)):\n w_frame = w[t:t+frame_len]\n p_frame = p[t:t+frame_len]\n r_frame = r[t:t+frame_len]\n M_w = np.mean(w_frame) # Frame-based mean of w\n P_w = np.mean(w_frame * w_frame) # Frame-based power of w\n P_r = np.mean(r_frame * r_frame) # Frame-based power of r\n M_r = np.mean(r_frame) # Frame-based mean of r\n\n # Zero-crossing rate of p\n z_p = len(np.where(np.diff(np.signbit(p_frame)))[0]) / len(p_frame)\n\n features[i, :, frame_id] = np.array([M_w, P_w, P_r, z_p, M_r])\n mode_phone = mode(phones[t:t+frame_len])\n frame_phones.append(mode_phone)\n\n features = np.reshape(features, [-1, n_frames])\n\n features, labels = stack_context(features, k=k, labels=frame_phones)\n\n return features, labels", "def train_sample_windowize(field, delta=1, n=20):\n padded = np.pad(field, delta, mode='constant', constant_values=-1)\n X = np.zeros((n * n, (1 + delta * 2) ** 2))\n for i in range(n):\n for j in range(n):\n X[i * n + j] = padded[i:i + 2 * delta + 1, j:j + 2 * delta + 1].ravel()\n return X", "def encode_window(self, X, window, batch_size=50, window_batch_size=10000):\n features = numpy.empty((\n numpy.shape(X)[0], self.out_channels,\n numpy.shape(X)[2] - window + 1\n ))\n masking = numpy.empty((\n min(window_batch_size, numpy.shape(X)[2] - window + 1),\n numpy.shape(X)[1], window\n ))\n for b in range(numpy.shape(X)[0]):\n for i in range(math.ceil(\n (numpy.shape(X)[2] - window + 1) / window_batch_size)\n ):\n for j in range(\n i * window_batch_size,\n min(\n (i + 1) * window_batch_size,\n numpy.shape(X)[2] - window + 1\n )\n ):\n j0 = j - i * window_batch_size\n masking[j0, :, :] = X[b, :, j: j + window]\n features[\n b, :, i * window_batch_size: (i + 1) * window_batch_size\n ] = numpy.swapaxes(\n self.encode(masking[:j0 + 1], batch_size=batch_size), 0, 1\n )\n return features", "def polyfit_window(x, window_length=5, deg=1, deriv=0, delta=1, pos=None):\n if not pos:\n pos = int(window_length/2)+1\n num_samples = len(x)\n idx = np.arange(window_length)\n x_out = np.zeros(num_samples)\n\n x_padded = np.concatenate([np.zeros(window_length-1), x])\n\n for frame_start in np.arange(num_samples):\n x_frame = x_padded[idx + frame_start]\n p = np.polyfit(idx*delta, x_frame, deg=deg)\n p = np.polyder(p, m=deriv)\n x_out[frame_start] = np.polyval(p, idx[pos]*delta)\n\n return x_out", "def get_data_rescaled(self, wave):\n m = (self.max_threshold - self.min_threshold)/(np.max(wave) - np.min(wave))\n b = self.min_threshold - m * np.min(wave)\n wave = m * wave + b\n return np.array([lfilter(self.b, self.a, wave[i]) for i in range(self.n_channels)])", "def collect_features(self, wav_path, label_path):\n n_fft = 512\n window_length = 20\n\n sound, fs = librosa.core.load(wav_path, sr=16000)\n\n if fs != 16000:\n print(wav_path)\n\n # Preemphasis\n preemp_sound = np.append(sound[0], sound[1:] - 0.97 * sound[:-1])\n\n # STFT\n spect = librosa.core.stft(preemp_sound,\n n_fft=n_fft,\n win_length=window_length * int(fs / 1000),\n hop_length=window_length * int(fs / 2000),\n window=scipy.signal.hamming,\n center=True)\n\n spect = 
np.log10(np.transpose(abs(spect[:, 1:]) ** 2) + 1e-16)\n\n return spect", "def get_data(self, wave):\n data = np.array([lfilter(self.b, self.a, wave[i]) for i in range(self.n_channels)])\n self.min_threshold = np.min(data)\n self.max_threshold = np.max(data)\n return data", "def predict_proba(self, window: np.array):\n \n data = np.transpose(np.array(window))[self.data_channels]\n print('data shape in wrapped:', data.shape)\n proba = self.clf.predict_proba(data)\n return proba[0][1] # proba = [[prob_left, prob_right]]", "def wave_get_pulses():\n return _u2i(_pigpio_command(_control, _PI_CMD_WVSP, 0, 0))", "def window_data(X, window_length):\n return X[int(len(X)/2-window_length/2):int(len(X)/2+window_length/2)]", "def extract_features(audio_filename, args):\n #print(\"Extract_features\")\n spec_type = args['spec_type']\n\n if spec_type == 'cqt':\n bin_multiple = args['bin_multiple']\n max_midi = args['max_midi']\n min_midi = args['min_midi']\n note_range = max_midi - min_midi + 1\n sr = args['sr']\n hop_length = args['hop_length']\n window_size = args['window_size']\n\n bins_per_octave = 12 * bin_multiple # should be a multiple of 12\n n_bins = note_range * bin_multiple\n\n # down-sample,mono-channel\n y, _ = librosa.load(audio_filename, sr)\n # y: an np.ndarray[ shape=(n,) ] giving the audio time series. librosa.load automatically downsamples to the\n # required sample rate sr\n # doku on librosa.cqt:\n # https://librosa.github.io/librosa/generated/librosa.core.cqt.html?highlight=cqt#librosa.core.cqts\n S = librosa.cqt(y, fmin=librosa.midi_to_hz(min_midi), sr=sr, hop_length=hop_length,\n bins_per_octave=bins_per_octave, n_bins=n_bins)\n S = S.T\n S = np.abs(S)\n min_db = np.min(S)\n print(np.min(S), np.max(S), np.mean(S))\n S = np.pad(S, ((window_size // 2, window_size // 2), (0, 0)), 'constant', constant_values=min_db)\n\n windows = []\n\n # IMPORTANT NOTE:\n # Since we pad the the spectrogram frame,\n # the onset frames are actually `offset` frames.\n # To obtain a window of the center frame at each true index, we take a slice from i to i+window_size\n # starting at frame 0 of the padded spectrogram\n for i in range(S.shape[0] - window_size + 1):\n w = S[i:i + window_size, :]\n windows.append(w)\n\n # print inputs\n x = np.array(windows)\n return x\n\n else:\n print(\"WARNING: feature type \" + spec_type + \" not implemented.\")\n return 0", "def wav_to_intensity(path, sr=16000, offset=10):\n sound = parselmouth.Sound(path)\n intensity = sound.to_intensity()\n\n features = []\n\n max_time = sound.get_total_duration()\n\n for time in np.arange(0, max_time, 0.001):\n int_db = intensity.get_value(time)\n if np.isnan(int_db):\n int_db = 0\n\n features.append([int_db])\n\n array_feats = np.array(features).T\n\n print(\"SHAPE OF THE FEATURES:\", array_feats.shape)\n assert(not np.any(np.isnan(array_feats)))\n\n return array_feats, max_time", "def calc_window(shape):\n \"\"\"Compute fourier-space window function. Like the other fourier-based\n functions in this module, equi-spaced pixels are assumed. 
Since the\n window function is separable, it is returned as an x and y part,\n such that window = wy[:,None]*wx[None,:].\"\"\"\n wy = np.sinc(np.fft.fftfreq(shape[-2]))\n wx = np.sinc(np.fft.fftfreq(shape[-1]))\n return wy, wx", "def poly_features(frames, sample_rate, *, kwargs={}):\n l = []\n for frame in frames:\n l.append(\n np.mean(\n librosa.feature.poly_features(\n y=frame,\n sr=sample_rate,\n **kwargs\n ).T, axis=0\n )\n )\n return np.array(l)", "def welch(pro, fs, nfft, window, overlap, axis, detrend, scaling):\n\n # build the welch generating function\n genfunc = partial(_spectra_estimatives, pro, fs, nfft, window, overlap, \n axis, detrend, scaling, func=periodogram)\n\n # obtain the positive freqs.\n freqs = np.fft.rfftfreq(nfft, 1/fs)\n\n # num. segments that fit into pro samples of len nfft with % overlap\n nsegs = int((pro.shape[axis] - nfft) // (nfft * (1-overlap)) + 1)\n shape = list(pro.shape)\n shape[axis] = nsegs\n\n # return producer from welch gen func with each yielded \n result = producer(genfunc, chunksize=len(freqs), axis=axis, shape=shape)\n return freqs, result", "def fram_wave(waveform: np.array, hop_length: int = 160, fft_window_size: int = 400, center: bool = True):\n frames = []\n for i in range(0, waveform.shape[0] + 1, hop_length):\n if center:\n half_window = (fft_window_size - 1) // 2 + 1\n start = i - half_window if i > half_window else 0\n end = i + half_window if i < waveform.shape[0] - half_window else waveform.shape[0]\n frame = waveform[start:end]\n if start == 0:\n padd_width = (-i + half_window, 0)\n frame = np.pad(frame, pad_width=padd_width, mode=\"reflect\")\n\n elif end == waveform.shape[0]:\n padd_width = (0, (i - waveform.shape[0] + half_window))\n frame = np.pad(frame, pad_width=padd_width, mode=\"reflect\")\n\n else:\n frame = waveform[i : i + fft_window_size]\n frame_width = frame.shape[0]\n if frame_width < waveform.shape[0]:\n frame = np.lib.pad(\n frame, pad_width=(0, fft_window_size - frame_width), mode=\"constant\", constant_values=0\n )\n frames.append(frame)\n\n frames = np.stack(frames, 0)\n return frames", "def createIntegratedPsf(self):\n\n (wavelengths, weights) = self.filter\n for i in range(len(wavelengths)):\n\n wavelength = wavelengths[i]\n weight = weights[i]\n self.convertToOpd(wavelength) # creates self.opd\n opd = self.embedOpd()\n zf = numpy.fft.fft2(opd)\n del opd\n # Compute the amplitude squared.\n # (psf is not really the point spread function yet)\n psf = np.conjugate(zf)\n # psf will now be the point spread function, but still complex\n np.multiply(psf, zf, psf)\n del zf\n # normalize the PSF, and convert to single precision\n psf = psf.real / psf.size\n psf = psf.astype(np.float32)\n\n self.center(psf)\n\n # This describes the image scale if no resampling is done.\n cdelt_before_resampling = (wavelength * MICRONStoMETERS) / \\\n (self.D * self.oversample) * RADIANStoDEGREES\n if self.pixel_size is None:\n # we won't resample the output image\n self.cdelt = cdelt_before_resampling\n # Extract a subset.\n if self.output_size < self.npix:\n o_npix = self.output_size\n n0 = (self.npix - o_npix) // 2\n self.integrated_psf += \\\n (psf[n0:n0 + o_npix, n0:n0 + o_npix] * weight)\n else:\n self.integrated_psf += (psf * weight)\n else:\n # we'll resample to this image scale\n self.cdelt = self.pixel_size / self.oversample * ARCSECtoDEGREES\n # These three parameters are only used by mapPsf and for\n # normalizing the weight after resampling.\n self.rescale = self.cdelt / cdelt_before_resampling\n self.input_center = 
(self.npix + 1) // 2\n self.output_center = (self.output_size + 1) // 2\n sub_psf = np.zeros((self.output_size, self.output_size),\n dtype=np.float32)\n # Do the resampling, writing the output to sub_psf.\n ndimage.geometric_transform(psf, self.mapPsf,\n output_shape=(self.output_size, self.output_size),\n output=sub_psf, prefilter=True)\n weight = weight * self.rescale**2\n self.integrated_psf += (sub_psf * weight)\n del sub_psf\n\n if self.verbose:\n print(\"PSF for wavelength %g has been computed\" % wavelength)", "def smooth(inpt, window_len=10, window_type='flat'):\n if not (window_len % 2 == 0):\n window_len += 1\n print('Window length supplied is odd - using next highest integer: {}.'.format(window_len))\n\n if window_len <= 3:\n print('Error in data smoothing - please select a larger window length')\n return\n\n # window_type = 'hanning'\n if not window_type in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:\n print('Error - Invalid window_type')\n return\n\n # Generate two arguments to pass into numpy.convolve()\n # s is the input signal doctored with reflections of the input at the beginning and end\n # this serves to remove noise in the smoothing method\n # w is the window matrix based on pre-defined window functions or unit matrix for flat window\n\n s = np.r_[inpt[window_len -1:0:-1], inpt, inpt[-1:-window_len:-1]]\n # w = eval('np.'+window_type+'(window_len)')\n if window_type == 'flat': # moving average\n w = np.ones(window_len, 'd')\n else:\n w = eval('np.' + window_type + '(window_len)')\n\n # create smoothed data via numpy.convolve using the normalized input window matrix\n otpt = np.convolve(w / w.sum(), s, mode='valid')\n\n # format otpt to be same size as inpt and return\n return otpt[int(window_len / 2 -1):-int(window_len / 2)]", "def freq_window(self, startwindow, stopwindow, window=\"hann\"):\n n = self.times.size\n fwindow = _freq_window(self.fs, n, startwindow, stopwindow, window=window)\n new_response = self.from_freq(self.fs, self.in_freq * fwindow)\n\n return new_response", "def track_energy(wave, win_len, hop_len, win):\n\n wave = np.lib.pad(\n wave, pad_width=(win_len-hop_len, 0), mode='constant', constant_values=0\n )\n\n # post padding\n wave = librosa.util.fix_length(\n wave, int(win_len * np.ceil(len(wave) / win_len))\n )\n\n # cut into frames\n wavmat = librosa.util.frame(wave, frame_length=win_len, hop_length=hop_len)\n\n # Envelope follower\n wavmat = hwr(wavmat) ** 0.5 # half-wave rectification + compression\n\n return np.mean((wavmat.T * win), axis=1)", "def read_process_song(path, window=1, overlap=0, debug=True):\n\n arr_features = []\n\n signal, sr = librosa.load(path)\n signal = signal[:660000]\n\n # Debug process\n if debug:\n print(\"Reading file: {}\".format(path))\n\n # Split songs:\n samples = split_songs(signal, window, overlap)\n\n # Append the result to the data structure\n for s in samples:\n features = get_features(s, sr)\n arr_features.append(features)\n return arr_features", "def extract_window_data(df, window_len=30, zero_base=True):\n window_data = []\n for idx in range(len(df) - window_len):\n tmp = df[idx: (idx + window_len)].copy()\n if zero_base:\n tmp = normalise_min_max(tmp)\n window_data.append(tmp.values)\n return np.array(window_data)\n #return window_data", "def extract_window_data(df, window_len=10, zero_base=True):\n window_data = []\n for idx in range(len(df) - window_len):\n tmp = df[idx: (idx + window_len)].copy()\n if zero_base:\n tmp = normalise_zero_base(tmp)\n window_data.append(tmp.values)\n return 
np.array(window_data)", "def compute_chunk_features(mp3_file):\n # Extract MP3 file to a mono, 10kHz WAV file\n sox_command = \"/usr/local/bin/sox\"\n out_file = 'temp.wav'\n #cmd = sox_command % (out_file, mp3_file)\n temp2 = subprocess.call([sox_command, mp3_file,'-r 10000','-c 1',out_file])\n # Read in chunks of data from WAV file\n wav_data1, wav_data2 = read_wav(out_file)\n # We'll cover how the features are computed in the next section!\n return np.array(features(wav_data1)), np.array(features(wav_data2))", "def compute_chunk_features(mp3_file):\n # On convertit le fichier mp3 en un fichier wav mono, 1avec un sample rate de 10000Hertz: on utilise\n # On utilise l'application sox \"c:/Program Files (x86)/sox-14.4.0/sox\"\n\n sox_command = \"./sox-14.4.0/sox\"\n out_file = 'temp.wav'\n #cmd = sox_command % (out_file, mp3_file)\n temp2 = subprocess.call([sox_command, mp3_file,'-r 10000','-c 1',out_file])\n # Read in chunks of data from WAV file\n wav_data1, wav_data2 = read_wav(out_file)\n # We'll cover how the features are computed in the next section!\n return np.array(features(wav_data1)), np.array(features(wav_data2))" ]
[ "0.5480287", "0.5404212", "0.5282063", "0.50624555", "0.50224143", "0.50093454", "0.4989605", "0.49353826", "0.486388", "0.48465458", "0.4828349", "0.47970074", "0.47777593", "0.47630838", "0.4727671", "0.47226426", "0.4674579", "0.4674468", "0.46724492", "0.46629953", "0.46431035", "0.46397656", "0.4619603", "0.46153444", "0.46127573", "0.46108994", "0.4610209", "0.4599091", "0.45964658", "0.4592125" ]
0.6025671
0
Print percentage of rows that have been processed.
def _print_stat_rows(title,rows_before,rows_after): self.strprint(str(title)+" : Percent of processed rows = %1.2F"\ %(np.abs(rows_before-rows_after)*100/rows_before))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def printProgress(self, percentage):\n #print '%s\\r' % ' '*20, # clean up row\n #print '%3d%% ' % percentage, # ending with comma prevents newline from being appended\n sys.stdout.flush()", "def PrintProgress(self):\n print ' Examined %d nodes, found %d unique...' % (\n self.nodes_examined, self.unique_nodes\n )", "def _print_progress(self):\n if self.current_training_size % 1000 == 0:\n print(self.current_training_size, end='')\n elif self.current_training_size % 100 == 0:\n print('.', end='')", "def report_progress(self):\r\n stats = self.simulation_stats.stats\r\n solutions = len(self.solutions)\r\n round = self.round\r\n scores = stats[round]\r\n best_score = min(scores)\r\n avg_score = sum(scores) / solutions\r\n line = f\"Round: {round}\\t\\tBest Score: {best_score}\\t\\t Average Score: {avg_score}\"\r\n print(line)", "def _log_progress(self):\n self.num_of_requests_in_pipeline += 1\n if self.num_of_requests_in_pipeline % 20 == 0:\n print('-' * 200)\n print(f'DB PIPELINE: {self.num_of_requests_in_pipeline} items wenth though pipeline.')\n print('-' * 200)", "def _progress(self, num_completed_batches, data_loader):\n return '[{}/{} ({:.0f}%)]'.format(num_completed_batches, len(data_loader),\n 100.0 * num_completed_batches / len(data_loader))", "def _print_progress(self):\n \n print 'Completed %d of %d' %(self.progress_id, self.total_work)\n self.progress_id += 1", "def print_progress(done,total):\n \n percent = 100.0*done/(total) \n bar = int(0.2*percent) \n \n sys.stdout.write('\\r')\n sys.stdout.write('[%-20s] %d%%' % ('='*bar, percent))\n sys.stdout.flush()\n \n return", "def _printProgressBar(self, fractionComplete):\n import sys\n nInc = 50\n count = int(nInc * fractionComplete)\n proBar = \"|\"\n for i in range(nInc):\n if i < count:\n proBar += \"-\"\n else:\n proBar += \" \"\n proBar += \"|\"\n print((proBar, int(fractionComplete * 100), \"%\\r\",))\n sys.stdout.flush()\n\n return", "def print_progress(self, info_dict):\n if self.n_print != 0:\n t = info_dict['t']\n if t == 1 or t % self.n_print == 0:\n string = 'Iteration {0}'.format(str(t).rjust(len(str(self.n_iter))))\n string += ' [{0}%]'.format(str(int(t / self.n_iter * 100)).rjust(3))\n print(string)", "def progress_msg(processed, total):\n if total > 1:\n percent = int((float(processed) / total) * 100)\n stderr.write(\"\\r[%d/%d] %d%%\" % (processed, total, percent))\n stderr.flush()", "def percent_processed(self):\n try:\n return (self.pos / self.data_encap.size) * 100.0\n except ZeroDivisionError:\n return 100.0", "def _printProgress(self, progress):\n if not self._quiet:\n sys.stdout.write('\\rWriting store to CSV: [{0:50s}] {1:.2f}% '.format('#' * int(progress * 50.0), progress * 100.0))\n sys.stdout.flush()", "def download_report_hook(count, block_size, total_size):\n percent = int(count * block_size * 100 / total_size)\n print(\"\\r%d%%\" % percent + \" completed\", end=\"\\r\")", "def download_report_hook(count, block_size, total_size):\n percent = int(count * block_size * 100 / total_size)\n print(\"\\r%d%%\" % percent + \" completed\", end=\"\\r\")", "def __show_progress(self, _cur_file_idx, _file_count):\n if (self.__is_show_proegress == False):\n return\n\n if(_file_count == 0):\n raise StandardError('no file found.')\n\n # show progress for each 5% (20 steps)\n digit = math.modf(math.log10(_file_count))[1]\n if(digit < 3):\n print \"prog: [{0}%] {1}/{2}\".format((100 * _cur_file_idx) /_file_count,\n _cur_file_idx, _file_count)\n else:\n digit = digit - 2\n skipstep10 = math.pow(10, digit)\n if 
((_cur_file_idx % skipstep10) == 0):\n print \"prog: [{0}%] {1}/{2}\".format((100 * _cur_file_idx) /_file_count,\n _cur_file_idx, _file_count)", "def percentage(self):\n return sum(self.chunk_percentage) / self.total_steps", "def as_percent(self):\n if self.dynamic:\n self._update_db_obj()\n return f\"{(self._db_obj.progress / self._db_obj.total) * 100}%\"", "def _progressBar(self, percent, printEvery=10):\n floor = int(percent)\n sys.stdout.write('\\r' * (floor + 9))\n sys.stdout.write('[')\n sys.stdout.write('=' * (floor/printEvery))\n sys.stdout.write('>] {:02.2f}%'.format(percent))\n sys.stdout.flush()", "def repetitive(df):\r\n total_rows = df.shape[0] \r\n for col in df.columns:\r\n count = df[col].value_counts(dropna=False)\r\n high_percent = (count/total_rows).iloc[0] \r\n if high_percent > 0.95:\r\n print('{0}: {1:.1f}%'.format(col, high_percent*100))\r\n print(count)\r\n print()", "def print_progress(remaining_pairs, G):\n total_pairs = G.number_of_nodes()**2\n uncomputed_pairs = len(remaining_pairs)\n print(1 - uncomputed_pairs/total_pairs)", "def displaySummary(self, dictionnary, lineCount):\n for key in self.summaryDict.keys():\n dictionnary[key] = (dictionnary[key] / lineCount) * 100\n #print(str(key)+\" => \"+str(dictionnary[key])+\" %\")", "def occurance(row):\r\n # divide the row's highest counted cause by the row's total number of deaths\r\n percentage = row['max_count'] / row['all_count']\r\n percentage *= 100\r\n # round the percentage up so it's two digits\r\n return round(percentage)", "def dl_progress(count, block_size, total_size):\n percent = int(count*block_size*100/total_size)\n sys.stdout.write(\"\\r\" + 'Progress:' + \"...%d%%\" % percent)\n sys.stdout.flush()", "def print_results(self) -> None:\n print(\"=\" * 70, file=sys.stderr)\n total = 0.0\n max_points = 0.0\n for problem in self.problems:\n total += problem.run_tests()\n max_points += problem.max_grade\n print(f\"Total Grade: {total}/{max_points}\", file=sys.stderr)", "def _print_progress(self):\n print(\n 'E {} S {} TR {:6.2f} G {:6.2f} Reg {:6.5f} Loss {:6.5f} AvgQ {:6.2f}'\n ' MinR {:6.2f} MaxR {:6.2f}'.format(\n self.episode, self.episode_step, self.tracker.total_reward, self.tracker.discounted_rewards,\n self.reg_loss_val, self.critic_loss_val, self.mean_q_val,\n self.tracker.min_reward, self.tracker.max_reward))", "def update_progress(self, done):\r\n if done % 100 == 0:\r\n print >>sys.stderr, \" %d processed, run time %d secs\" % (done, (datetime.now() - self.started_at).seconds)", "def OnProgress(bytes_read, total_bytes, percent):\n sys.stdout.write(\"progress: %.2f%% \\r\" % (percent))\n sys.stdout.flush()", "def progress(count, total):\r\n bar_len = 45\r\n filled_len = int(round(bar_len * count / float(total)))\r\n\r\n percents = round(100 * count / float(total), 1)\r\n p_bar = '=' * filled_len + '.' * (bar_len - filled_len)\r\n try:\r\n sys.stdout.write(' File {} of {} [{}] {}{}\\r'.format(count, total, p_bar, percents, '%'))\r\n except:\r\n pass\r\n sys.stdout.flush()", "def _percent(self, lines_total, lines_covered):\n\n if lines_total == 0:\n return '0.0'\n return str(float(float(lines_covered) / float(lines_total)))" ]
[ "0.7295432", "0.690456", "0.6810523", "0.65595317", "0.64016294", "0.63650185", "0.63531333", "0.6352935", "0.6349985", "0.6310579", "0.6283685", "0.6247981", "0.6222485", "0.6173231", "0.61398053", "0.61388904", "0.61365205", "0.6132179", "0.61136687", "0.61033106", "0.6093487", "0.60699356", "0.6062022", "0.6045899", "0.6032401", "0.60060966", "0.60033005", "0.5999316", "0.59792435", "0.5973629" ]
0.74636936
0
Remove raws with countries other then 'United Kingdom' then remove Country feature.
def _feature_country_process(self): if 'Country' not in self._df_invoice_line.columns: return list_countries_keep = ['United Kingdom'] rows_before = self._df_invoice_line.shape[0] df_invoice_line_new = pd.DataFrame() for country in list_countries_keep : df_invoice_line_new = df_invoice_line_new.append(\ self._df_invoice_line[self._df_invoice_line['Country']==country]\ , ignore_index=True) self.df_invoice_line = df_invoice_line_new del(df_invoice_line_new) rows_after = self._df_invoice_line.shape[0] _print_stat_rows("Countries filtering : ",rows_before, rows_after) #------------------------------------------------------------------------- # Due to the fact only one country is used, then this feature is dropped #------------------------------------------------------------------------- list_col_to_keep = [col for col in self._df_invoice_line.columns \ if col not in 'Country'] self._df_invoice_line = self._df_invoice_line[list_col_to_keep] return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def apply_feature_filter(self):\n self.features = set()\n for language in self.data.values():\n features_in_data = set(language.keys())\n features_to_keep = features_in_data & self.feature_filter\n self.features |= features_to_keep\n features_to_remove = features_in_data - features_to_keep\n for feat in features_to_remove:\n language.pop(feat)\n self.features = sorted(list(self.features))", "def clean_iso_country(spark, input_data):\n try:\n #read file\n df_iso_country = spark.read.option(\"header\",\"true\").csv(input_data+'wikipedia-iso-country-codes.csv')\n df = (df_iso_country.withColumnRenamed('English short name lower case','country_name') \\\n .withColumnRenamed('Alpha_2', 'country_iso2') \\\n .withColumnRenamed('Alpha_3', 'country_iso3') \\\n .withColumnRenamed('Num_code','country_num'))\n\n df_clean_iso_country = df_iso_country.drop(\"ISO_3166-2\") \\\n .select(F.col(\"Country\").alias(\"country_name\"), \\\n F.col(\"Alpha_2\").alias(\"country_iso2\"), \\\n F.col(\"Alpha_3\").alias(\"country_iso3\"), \\\n F.col(\"Num_code\").alias(\"country_num\") \\\n .cast(\"int\")) \\\n .dropDuplicates()\n print('***** Make df_clean_iso_country processing ')\n df_clean_iso_country.printSchema()\n #df_clean_iso_country.show(2)\n except Exception as e:\n print(\"Unexpected error: %s\" % e)\n else:\n return(df_clean_iso_country)", "def findCountryCode(self):\n RecordsWithCountry = []\n for state in pycountry.subdivisions:\n #print(state.name)\n for record in self.Records: \n if state.name == record.state:\n #print(state.country, record.state)\n r = RecordCountry(date=record.date,\n country=state.country.alpha_3,\n impressions=record.impressions,\n CTR=record.CTR)\n self.Records.remove(record)\n RecordsWithCountry.append(r)\n for record in self.Records: \n r = RecordCountry(date=record.date,\n country=\"XXX\",\n impressions=record.impressions,\n CTR=record.CTR)\n RecordsWithCountry.append(r)\n self.Records = RecordsWithCountry", "def clean_countries(event_db):\n event_db[\"country_edb\"] = event_db[\"country_edb\"].apply(_clean_country_str)\n event_db = my_utils.split_strings_at_comma_and_distribute_to_new_rows(event_db, 'country_edb')\n return event_db", "def clean_country(raw_country):\n #\n if raw_country[0:2]==\"l'\":\n raw_country = raw_country[2:]\n country = ''.join((c for c in unicodedata.normalize('NFD', raw_country) if unicodedata.category(c) != 'Mn'))\n country = re.sub(r\"(\\s|')\", \"-\", country) # replace space and quotes with dash\n return country", "def mask_foreign_country(column):\n codes = misc_utils.load_country_code()\n # Remove New Zealand from foreign country list\n codes = codes.drop(codes[codes['Alpha-2'] == 'nz'].index)\n # Remove texts in brackets: belgian franc (convertible) -> belgian franc\n codes['Country'] = codes['Country'].replace({r'\\(.*\\)': ''}, regex=True).str.strip()\n regex = list()\n regex.append('|'.join(r'\\s' + codes['Country'] + r'\\b'))\n # Don't use Alpha-2 and Alpha-3 since there are lots of misreplacement\n # regex.append('|'.join(r'\\s' + codes['Alpha-2'] + r'\\b'))\n # regex.append('|'.join(r'\\s' + codes['Alpha-3'] + r'\\b'))\n regex_str = '|'.join(regex)\n column = column.replace(regex_str, ' $FOREIGN_COUNTRY ', regex=True)\n return column", "def clean_data():\n datapath = Path(os.getcwd()) / \"data\"\n files = [str(file) for file in datapath.glob(\"*.csv\")]\n for file in files:\n if file.endswith(\"confirmed.csv\"):\n Confirmed = pd.read_csv(file)\n elif file.endswith(\"deaths.csv\"):\n Deaths = pd.read_csv(file)\n elif 
file.endswith(\"recovered.csv\"):\n Recovered = pd.read_csv(file)\n\n dataFrames = [Confirmed, Deaths, Recovered]\n countryList = list(dataFrames[0][\"Country/Region\"]) #list of valid countries\n countryList = list(dict.fromkeys(countryList))\n\n #create country population dictionary and align values with those in countryList\n countriesPop = {}\n countriesPop[\"US\"] = CountryInfo(\"USA\").population()\n countriesPop[\"Czechia\"] = CountryInfo(\"Czech Republic\").population()\n countriesPop[\"Taiwan*\"] = CountryInfo(\"Taiwan\").population()\n countriesPop[\"Korea, South\"] = CountryInfo(\"South Korea\").population()\n countriesPop[\"Eswatini\"] = CountryInfo(\"Swaziland\").population()\n countriesPop[\"Cote d'Ivoire\"] = CountryInfo(\"Ivory Coast\").population()\n\n for country in countryList:\n try:\n countriesPop[country] = CountryInfo(country).population()\n except KeyError:\n pass\n\n #remove unnecessary information from dataframes\n for count in range(len(dataFrames)):\n dataFrames[count] = dataFrames[count].drop(\"Province/State\",axis=1)\n dataFrames[count] = dataFrames[count].drop(\"Lat\",axis=1)\n dataFrames[count] = dataFrames[count].drop(\"Long\",axis=1)\n dataFrames[count] = dataFrames[count].rename(columns={\"Country/Region\": \"Country\"})\n dataFrames[count][\"Country\"] = dataFrames[count][\"Country\"].replace({\"Korea, South\": \"South Korea\"})\n dataFrames[count] = dataFrames[count].groupby(\"Country\").sum()\n\n # create per 100k capita values by dividing country data by population\n ConfirmedPC = dataFrames[0].copy()\n DeathsPC = dataFrames[1].copy()\n RecoveredPC = dataFrames[2].copy()\n countryList.append(\"South Korea\")\n\n for country in countryList:\n try:\n ConfirmedPC.loc[country] = ConfirmedPC.loc[country].divide(countriesPop[country]).multiply(100000) #confirmed cases per 100k inhabitants\n DeathsPC.loc[country] = DeathsPC.loc[country].divide(countriesPop[country]).multiply(100000) #deaths per 100k inhabitants\n RecoveredPC.loc[country] = RecoveredPC.loc[country].divide(countriesPop[country]).multiply(100000) #recovered cases per 100k inhabitants\n except KeyError:\n pass\n\n dataFrames.extend([ConfirmedPC, DeathsPC, RecoveredPC])\n\n return dataFrames, countryList", "def build_feature_filter(self):\n if self.features == [\"*\"]:\n random_iso = list(self.data.keys())[0]\n self.features = set()\n for lang_features in self.data.values():\n self.features |= set(lang_features.keys())\n self.features = list(self.features)\n if self.exclusions:\n self.features = [f for f in self.features if f not in self.exclusions]\n self.feature_filter = set(self.features)", "def discard(self):\n for f in self.featureNames:\n self.data = self.data[self.data[:,self._getFIdx(f)] != '-99999']\n return", "def clean_information(data):\n\n\t# create a list dict\n\tcountries = []\n\t\n\t# open csv file\n\twith open('input.csv') as csvfile:\n\n\t\t# read in file as dictionary\n\t\tdatareader = csv.DictReader(csvfile)\n\n\t\t# for every row in data reader\n\t\tfor row in datareader:\n\n\t\t\t# create space for a dictionary\n\t\t\tdictionary = {}\n\n\t\t\t# if value is unknown go to next country\n\t\t\tif row['Pop. Density (per sq. mi.)'] == 'unknown':\n\t\t\t\tcontinue\n\n\t\t\tif row['GDP ($ per capita) dollars'] == 'unknown':\n\t\t\t\tcontinue\n\n\t\t\t# if no value go to next country\n\t\t\tif not row['Pop. Density (per sq. 
mi.)']:\n\t\t\t\tcontinue\n\n\t\t\t# if no value go to next country\t\n\t\t\tif not row['Infant mortality (per 1000 births)']:\n\t\t\t\tcontinue\n\n\t\t\t# if no value go to next country\n\t\t\tif not row['GDP ($ per capita) dollars']:\n\t\t\t\tcontinue\n\n\t\t\t# find country and strip for white space\n\t\t\tdictionary['Country'] = row['Country'].rstrip()\n\n\t\t\t# get region and put it in a dictionary\n\t\t\tdictionary['Region'] = row['Region'].rstrip()\n\n\t\t\t# add population density to dictionary\n\t\t\tdictionary['Pop. Density (per sq. mi.)'] = row['Pop. Density (per sq. mi.)']\n\n\t\t\t# add infant mortality to dictionary\n\t\t\tdictionary['Infant mortality (per 1000 births)'] = row['Infant mortality (per 1000 births)']\n\n\t\t\t# add GDP per capita to dictionary and keep only numbers\n\t\t\tdictionary['GDP ($ per capita) dollars'] = row['GDP ($ per capita) dollars'].split()[0]\n\n\t\t\t# append everything to a list\n\t\t\tcountries.append(dictionary)\n\n\t\treturn countries", "def remove_redundant_regions(self):\r\n self.flanking_region.attributes.id = self._flanking_region.attributes.id\r\n self.flanking_region.attributes.parent = ''\r\n for feature in self.pcr_product:\r\n feature.attributes.id = feature.attributes.parent\r\n feature.attributes.parent = ''\r\n self._flanking_region = None\r\n self.gt_seq_region = []\r\n if self.pcr_product:\r\n snp_parent = self.pcr_product[0].attributes.id\r\n else:\r\n snp_parent = self.flanking_region.attributes.id\r\n for snp in self.snp:\r\n snp.attributes.parent = snp_parent", "def populate_countries(self):\n # For each country in population.\n for name, pop in self.population.iterrows():\n p = pop['Population']\n # Get all relevant time series based on country name.\n c = self.raw_confirmed.loc[self.raw_confirmed['Country/Region'] == name].sum(numeric_only=True)\n d = self.raw_deceased.loc[self.raw_deceased['Country/Region'] == name].sum(numeric_only=True)\n r = self.raw_recovered.loc[self.raw_recovered['Country/Region'] == name].sum(numeric_only=True)\n # Create new country object.\n self.countries.append(country.Country(name, p, c, d, r))", "def trim_features():\n pass", "def country_code_update(df):\n from pycountry import countries as ct\n new_df = country_grouping(df)\n # country names in the data set that are not fit ISO standard\n completion = pd.DataFrame(np.array([['Bolivia', 'BO'],\n ['Brunei', 'BN'],\n ['Congo (Brazzaville)', 'CG'],\n ['Congo (Kinshasa)', 'CD'],\n ['Cote d\\'Ivoire', 'CI'],\n ['Holy See', 'VA'],\n ['Iran', 'IR'],\n ['Korea, South', 'KR'],\n ['Moldova', 'MD'],\n ['Russia', 'RU'],\n ['Taiwan*', 'TW'],\n ['Tanzania', 'TZ'],\n ['US', 'US'],\n ['Venezuela', 'VE'],\n ['Vietnam', 'VN'],\n ['Syria', 'SY'],\n ['Laos', 'LA'],\n ['West Bank and Gaza', 'PS'],\n ['Kosovo', 'XK'],\n ['Burma', 'MM']\n ]),\n columns=['c_name', 'c_code']\n )\n country_code_list = []\n for country_name in new_df['Country/Region']:\n try:\n if country_name in completion['c_name'].tolist():\n # print('exception covered: ', country_name)\n country_code = completion['c_code'].loc[completion['c_name'] == country_name].item()\n # identifies the cruise ships in the data set considered as a 'country'\n elif country_name == 'Diamond Princess' or country_name == 'MS Zaandam':\n country_code = 'Cruise Ship'\n else:\n country_code = ct.get(name=country_name).alpha_2\n except KeyError:\n print('no result: ', country_name)\n country_code = 'None'\n pass\n country_code_list.append(country_code)\n # print(country_code_list)\n new_df.insert(0, \"country_code\", 
country_code_list, True)\n new_df = new_df.drop(columns='Country/Region')\n unknown_index = new_df[new_df['country_code'] == 'Cruise Ship'].index\n new_df.drop(unknown_index, inplace=True) # drop when country_code = 'None', most likely are Cruise ships\n # new_df.set_index(new_df['country_code'])\n return new_df", "def clean_data(df, start = 1995, stop = 2018):\n country_iso3_code = pd.read_html('https://unstats.un.org/unsd/methodology/m49/')\n country_iso3_code = country_iso3_code[0]['ISO-alpha3 code']\n df = df.loc[df.country_iso3_code.isin(country_iso3_code)]\n df = df.set_index(['indicator', 'country_iso3_code', 'country', 'year']).unstack(level = 0)\n df.columns = df.columns.get_level_values(1)\n df = df.rename(columns = {'NY.GDP.PCAP.KD.ZG': 'pc_GDP_growth',\n 'NY.GDP.PCAP.PP.CD': 'pc_GDP_PPP'})\n df = df.reset_index()\n df = df.loc[(df.year >= (start - 1)) & (df.year <= stop)]\n df = df.dropna()\n return df", "def get_China_exhubei(df) -> pandas.core.frame.DataFrame:\n return df[(df['countryCode']=='CN') & (df['province']!='Hubei Province') & ~(df['province'].isnull()) \\\n & ~(df['city'].isnull())]", "def test_country_name_not_in_countries(self):\n\t\tcountry_code = get_country_code('Venezuela, RB')\n\t\tself.assertEqual(country_code, 've')", "def remove_empty_sources(self):\n for source in [\"dxf\", \"edilizia\", \"easyroom\", \"merged\"]:\n if source in self and not self[source]:\n del self[source]", "def process_country_shapes():\n path_processed = os.path.join(\n SHAPEFILE_DIR, 'national_outline_{}.shp'.format(COUNTRY_ABBRV))\n\n single_country = None\n if not os.path.exists(path_processed):\n print('Working on national outline')\n path_raw = os.path.join(BASE_DIR, 'data', 'gadm36_levels_shp', 'gadm36_0.shp')\n countries = geopandas.read_file(path_raw)\n\n for name in countries.GID_0.unique():\n if not name == COUNTRY_ABBRV:\n continue\n\n print('Working on {}'.format(name))\n single_country = countries[countries.GID_0 == name]\n\n print('Excluding small shapes')\n single_country['geometry'] = single_country.apply(\n exclude_small_shapes,axis=1)\n\n print('Simplifying geometries')\n single_country['geometry'] = single_country.simplify(\n tolerance = 0.005, preserve_topology=True\n ).buffer(0.01).simplify(tolerance = 0.005,\n preserve_topology=True)\n\n print('Writing national outline to file')\n single_country.to_file(path_processed, driver='ESRI Shapefile')\n found = True\n break\n \n if not found:\n raise ValueError(f'country abbrv {COUNTRY_ABBRV} does not exist')\n\n else:\n single_country = geopandas.read_file(path_processed)\n\n return single_country", "def prune_features(self, verbose=False):\n # Collect all features and prune those occurring only once.\n features = defaultdict(int)\n for k in self.utterance_features:\n for f in self.utterance_features[k]:\n features[f] += 1\n\n if verbose:\n print \"Total number of features: \", len(features)\n\n self.remove_features = []\n for k in features:\n if features[k] <= 2:\n self.remove_features.append(k)\n\n if verbose:\n print \"Number of unique features: \", len(self.remove_features)\n\n self.remove_features = set(self.remove_features)\n for k in self.utterance_features:\n self.utterance_features[k].prune(self.remove_features)\n\n features = defaultdict(int)\n for k in self.utterance_features:\n for f in self.utterance_features[k]:\n features[f] += 1\n\n if verbose:\n print \"Total number of features: \", len(features)", "def remove_city(g, city_name):\n code = g.convert[city_name]\n \n for key in g.city_dict:\n \n 
old_flights_in = g.city_dict[key].get_flights_in()\n new_flights_in = []\n for flight in old_flights_in:\n if(flight[0] != code):\n new_flights_in.append(flight)\n \n old_flights_out = g.city_dict[key].get_flights_out()\n new_flights_out = []\n for flight in old_flights_out:\n if(flight[0] != code):\n new_flights_out.append(flight)\n \n g.city_dict[key].set_flights_in(new_flights_in)\n g.city_dict[key].set_flights_out(new_flights_out)\n \n del g.city_dict[code]\n del g.convert[city_name]\n \n return g", "def clean_and_save_country(country_name, df):\n drop_columns = ['Lat', \n 'Long', \n 'Province/State']\n\n df.drop(columns=drop_columns, inplace = True)\n df_group = df.groupby(['Country/Region'])\n\n country = df_group.get_group(country_name)\n country.drop(columns = ['Country/Region'], inplace=True)\n country = country.agg(['sum'])\n country = country.T\n country.reset_index(level=0, inplace=True)\n country['index'] = pd.to_datetime(country['index'])\n country.rename(columns={'index': 'date'}, inplace=True)\n\n country.to_csv('../data/' + country_name + '_timeseries.csv', index=False)", "def load_country_code_data():\n name_conversion = {\n 'East Timor': 'Timor-Leste',\n 'Republic of the Congo': 'Congo (Kinshasa)',\n 'Ivory Coast': 'Cote d\\'Ivoire',\n 'Macedonia': 'North Macedonia',\n 'Myanmar': 'Burma',\n 'Republic of Serbia': 'Serbia',\n 'Taiwan': 'Taiwan*',\n 'The Bahamas': 'Bahamas',\n 'United Republic of Tanzania': 'Tanzania',\n 'United States of America': 'US'\n }\n\n shapefile = os.path.join('data', 'ne_110m_admin_0_countries.shp')\n\n gdf = gpd.read_file(shapefile)[['ADMIN', 'ADM0_A3', 'geometry']]\n gdf.columns = ['country', 'country_code', 'geometry']\n\n gdf.loc[gdf['country'].isin(name_conversion.keys()), 'country'] = gdf['country'].map(name_conversion)\n\n return gdf", "def clean_and_save_worldwide(df):\n drop_columns = ['FIPS',\n 'Lat', \n 'Long_', \n 'Combined_Key', \n 'Admin2', \n 'Province_State']\n\n df.drop(columns=drop_columns, inplace=True)\n\n df_cases = df.groupby(['Country_Region'], as_index=False).sum()\n df_cases.to_csv('../data/Total_cases_worldwide.csv', index=False)", "def exclude_small_shapes(x,regionalized=False):\n # if its a single polygon, just return the polygon geometry\n if x.geometry.geom_type == 'Polygon':\n return x.geometry\n\n # if its a multipolygon, we start trying to simplify and\n # remove shapes if its too big.\n elif x.geometry.geom_type == 'MultiPolygon':\n\n if regionalized == False:\n area1 = 0.1\n area2 = 250\n\n elif regionalized == True:\n area1 = 0.01\n area2 = 50\n\n # dont remove shapes if total area is already very small\n if x.geometry.area < area1:\n return x.geometry\n # remove bigger shapes if country is really big\n\n if x['GID_0'] in ['CHL','IDN']:\n threshold = 0.01\n elif x['GID_0'] in ['RUS','GRL','CAN','USA']:\n if regionalized == True:\n threshold = 0.01\n else:\n threshold = 0.01\n\n elif x.geometry.area > area2:\n threshold = 0.1\n else:\n threshold = 0.001\n\n # save remaining polygons as new multipolygon for the\n # specific country\n new_geom = []\n for y in x.geometry:\n if y.area > threshold:\n new_geom.append(y)\n\n return MultiPolygon(new_geom)", "def getUniqCountries(df):\n # getting a unique list of the countries\n countries_uniq_list = df.geo_country.unique().tolist()\n # delete the \"nan\"\n del countries_uniq_list[0]\n # sort list alphabatically\n return sorted(countries_uniq_list)", "def removeHindiNames(data):\n\thi = r\"\\w+:hi$\"\n\tfor each in data:\n\t\tk = list(filter(lambda e: re.findall(hi, e) != 
[], each['k']))\n\t\tif k:\n\t\t\tfor e in k:\n\t\t\t\tindex = each['k'].index(e)\n\t\t\t\teach['k'].pop(index)\n\t\t\t\teach['v'].pop(index)\n\t\tyield each", "def test_countries():\n test_path = tempfile.mkdtemp()\n x_train, metadata = countries(test_path)\n try:\n assert x_train.shape == (288, 3)\n except:\n shutil.rmtree(test_path)\n raise()", "def data_clean(countryfile, folder):\n with open(r\"C:\\Users\\User\\Documents\\LUCAS2015_spectra\\LUCAS2015_Soil_Spectra_EU28\\spectra_ \" + countryfile + \" .csv\") as f:\n # maakt csv reader aan\n reader = csv.reader(f)\n # Open\n with open(folder + r\"\\spectra_ \" + countryfile + \" .csv\", 'w', newline='') as file:\n writer = csv.writer(file)\n for c, row in enumerate(reader):\n if c == 0:\n writer.writerow(row[:5] + row[205:-200:2])\n else:\n x = np.array(row[205:-200:2], dtype='float64')\n reflectance = 10 ** (-x)\n writer.writerow(row[:5] + list(reflectance))", "def removeLanguage(language):" ]
[ "0.59036386", "0.5863824", "0.5825038", "0.5797917", "0.5704074", "0.5514831", "0.55131525", "0.5411934", "0.5399952", "0.53786814", "0.53708786", "0.53605676", "0.5328933", "0.53094655", "0.51719415", "0.5159425", "0.51442134", "0.5126341", "0.51245403", "0.50806504", "0.50648564", "0.50499237", "0.5031425", "0.50312304", "0.5019079", "0.4963156", "0.49614692", "0.49172637", "0.49153462", "0.49069163" ]
0.70100856
0
Builds features issued from InvoiceDate. A dataframe is built per new feature and dumped into a file. Each one of these dataframes has encoded features issued from InvoiceDate.
def data_transform_timeFeature(self): #------------------------------------------------------------------------- # All new features are built into separate dataframes # and each of them are dumped into a separate file. #------------------------------------------------------------------------- self.strprint("self.df_invoice_line : "+str(self.df_invoice_line.shape)) self._dict_timeFeature_encoder, df_customers_timeFeature \ = p5_util.time_list_feature_build(self.df_invoice_line\ , self._list_new_feature, dict_encoder = self._dict_timeFeature_encoder\ ,is_verbose=self.is_verbose) #------------------------------------------------------------------------- # New time features are aggregated into a single dataframe. # Values are scaled. #------------------------------------------------------------------------- df_customers_timeFeature, self._std_scaler_timeFeature \ = p5_util.time_list_feature_restore(self._list_new_feature \ , std_scale = self._std_scaler_timeFeature\ , df_timeFeature = df_customers_timeFeature, is_verbose = self.is_verbose) self.strprint("df_customers_timeFeature : "+str(df_customers_timeFeature.shape)) #------------------------------------------------------------------------- # Dimension reduction thanks to PCA #------------------------------------------------------------------------- n_dim=30 root_name = 'time_pca_' # Column CustomerID is used into df_pca_reduce df_customers_timeFeature['CustomerID'] = df_customers_timeFeature.index df_customers_timeFeature, pca_timeFeature \ = p5_util.df_pca_reduce(df_customers_timeFeature, n_dim, root_name\ , p_is_scale=False, pca = self._pca_timeFeature) self.strprint(df_customers_timeFeature.shape) if self._pca_timeFeature is None: #---------------------------------------------------------------------- # Data-model is in built process with part of data-set. #---------------------------------------------------------------------- self._pca_timeFeature = pca_timeFeature p5_util.object_dump(df_customers_timeFeature\ , self._df_customers_timeFeature_fileName) else: #---------------------------------------------------------------------- # Data-model is already built and this method is called # for a customer classification. #---------------------------------------------------------------------- self._df_customers_timeFeature = df_customers_timeFeature.copy() return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_date_features(df = None, date = None):\n #TODO", "def data_transform_rfm(self) :\n \n is_built_step = False\n if self._encoder_rfm is None:\n is_built_step = True \n \n #-------------------------------------------------------------------------\n # RFM feature is built\n #-------------------------------------------------------------------------\n ser_invoice_date = self._df_invoice_line.InvoiceDate\n \n self.df_invoice_line, df_RFM, self.df_RFM_quantiles, self._day_now \\\n = p5_util.p5_df_rfm_build(self.df_invoice_line, day_now = self._day_now\\\n , df_RFM_threshold=self.df_RFM_quantiles)\n \n self._df_invoice_line.InvoiceDate = ser_invoice_date\n \n #-------------------------------------------------------------------------\n # RFM score is added to dataframe\n #-------------------------------------------------------------------------\n df_merged = pd.merge(self.df_invoice_line\\\n , df_RFM[['CustomerID','RFM']], how='left', on=['CustomerID'])\n\n self._df_invoice_line \\\n = pd.DataFrame(df_merged.values, index = self._df_invoice_line.index\\\n , columns=df_merged.columns)\n \n\n #self._df_invoice_line \\\n #= pd.concat([ self.df_invoice_line,df_RFM[['CustomerID','RFM']] ], axis=1\\\n #,join='inner')\n \n \n #-------------------------------------------------------------------------\n # RFM encoding\n #-------------------------------------------------------------------------\n self._encoder_rfm, df_RFM_encoded \\\n = p5_util.df_rfm_one_hot_encode(df_RFM,'RFM', encoder=self._encoder_rfm)\n\n #-------------------------------------------------------------------------\n # Encoded RFM features are renamed\n #-------------------------------------------------------------------------\n df_customers_rfm, list_col_unchanged \\\n = p5_util.df_rename_columns(df_RFM_encoded, df_RFM_encoded.columns\\\n , 'w_rfm_')\n \n self.strprint(\"df_customers_rfm =\" +str(df_customers_rfm.shape))\n\n #-------------------------------------------------------------------------\n # dataframe with RFM encoded values per customer is dumped\n #-------------------------------------------------------------------------\n if is_built_step is True:\n p5_util.object_dump(df_customers_rfm, self.df_customers_rfm_fileName)\n else :\n self._df_customers_rfm = df_customers_rfm.copy()\n return", "def df_customers_features_build(self):\n\n df_customers_rfm = self._df_customers_rfm.copy()\n df_customers_timeFeature = self._df_customers_timeFeature.copy()\n df_customers_nlp = self._df_customers_pca_nlp.copy()\n\n #-------------------------------------------------------------------------\n # Dataframe are aggregated; note that indexes are customerID.\n #-------------------------------------------------------------------------\n df_customers = pd.DataFrame()\n\n df_customers = pd.concat([df_customers,df_customers_rfm], axis=1)\n\n df_customers = pd.concat([df_customers,df_customers_timeFeature]\\\n , join='inner', axis=1)\n\n df_customers = pd.concat([df_customers,df_customers_nlp]\\\n , join='inner', axis=1)\n \n self.strprint(\"All features : \"+str(df_customers.shape))\n self._df_customers = df_customers.copy()\n return", "def features_past_generation(features_creation_function,\n days,\n feature_names_prefix,\n data,\n indices):\n matches_outcomes=[]\n for i,match_indice in enumerate(indices):\n match=data.iloc[match_indice,:]\n past_matches=data[(data.Date<match.Date)&(data.Date>=match.Date-datetime.timedelta(days=days))]\n match_features_outcome_1=features_creation_function(1,match,past_matches)\n 
match_features_outcome_2=features_creation_function(2,match,past_matches)\n matches_outcomes.append(match_features_outcome_1)\n matches_outcomes.append(match_features_outcome_2)\n if i%100==0:\n print(str(i)+\"/\"+str(len(indices))+\" matches treated. \"+ features_creation_function.__name__ + str(days))\n train=pd.DataFrame(matches_outcomes)\n train.columns=[feature_names_prefix + \"_\" + str(days) +\"_\" +str(i) for i in range(len(train.columns))]\n \n \n \n return train", "def data_process_deprecated(self, CustomerID, InvoiceDate, InvoiceNo, Description, Quantity\\\n , UnitPrice ):\n dict_invoice = {'InvoiceDate':InvoiceDate, 'Description':Description\\\n , 'Quantity':Quantity, 'UnitPrice':UnitPrice}\n dict_invoice['CustomerID'] = CustomerID\n dict_invoice['InvoiceNo'] = InvoiceNo\n df_invoice_line \\\n = pd.DataFrame(dict_invoice, columns=dict_invoice.keys(), index=[0])\n \n self.data_transform(df_invoice_line)\n\n #self.feature_rfm_encode()\n\n self.feature_scale()\n\n self.list_feature_drop()\n\n self.feature_description_nlp()\n return", "def data_transform(self, df) :\n\n #-------------------------------------------------------------------------\n # Copy of given dataframe to be transformed\n #-------------------------------------------------------------------------\n self.df_invoice_line = df\n \n #-------------------------------------------------------------------------\n # Features issued from InvoiceDate are created\n #-------------------------------------------------------------------------\n if self.is_transform_timeFeature is True:\n self.strprint(\"\\n*** Time features transformation ***\")\n self.data_transform_timeFeature()\n\n #-------------------------------------------------------------------------\n # RFM is computed and encoded\n #-------------------------------------------------------------------------\n if self.is_transform_rfm is True:\n self.strprint(\"\\n*** RFM transformation ***\")\n self.data_transform_rfm()\n\n #-------------------------------------------------------------------------\n # NLP features issued from Description are created\n #-------------------------------------------------------------------------\n if self.is_transform_nlp is True:\n self.strprint(\"\\n*** NLP transformation ***\")\n self.data_transform_nlp()\n \n return self.df_invoice_line", "def make_features(self, x_hits, y_hits, dow, lagged_hits, pf_age, pf_si, pf_network, pf_gender, page_ix, pf_price_cat,\n page_popularity, quarter_autocorr):\n # Split day of week to train and test\n x_dow, y_dow = tf.split(dow, [self.train_window, self.predict_window], axis=0)\n\n # Normalize hits\n mean = tf.reduce_mean(x_hits)\n std = tf.sqrt(tf.reduce_mean(tf.squared_difference(x_hits, mean)))\n norm_x_hits = (x_hits - mean) / std\n norm_y_hits = (y_hits - mean) / std\n norm_lagged_hits = (lagged_hits - mean) / std\n\n # Split lagged hits to train and test\n x_lagged, y_lagged = tf.split(norm_lagged_hits, [self.train_window, self.predict_window], axis=0)\n\n # Combine all page features into single tensor\n stacked_features = tf.stack([page_popularity, quarter_autocorr])\n flat_ucdoc_features = tf.concat([pf_age, pf_si, pf_network, pf_gender, pf_price_cat, stacked_features], axis=0) #pf_region\n ucdoc_features = tf.expand_dims(flat_ucdoc_features, 0)\n\n # Train features\n x_features = tf.concat([\n # [n_days] -> [n_days, 1]\n tf.expand_dims(norm_x_hits, -1),\n x_dow,\n x_lagged,\n # Stretch ucdoc_features to all training days\n # [1, features] -> [n_days, features]\n tf.tile(ucdoc_features, 
[self.train_window, 1])\n ], axis=1)\n\n # Test features\n y_features = tf.concat([\n # [n_days] -> [n_days, 1]\n y_dow,\n y_lagged,\n # Stretch ucdoc_features to all testing days\n # [1, features] -> [n_days, features]\n tf.tile(ucdoc_features, [self.predict_window, 1])\n ], axis=1)\n\n return x_hits, x_features, norm_x_hits, x_lagged, y_hits, y_features, norm_y_hits, mean, std, flat_ucdoc_features, page_ix", "def new_features(df):\n print(\"Add new features ...\")\n # distinguish Spring, Fall and pregnant females (don't care about juvenilles/unknown)\n df[\"gender_plus\"] = df[\"Gender\"]\n df.loc[df.Gravid, \"gender_plus\"] = \"f_gra\"\n\n df[\"gender_seasons\"] = df[\"Gender\"]\n df.loc[df.Gravid, \"gender_seasons\"] = \"f_gra\"\n\n # add features\n df[\"Age_To_Weight\"] = df[\"Annuli\"] / df[\"Weight\"]\n\n # Calcuate Number of recaptures\n df_captures = df[[\"ID\", \"Date\"]].groupby(\"ID\").count()\n df_captures.columns = [\"recapture_count\"]\n df_captures.reset_index(inplace=True)\n df = pd.merge(df, df_captures, how=\"outer\", on=\"ID\")\n\n # recalculate annuli\n df_min = pd.pivot_table(\n df[df.Annuli > 0],\n values=[\"Date\", \"Annuli\"],\n index=[\"ID\"],\n aggfunc={\"Date\": min, \"Annuli\": min},\n )\n df_min.columns = [\"annuli_min\", \"date_min\"]\n df_min.reset_index(inplace=True)\n\n df = pd.merge(df, df_min, how=\"outer\", on=\"ID\")\n df[\"year\"] = df.Date.map(lambda x: x.year)\n df[\"year_min\"] = df.date_min.map(lambda x: x.year)\n df[\"Annuli_orig\"] = df.Annuli\n df.Annuli = df.year - df.year_min + df.annuli_min\n df.Annuli = np.nan_to_num(df.Annuli)\n df[\"Annuli\"] = pd.to_numeric(df[\"Annuli\"], downcast=\"integer\")\n\n # Annuli Buckets\n buckets = 5\n interval = int(df[\"Annuli\"].max() / buckets)\n buckets = [i for i in range(0, df[\"Annuli\"].max() + interval, interval)]\n labels = [\"'{0} - {1}'\".format(i, i + interval) for i in buckets]\n df[\"Annuli_Group\"] = pd.cut(\n df.Annuli, buckets, labels=labels[:-1], include_lowest=True\n )\n\n return df", "def create_features(energy_data, label=None):\n energy_data['date'] = energy_data.index\n energy_data['hour'] = energy_data['Datetime'].dt.hour\n energy_data['dayofweek'] = energy_data['Datetime'].dt.dayofweek\n energy_data['month'] = energy_data['Datetime'].dt.month\n energy_data['quarter'] = energy_data['Datetime'].dt.quarter\n energy_data['year'] = energy_data['Datetime'].dt.year\n energy_data['dayofyear'] = energy_data['Datetime'].dt.dayofyear\n energy_data['dayofmonth'] = energy_data['Datetime'].dt.day\n energy_data['weekofyear'] = energy_data['Datetime'].dt.weekofyear\n energy_data['pjme_2_hrs_lag'] = energy_data['PJME_MW'].shift(2)\n energy_data['pjme_4_hrs_lag'] = energy_data['PJME_MW'].shift(4)\n energy_data['pjme_8_hrs_lag'] = energy_data['PJME_MW'].shift(8)\n energy_data['pjme_12_hrs_lag'] = energy_data['PJME_MW'].shift(12)\n energy_data['pjme_24_hrs_lag'] = energy_data['PJME_MW'].shift(24)\n energy_data['pjme_4_hrs_mean'] = energy_data['PJME_MW'].rolling(window=4).mean()\n energy_data['pjme_8_hrs_mean'] = energy_data['PJME_MW'].rolling(window=8).mean()\n energy_data['pjme_12_hrs_mean'] = energy_data['PJME_MW'].rolling(window=12).mean()\n energy_data['pjme_24_hrs_mean'] = energy_data['PJME_MW'].rolling(window=24).mean()\n energy_data['pjme_4_hrs_std'] = energy_data['PJME_MW'].rolling(window=4).std()\n energy_data['pjme_8_hrs_std'] = energy_data['PJME_MW'].rolling(window=8).std()\n energy_data['pjme_12_hrs_std'] = energy_data['PJME_MW'].rolling(window=12).std()\n energy_data['pjme_24_hrs_std'] = 
energy_data['PJME_MW'].rolling(window=24).std()\n energy_data['pjme_4_hrs_max'] = energy_data['PJME_MW'].rolling(window=4).max()\n energy_data['pjme_8_hrs_max'] = energy_data['PJME_MW'].rolling(window=8).max()\n energy_data['pjme_12_hrs_max'] = energy_data['PJME_MW'].rolling(window=12).max()\n energy_data['pjme_24_hrs_max'] = energy_data['PJME_MW'].rolling(window=24).max()\n energy_data['pjme_4_hrs_min'] = energy_data['PJME_MW'].rolling(window=4).min()\n energy_data['pjme_8_hrs_min'] = energy_data['PJME_MW'].rolling(window=8).min()\n energy_data['pjme_12_hrs_min'] = energy_data['PJME_MW'].rolling(window=12).min()\n energy_data['pjme_24_hrs_min'] = energy_data['PJME_MW'].rolling(window=24).min()\n\n features = energy_data[['hour', 'dayofweek', 'quarter', 'month', 'year',\n 'dayofyear', 'dayofmonth', 'weekofyear', 'pjme_2_hrs_lag', 'pjme_4_hrs_lag',\n 'pjme_8_hrs_lag', 'pjme_12_hrs_lag', 'pjme_24_hrs_lag', 'pjme_4_hrs_mean',\n \"pjme_8_hrs_mean\", \"pjme_12_hrs_mean\", \"pjme_24_hrs_mean\", \"pjme_4_hrs_std\",\n \"pjme_8_hrs_std\", \"pjme_12_hrs_std\", \"pjme_24_hrs_std\",\n \"pjme_4_hrs_max\", \"pjme_8_hrs_max\", \"pjme_12_hrs_max\", \"pjme_24_hrs_max\",\n \"pjme_4_hrs_min\", \"pjme_8_hrs_min\", \"pjme_12_hrs_min\", \"pjme_24_hrs_min\"]]\n if label:\n label = energy_data[label]\n return features, label\n return features", "def df_customers_fileRead(self):\n \n #-------------------------------------------------------------------------\n # RFM features are restored\n #-------------------------------------------------------------------------\n df_customers_rfm \\\n = p5_util.object_load(self.df_customers_rfm_fileName)\n self.strprint(\"RFM features : \"+str(df_customers_rfm.shape))\n \n #-------------------------------------------------------------------------\n # Time features are restored\n #-------------------------------------------------------------------------\n df_customers_timeFeature \\\n = p5_util.object_load(self._df_customers_timeFeature_fileName)\n self.strprint(\"Time features : \"+str(df_customers_timeFeature.shape))\n \n #-------------------------------------------------------------------------\n # NLP features are restored\n #-------------------------------------------------------------------------\n df_customers_nlp = p5_util.object_load(self._df_customers_nlp_fileName)\n self.strprint(\"NLP features : \"+str(df_customers_nlp.shape))\n\n if False:\n df_customers_rfm = self._df_customers_rfm.copy()\n df_customers_timeFeature = self._df_customers_timeFeature.copy()\n df_customers_nlp = self._df_customers_pca_nlp.copy()\n\n #-------------------------------------------------------------------------\n # Dataframe are aggregated; note that indexes are customerID.\n #-------------------------------------------------------------------------\n df_customers = pd.DataFrame()\n\n df_customers = pd.concat([df_customers,df_customers_rfm], axis=1)\n\n df_customers = pd.concat([df_customers,df_customers_timeFeature]\\\n , join='inner', axis=1)\n\n df_customers = pd.concat([df_customers,df_customers_nlp]\\\n , join='inner', axis=1)\n\n self.strprint(\"All features : \"+str(df_customers.shape))\n\n #----------------------------------------------------------------------\n # Dataframe is dumped into a file\n #----------------------------------------------------------------------\n p5_util.object_dump(df_customers, self._df_customers_fileName)\n if False:\n #----------------------------------------------------------------------\n # Dataframe is copied as an attribute\n 
#----------------------------------------------------------------------\n self._df_customers = df_customers.copy()\n \n return", "def generate_data(filename_in, filename_out):\n file_in = open(filename_in, 'r')\n file_out = open(filename_out, 'w+')\n\n df = pd.read_csv(file_in, header=None, sep=' ', quoting=csv.QUOTE_NONE)\n x = df.iloc[:, 0].values\n y_class = df.iloc[:, -1].values\n file_in.close()\n\n y_class = np.where(y_class == 'O', 0, 1)\n\n x_features = []\n size_x = len(x)\n for i in range(3, size_x):\n if i % 5000 == 0:\n print(i, \"/\", size_x)\n x_features.append(features(x[i-2], x[i-1], x[i], y_class[i]))\n\n df_write = pd.DataFrame(x_features)\n\n tab = [x for x in range(1, NUMBER_OF_FEATURE + 2)]\n df_write.columns = tab\n write_csv(df_write, file_out)\n file_out.close()", "def create_features(\r\n df:pd.DataFrame,\r\n path_data_dir:str\r\n ) -> pd.DataFrame:\r\n # Check input.\r\n # Copy dataframe to avoid in place modification.\r\n df = df.copy()\r\n # Check file path.\r\n if not os.path.exists(path_data_dir):\r\n raise IOError(textwrap.dedent(\"\"\"\\\r\n Path does not exist:\r\n path_data_dir = {path}\"\"\".format(\r\n path=path_data_dir)))\r\n ########################################\r\n # Returned_asm\r\n # Interpretation of assumptions:\r\n # If DSEligible=0, then the vehicle is not eligible for a guarantee.\r\n # * And Returned=-1 (null) since we don't know whether or not it would have been returned,\r\n # but given that it wasn't eligible, it may have been likely to have Returned=1.\r\n # If DSEligible=1, then the vehicle is eligible for a guarantee.\r\n # * And if Returned=0 then the guarantee was purchased and the vehicle was not returned.\r\n # * And if Returned=1 then the guarantee was purchased and the vehicle was returned.\r\n # * And if Returned=-1 (null) then the guarantee was not purchased.\r\n # We don't know whether or not it would have been returned,\r\n # but given that the dealer did not purchase, it may have been likely to have Returned=0.\r\n # Assume:\r\n # If Returned=-1 and DSEligible=0, then Returned_asm=1\r\n # If Returned=-1 and DSEligible=1, then Returned_asm=0\r\n logger.info(textwrap.dedent(\"\"\"\\\r\n Returned_asm: Assume returned status to fill nulls as new feature.\r\n If Returned=-1 and DSEligible=0, then Returned_asm=1 (assumes low P(resale|buyer, car))\r\n If Returned=-1 and DSEligible=1, then Returned_asm=0 (assumes high P(resale|buyer, car))\"\"\"))\r\n df['Returned_asm'] = df['Returned']\r\n df.loc[\r\n np.logical_and(df['Returned'] == -1, df['DSEligible'] == 0),\r\n 'Returned_asm'] = 1\r\n df.loc[\r\n np.logical_and(df['Returned'] == -1, df['DSEligible'] == 1),\r\n 'Returned_asm'] = 0\r\n logger.info(\"Relationship between DSEligible and Returned:\\n{pt}\".format(\r\n pt=pd.pivot_table(\r\n df[['DSEligible', 'Returned']].astype(str),\r\n index='DSEligible', columns='Returned',\r\n aggfunc=len, margins=True, dropna=False)))\r\n logger.info(\"Relationship between DSEligible and Returned_asm:\\n{pt}\".format(\r\n pt=pd.pivot_table(\r\n df[['DSEligible', 'Returned_asm']].astype(str),\r\n index='DSEligible', columns='Returned_asm',\r\n aggfunc=len, margins=True, dropna=False)))\r\n logger.info(\"Relationship between Returned and Returned_asm:\\n{pt}\".format(\r\n pt=pd.pivot_table(\r\n df[['Returned', 'Returned_asm']].astype(str),\r\n index='Returned', columns='Returned_asm',\r\n aggfunc=len, margins=True, dropna=False)))\r\n ########################################\r\n # SellingLocation_lat, SellingLocation_lon\r\n # Cell takes ~1 min 
to execute if shelf does not exist.\r\n # Google API limit: https://developers.google.com/maps/documentation/geocoding/usage-limits\r\n logger.info(textwrap.dedent(\"\"\"\\\r\n SellingLocation: Geocode.\r\n Scraping webpages for addresses and looking up latitude, longitude coordinates.\"\"\"))\r\n path_shelf = os.path.join(path_data_dir, 'sellloc_geoloc.shelf')\r\n seconds_per_query = 1.0/50.0 # Google API limit\r\n sellloc_geoloc = dict()\r\n with shelve.open(filename=path_shelf, flag='c') as shelf:\r\n for loc in df['SellingLocation'].unique():\r\n if loc in shelf:\r\n raw = shelf[loc]\r\n if raw is None:\r\n location = raw\r\n else:\r\n address = raw['formatted_address']\r\n latitude = raw['geometry']['location']['lat']\r\n longitude = raw['geometry']['location']['lng']\r\n location = geopy.location.Location(\r\n address=address, point=(latitude, longitude), raw=raw)\r\n else: \r\n url = r'https://www.manheim.com/locations/{loc}/events'.format(loc=loc)\r\n page = requests.get(url)\r\n tree = bs4.BeautifulSoup(page.text, 'lxml')\r\n address = tree.find(name='p', class_='loc_address').get_text().strip()\r\n try:\r\n components = {\r\n 'country': 'United States',\r\n 'postal_code': address.split()[-1]}\r\n location = geopy.geocoders.GoogleV3().geocode(\r\n query=address,\r\n exactly_one=True,\r\n components=components)\r\n except:\r\n logger.warning(textwrap.dedent(\"\"\"\\\r\n Exception raised. Setting {loc} geo location to `None`\r\n sys.exc_info() =\r\n {exc}\"\"\".format(loc=loc, exc=sys.exc_info())))\r\n location = None\r\n finally:\r\n time.sleep(seconds_per_query)\r\n if location is None:\r\n shelf[loc] = location\r\n else:\r\n shelf[loc] = location.raw\r\n sellloc_geoloc[loc] = location\r\n logger.info(\"Mapping SellingLocation to latitude, longitude coordinates.\")\r\n sellloc_lat = {\r\n sellloc: (geoloc.latitude if geoloc is not None else 0.0)\r\n for (sellloc, geoloc) in sellloc_geoloc.items()}\r\n sellloc_lon = {\r\n sellloc: (geoloc.longitude if geoloc is not None else 0.0)\r\n for (sellloc, geoloc) in sellloc_geoloc.items()}\r\n df['SellingLocation_lat'] = df['SellingLocation'].map(sellloc_lat)\r\n df['SellingLocation_lon'] = df['SellingLocation'].map(sellloc_lon)\r\n # # TODO: experiment with one-hot encoding (problems is that it doesn't scale)\r\n # df = pd.merge(\r\n # left=df,\r\n # right=pd.get_dummies(df['SellingLocation'], prefix='SellingLocation'),\r\n # how='inner',\r\n # left_index=True,\r\n # right_index=True)\r\n ########################################\r\n # JDPowersCat: One-hot encoding\r\n # TODO: Estimate sizes from Wikipedia, e.g. 
https://en.wikipedia.org/wiki/Vehicle_size_class.\r\n logger.info(\"JDPowersCat: One-hot encoding.\")\r\n # Cast to string, replacing 'nan' with 'UNKNOWN'.\r\n df['JDPowersCat'] = (df['JDPowersCat'].astype(str)).str.replace(' ', '').apply(\r\n lambda cat: 'UNKNOWN' if cat == 'nan' else cat)\r\n # One-hot encoding.\r\n df = pd.merge(\r\n left=df,\r\n right=pd.get_dummies(df['JDPowersCat'], prefix='JDPowersCat'),\r\n left_index=True,\r\n right_index=True)\r\n ########################################\r\n # LIGHT_N0G1Y2R3\r\n # Rank lights by warning level.\r\n logger.info(\"LIGHT_N0G1Y2R3: Rank lights by warning level (null=0, green=1, yellow=2, red=3).\")\r\n df['LIGHT_N0G1Y2R3'] = df['LIGHTG']*1 + df['LIGHTY']*2 + df['LIGHTR']*3\r\n ########################################\r\n # SaleDate_*: Extract timeseries features.\r\n logger.info(\"SaleDate: Extract timeseries features.\")\r\n df['SaleDate_dow'] = df['SaleDate'].dt.dayofweek\r\n df['SaleDate_doy'] = df['SaleDate'].dt.dayofyear\r\n df['SaleDate_day'] = df['SaleDate'].dt.day\r\n df['SaleDate_decyear'] = df['SaleDate'].dt.year + (df['SaleDate'].dt.dayofyear-1)/366\r\n ########################################\r\n # BuyerID, SellerID, VIN, SellingLocation, CarMake, JDPowersCat:\r\n # Make cumulative informative priors (*_num*, *_frac*) for string features.\r\n logger.info(textwrap.dedent(\"\"\"\\\r\n BuyerID, SellerID, VIN, SellingLocation, CarMake, JDPowersCat:\r\n Make cumulative informative priors (*_num*, *_frac*) for string features.\"\"\"))\r\n # Cumulative features require sorting by time.\r\n df.sort_values(by=['SaleDate'], inplace=True)\r\n df.reset_index(drop=True, inplace=True)\r\n for col in ['BuyerID', 'SellerID', 'VIN', 'SellingLocation', 'CarMake', 'JDPowersCat']:\r\n logger.info(\"Processing {col}\".format(col=col))\r\n ####################\r\n # Cumulative count of transactions and DSEligible:\r\n # Cumulative count of transactions (yes including current).\r\n df[col+'_numTransactions'] = df[[col]].groupby(by=col).cumcount().astype(int) + 1\r\n df[col+'_numTransactions'].fillna(value=1, inplace=True)\r\n # Cumulative count of transactions that were DealShield-eligible (yes including current).\r\n df[col+'_numDSEligible1'] = df[[col, 'DSEligible']].groupby(by=col)['DSEligible'].cumsum().astype(int)\r\n df[col+'_numDSEligible1'].fillna(value=0, inplace=True)\r\n # Cumulative ratio of transactions that were DealShield-eligible (0=bad, 1=good).\r\n df[col+'_fracDSEligible1DivTransactions'] = (df[col+'_numDSEligible1']/df[col+'_numTransactions'])\r\n df[col+'_fracDSEligible1DivTransactions'].fillna(value=1, inplace=True)\r\n ####################\r\n # DSEligible and Returned\r\n # Note:\r\n # * DealShield-purchased ==> Returned != -1 (not null)\r\n # * below requires\r\n # DSEligible == 0 ==> Returned == -1 (is null)\r\n # Returned != -1 (not null) ==> DSEligible == 1\r\n assert (df.loc[df['DSEligible']==0, 'Returned'] == -1).all()\r\n assert (df.loc[df['Returned']!=-1, 'DSEligible'] == 1).all()\r\n # Cumulative count of transactions that were DealShield-eligible and DealShield-purchased.\r\n df_tmp = df[[col, 'Returned']].copy()\r\n df_tmp['ReturnedNotNull'] = df_tmp['Returned'] != -1\r\n df[col+'_numReturnedNotNull'] = df_tmp[[col, 'ReturnedNotNull']].groupby(by=col)['ReturnedNotNull'].cumsum().astype(int)\r\n df[col+'_numReturnedNotNull'].fillna(value=0, inplace=True)\r\n del df_tmp\r\n # Cumulative ratio of DealShield-eligible transactions that were DealShield-purchased (0=mode).\r\n 
df[col+'_fracReturnedNotNullDivDSEligible1'] = df[col+'_numReturnedNotNull']/df[col+'_numDSEligible1']\r\n df[col+'_fracReturnedNotNullDivDSEligible1'].fillna(value=0, inplace=True)\r\n # Cumulative count of transactions that were DealShield-elegible and DealShield-purchased and DealShield-returned.\r\n df_tmp = df[[col, 'Returned']].copy()\r\n df_tmp['Returned1'] = df_tmp['Returned'] == 1\r\n df[col+'_numReturned1'] = df_tmp[[col, 'Returned1']].groupby(by=col)['Returned1'].cumsum().astype(int)\r\n df[col+'_numReturned1'].fillna(value=0, inplace=True)\r\n del df_tmp\r\n # Cumulative ratio of DealShield-eligible, DealShield-purchased transactions that were DealShield-returned (0=good, 1=bad).\r\n # Note: BuyerID_fracReturned1DivReturnedNotNull is the cumulative return rate for a buyer.\r\n df[col+'_fracReturned1DivReturnedNotNull'] = df[col+'_numReturned1']/df[col+'_numReturnedNotNull']\r\n df[col+'_fracReturned1DivReturnedNotNull'].fillna(value=0, inplace=True)\r\n # Check that weighted average of return rate equals overall return rate.\r\n # Note: Requires groups sorted by date, ascending.\r\n assert np.isclose(\r\n (df[[col, col+'_fracReturned1DivReturnedNotNull', col+'_numReturnedNotNull']].groupby(by=col).last().product(axis=1).sum()/\\\r\n df[[col, col+'_numReturnedNotNull']].groupby(by=col).last().sum()).values[0],\r\n sum(df['Returned']==1)/sum(df['Returned'] != -1),\r\n equal_nan=True)\r\n ####################\r\n # DSEligible and Returned_asm\r\n # NOTE:\r\n # * Below requires\r\n # DSEligible == 0 ==> Returned_asm == 1\r\n # Returned_asm == 0 ==> DSEligible == 1\r\n assert (df.loc[df['DSEligible']==0, 'Returned_asm'] == 1).all()\r\n assert (df.loc[df['Returned_asm']==0, 'DSEligible'] == 1).all()\r\n # Cumulative number of transactions that were assumed to be returned.\r\n df_tmp = df[[col, 'Returned_asm']].copy()\r\n df_tmp['Returnedasm1'] = df_tmp['Returned_asm'] == 1\r\n df[col+'_numReturnedasm1'] = df_tmp[[col, 'Returnedasm1']].groupby(by=col)['Returnedasm1'].cumsum().astype(int)\r\n df[col+'_numReturnedasm1'].fillna(value=0, inplace=True)\r\n del df_tmp\r\n # Cumulative ratio of transactions that were assumed to be returned (0=mode).\r\n df[col+'_fracReturnedasm1DivTransactions'] = df[col+'_numReturnedasm1']/df[col+'_numTransactions']\r\n df[col+'_fracReturnedasm1DivTransactions'].fillna(value=0, inplace=True)\r\n # Check that weighted average of assumed return rate equals overall assumed return rate.\r\n assert np.isclose(\r\n (df[[col, col+'_fracReturnedasm1DivTransactions', col+'_numTransactions']].groupby(by=col).last().product(axis=1).sum()/\\\r\n df[[col, col+'_numTransactions']].groupby(by=col).last().sum()).values[0],\r\n sum(df['Returned_asm']==1)/sum(df['Returned_asm'] != -1),\r\n equal_nan=True)\r\n # Note:\r\n # * Number of transactions that were DealShield-eligible and assumed to be returned ==\r\n # number of transactions that were DealShield-elegible and DealShield-purchased and DealShield-returned\r\n # (numReturned1)\r\n return df", "def make_data(input_filepath, output_filepath):\n\n df_train = pd.read_csv(input_filepath+'train_u6lujuX_CVtuZ9i.csv', index_col=0)\n df_test = pd.read_csv(input_filepath+'test_Y3wMUE5_7gLdaTN.csv', index_col=0)\n print('Sizes', df_train.shape, df_test.shape)\n print(\"Outcome dispersion:\\n\", df_train['Loan_Status'].value_counts())\n\n\n # recode and save outcome vector\n y = df_train['Loan_Status'].map({'N': 0, 'Y': 1})\n\n del df_train['Loan_Status']\n\n # all in one dataframe\n df = pd.concat([df_train, df_test])\n 
print(df.shape)\n\n from src.features.build_features import make_features\n df = make_features(df)\n\n # Divide data on train and test again and save\n data_train = df[df.index.isin(df_train.index)]\n data_test = df[df.index.isin(df_test.index)]\n print(data_train.shape, data_test.shape)\n\n data_tmp = data_train.copy()\n data_tmp['y'] = y\n\n\n data_tmp.to_csv(output_filepath + 'train_ready.csv', index=False)\n data_test.to_csv(output_filepath + 'test_ready.csv', index=False)\n id_test = pd.DataFrame(data=df_test.index, columns=['Loan_ID'])\n id_test.to_csv(output_filepath + 'id_test.csv', index=False)", "def mistakes_dataframe(filename, outfilename):\n counter = 0\n data_list = []\n list_of_lists = []\n feature_dict = dict()\n mistake_counter = 0\n \n #The crf file only makes use of the token and assigns a label. \n #For the mistakes file, we are using the features of the gold file.\n #The features of the gold file are used together with the labels of the \n #crf file to provide the reader with a better understanding of the mistakes.\n if filename == crf_file:\n file_rows = []\n for system, gold in zip(file_to_listrows(crf_file), file_to_listrows(dev_file)):\n system_label = [system[-1]]\n line = gold + system_label\n file_rows.append(line)\n else: \n #The baseline and SVM classifier have a file with all the features \n #present, for that reason we just apply the file_to_listrows-function.\n file_rows = file_to_listrows(filename)\n \n for features in file_rows[1:]:\n counter += 1\n mistake_counter += 1\n feature_dict = {\n 'IndexInDataset': counter+1, #The number from the original \n #dataset is inserted so that the \n #tokens are easy to find.\n 'Mistake-type': None,\n 'Token': features[0],\n 'lemma': features[1],\n 'UPOS': features[2],\n 'XPOS': features[3],\n 'DepRel': features[4],\n 'head': features[5],\n 'PrevTok': features[6],\n 'PrevPOS': features[7],\n 'NextTok': features[8],\n 'NextPOS': features[9],\n 'NegPrefix': features[10],\n 'NegPostfix': features[11],\n 'NegExpList': features[12],\n 'GoldLabel': features[13],\n 'SystemLabel': features[14] #This is the label that the system gave to the token\n }\n if features[13] == 'O' and features[14] =='NEG':\n feature_dict['Mistake-type'] = 'FalsePositive'\n elif features[13] == 'NEG' and features[14] == 'O':\n feature_dict['Mistake-type'] = 'FalseNegative'\n \n data_list.append(feature_dict)\n if counter == 13567: #The last line of every classifier is empty, to prevent the code from breaking,this if-statement is inserted.\n break\n \n filename = filename.replace('-out.conll', '')\n mistakes = f'This system ({filename}) made {mistake_counter} mistakes' #The function shows the amount of mistakes the system made\n df = pd.DataFrame(data_list)\n df.to_csv(outfilename, sep='\\t')\n return data_list, df, mistakes #The list of dictionaries, together with the dataframe and the mistakes are returned", "def gen_features(log_file_path: str, out_path: str):\n raise RuntimeError(\"Feature extraction is not supported yet in AutoScheduler dialect\")", "def make_claim_df(claim_list, columns = ['Sl','Name of Bank','Name of Branch','A/C Number (15 digit)','A/C Title','Amount of Remittance in BDT','Date of A/C Credit','Remittance Received through BEFTN/RTGS','Name of Remittance Collecting/BEFTN Processing Bank','Date of Claim']):\n sl=[]\n nrbc_bank = []\n branch = []\n ac_no = []\n ac_title = []\n amount=[]\n date_account_credit=[]\n channel = []\n other_bank=[]\n claim_date=[]\n i=1\n for claim in claim_list:\n sl.append(i)\n i=i+1\n 
nrbc_bank.append(\"NRBC Bank Ltd.\")\n branch.append(claim.branch.name.upper())\n ac_no.append(claim.account_no)\n ac_title.append(claim.account_title)\n amount.append(claim.remittance_amount)\n date_account_credit.append(claim.date_account_credit)\n channel.append(claim.get_channel_display())\n other_bank.append(claim.collecting_bank.name)\n claim_date.append(claim.date_claim.date())\n dc = {\n 'SL':sl,\n 'Name of Bank':nrbc_bank,\n 'Name of Branch': branch,\n 'A/C Number': ac_no,\n 'A/C Title': ac_title,\n 'Amount of Remittance in BDT': amount,\n 'Date of A/C Credit': date_account_credit,\n 'Remittance Received Through BEFTN/RTGS': channel,\n 'Name of Remittance Processing Bank': other_bank,\n 'Date of Claim': claim_date\n }\n df = pd.DataFrame(dc)\n return df.sort_values(by=['Name of Remittance Processing Bank',])", "def extract_temporal_info(self, featurelist, strExpDate, strOnsetDate, strReceiveDate):\n \n expDateInput = self.parse_time_string(strExpDate)\n onsetDateInput = self.parse_time_string(strOnsetDate) \n receiveDate = self.parse_time_string(strReceiveDate) \n \n self.exposureDate = expDateInput\n self.onsetDate = onsetDateInput\n self.receiveDate = receiveDate\n self.exposureDateConfidence = 0\n self.onsetDateConfidence = 0\n \n ##: Obtain timex list\n timexList = timexan.annotateTimexes(self.text, expDateInput) \n \n self.sentence_full_tags = self.create_sentence_full_tags(featurelist, timexList)\n \n timexList = self.preprocess_timex_list(timexList, featurelist)\n \n ###: divide features that contain multiple timexes\n featurelist = self.divide_feature_containing_multiple_timexes(featurelist, timexList)\n \n featurelist = self.create_feature_timex_association(featurelist, timexList)\n \n timexList = self.construct_timeline(timexList, featurelist)\n \n# (expDate, onsetDate, state) = self.calculate_exposure_onset_dates(\n# timexList, featurelist, sentences, taggedSentences, expDateInput, onsetDateInput, expDate)\n \n featurelist = self.process_feature_durations(featurelist)\n \n featurelist = self.postprocess_features(featurelist)\n \n if self.exposureDateConfidence==1:\n if self.onsetDateConfidence==1:\n datesConfidence = 1\n else:\n datesConfidence = 0.9\n else:\n datesConfidence = 0.8\n \n ##: Create DocumentFeature object for return\n docFeature = DocumentFeature(featurelist, timexList, self.exposureDate, self.onsetDate, self.receiveDate, datesConfidence, expDateInput, onsetDateInput) \n \n return docFeature", "def featureprepare(self,df):\n try:\n df = self.featureselection(df)\n emp_len_dict= pickleload(self.dict_employ_len) # Load emp len\n df['emp_length'] = df['emp_length'].map(emp_len_dict)\n df['Long_emp_length'] = df['emp_length'].apply(lambda x: 'Yes' if x == 10 else 'No') # creating new feature\n df[\"emp_title\"].fillna('Missing', inplace=True)\n\n # Handling missing numerical value\n dict_Mean_var = pickleload(self.dict_Mean_var)\n for col, mean_val in dict_Mean_var.items():\n df[col].fillna(mean_val, inplace=True)\n\n # Handling rare values\n Freqlabels = pickleload(self.Freqlabels)\n for variable, frequent_labels in Freqlabels.items():\n df[variable] = np.where(df[variable].isin(frequent_labels), df[variable], 'Rare')\n\n # Encoding Categorical features\n x = pickleload(self.labelEncoder)\n for features, labels in x.items():\n df.loc[:, features] = labels.transform(df.loc[:, features])\n return df\n except Exception as e:\n self._Logger.error(\"Error in feature preparation: {}\".format(e))", "def create_features(df,rsi_window = 14,macd_feat = [12,26,9]):\n 
df.dropna(inplace=True)\n ## day and month\n df['Date'] = pd.to_datetime(df['Date'])\n df['Month'] = df['Date'].dt.month\n df['dayowk'] = df['Date'].dt.dayofweek\n df = pd.get_dummies(data = df,columns = ['Month','dayowk'])\n \n ##Previos n-day pct_changes\n df['1day_pct'] = df['Adj Close'].pct_change()\n df['2day_pct'] = df['Adj Close'].pct_change(periods = 2)\n df['3day_pct'] = df['Adj Close'].pct_change(periods = 3)\n df['4day_pct'] = df['Adj Close'].pct_change(periods = 4)\n df['5day_pct'] = df['Adj Close'].pct_change(periods = 5)\n df['7day_pct'] = df['Adj Close'].pct_change(periods = 7)\n \n ##Cumulative sum of 1day_pct\n df['1day_pct_cs'] = df['Adj Close'].pct_change().cumsum()\n \n ##EWMA of 7, 50 and 200 days\n df['ewma_7'] = df['Adj Close'].ewm(span=7).mean()/df['Adj Close']\n df['ewma_50'] = df['Adj Close'].ewm(span=50).mean()/df['Adj Close']\n df['ewma_200'] = df['Adj Close'].ewm(span=200).mean()/df['Adj Close']\n ## Golden Cross vs Death Cross etc.\n #df['7g(50&200)'] = (df['ewma_7'] > df['ewma_50']) & (df['ewma_7'] > df['ewma_200'])\n #df['7l(50&200)'] = (df['ewma_7'] < df['ewma_50']) & (df['ewma_7'] < df['ewma_200'])\n #df['7g50'] = (df['ewma_7'] > df['ewma_50']) & (df['ewma_7'] < df['ewma_200'])\n #df['7g200'] = (df['ewma_7'] < df['ewma_50']) & (df['ewma_7'] > df['ewma_200'])\n \n ##RSI and MACD\n df = RSI(df,14)\n df = MACD_mod(df,nl=macd_feat[0],nh=macd_feat[1],nsig=macd_feat[2])\n \n df['day_var'] = (df['High'] - df['Low'])/df['Close']## Days variance\n df['open_close'] = (df['Open'] - df['Close'])/df['Close'] ## Days Open-Close\n df['high_close'] = (df['High'] - df['Close'])/df['Close'] ##Days High-Close\n df['open_prev_close'] = (df['Open'] - df['Close'].shift(1))/df['Close'] ## Days open - Previos Dyas Close\n \n ##Classification target\n df['target'] = round((np.sign(df['1day_pct']).shift(-1)+1)/2) ## Target for classification\n #df['1_day_target'] = df['Adj Close'].shift(-1) - df['Adj Close'] ## Target for Regression\n #df['target2'] = round((np.sign(df['1day_pct']).shift(-1)+1)/2)## Will the price go up intra-day\n \n ## IS the stock Overbought or Oversold based on RSI?\n df['RSI_overbought'] = df['RSI']>70\n df['RSI_oversold'] = df['RSI']<30\n \n \n #df.drop(['Open','High','Low','Close'],axis=1,inplace=True)\n# df = df.dropna()\n \n #df = df.reset_index(drop=True)\n \n ## Calculating how large the previos hot and cold streaks were\n f = 0\n df['prev_hot_streak'] = np.zeros(df.shape[0])\n for i in range(df.shape[0]-1):\n if df['target'][i] ==1:\n f += 1\n if df['target'][i+1] ==0:\n df['prev_hot_streak'][i+1] = f\n f = 0\n for i in range(1,df.shape[0]):\n #print(i)\n if df['prev_hot_streak'][i]==0:\n df['prev_hot_streak'][i]=df['prev_hot_streak'][i-1]\n \n \n df['prev_cold_streak'] = np.zeros(df.shape[0])\n for i in range(df.shape[0]-1):\n if df['target'][i] ==0:\n f += 1\n if df['target'][i+1] ==1:\n df['prev_cold_streak'][i+1] = f\n f = 0\n\n for i in range(1,df.shape[0]):\n #print(i)\n if df['prev_cold_streak'][i]==0:\n df['prev_cold_streak'][i] = df['prev_cold_streak'][i-1]\n \n ## Calculating current hot and cold streaks\n df['current_hot_streak'] = np.zeros(df.shape[0])\n df['current_cold_streak'] = np.zeros(df.shape[0])\n fhot=0\n fcold=0\n for i in range(df.shape[0]):\n if df['target'][i]==1:\n fhot += 1\n fcold = 0\n df['current_hot_streak'][i] = fhot\n elif df['target'][i]==0:\n fcold += 1\n fhot = 0\n df['current_cold_streak'][i] = fcold\n \n df['prev_hot_streak'] = df['prev_hot_streak'].shift(1)\n df['prev_cold_streak'] = 
df['prev_cold_streak'].shift(1)\n df['current_hot_streak'] = df['current_hot_streak'].shift(1)\n df['current_cold_streak'] = df['current_cold_streak'].shift(1)\n \n ## Combinations of previos streaks\n df['prev_current_hot'] = df['prev_hot_streak'] - df['current_hot_streak']\n df['prev_current_cold'] = df['prev_cold_streak'] - df['current_cold_streak']\n df['current_hot_prev_cold'] = df['current_hot_streak'] - df['prev_cold_streak']\n df['current_cold_prev_hot'] = df['current_cold_streak'] - df['prev_hot_streak']\n \n ##Calculating days since max\n current_max = df['Adj Close'][0]\n df['days_from_max'] = np.zeros(df.shape[0])\n df['pct_from_max'] = np.zeros(df.shape[0])\n #print('blah')\n for i in range(1,df.shape[0]):\n if df['Adj Close'][i] > current_max:\n current_max = df['Adj Close'][i]\n # print(current_max)\n else:\n df['days_from_max'][i] = df['days_from_max'][i-1]+1\n df['pct_from_max'][i] = (df['Adj Close'][i]-current_max)/current_max\n #print(df['days_from_max'][i])\n \n \n \n df.dropna(inplace=True)\n df = df.reset_index(drop=True)\n return df", "def generate_features(df):\n df_new = pd.DataFrame()\n \n # 6 original features\n df_new['open'] = df['open']\n df_new['open_1'] = df['open'].shift(1)\n df_new['close_1'] = df['close'].shift(1)\n df_new['high_1'] = df['high'].shift(1)\n df_new['low_1'] = df['low'].shift(1)\n df_new['volume_1'] = df['volume'].shift(1)\n \n # 50 original features\n # average price\n df_new['avg_price_5'] = df['close'].rolling(window=5).mean().shift(1)\n df_new['avg_price_30'] = df['close'].rolling(window=21).mean().shift(1)\n df_new['avg_price_90'] = df['close'].rolling(window=63).mean().shift(1)\n df_new['avg_price_365'] = df['close'].rolling(window=252).mean().shift(1)\n \n # average price ratio\n df_new['ratio_avg_price_5_30'] = df_new['avg_price_5'] / df_new['avg_price_30']\n df_new['ratio_avg_price_905_'] = df_new['avg_price_5'] / df_new['avg_price_90']\n df_new['ratio_avg_price_5_365'] = df_new['avg_price_5'] / df_new['avg_price_365']\n df_new['ratio_avg_price_30_90'] = df_new['avg_price_30'] / df_new['avg_price_90']\n df_new['ratio_avg_price_30_365'] = df_new['avg_price_30'] / df_new['avg_price_365']\n df_new['ratio_avg_price_90_365'] = df_new['avg_price_90'] / df_new['avg_price_365'] \n \n \n # average volume\n df_new['avg_volume_5'] = df['volume'].rolling(window=5).mean().shift(1)\n df_new['avg_volume_30'] = df['volume'].rolling(window=21).mean().shift(1)\n df_new['avg_volume_90'] = df['volume'].rolling(window=63).mean().shift(1)\n df_new['avg_volume_365'] = df['volume'].rolling(window=252).mean().shift(1)\n \n #average volume ratio\n df_new['ratio_avg_volume_5_30'] = df_new['avg_volume_5'] / df_new['avg_volume_30']\n df_new['ratio_avg_volumee_5_90'] = df_new['avg_volume_5'] / df_new['avg_volume_90'] \n df_new['ratio_avg_volume_5_365'] = df_new['avg_volume_5'] / df_new['avg_volume_365']\n df_new['ratio_avg_volume_30_90'] = df_new['avg_volume_30'] / df_new['avg_volume_90']\n df_new['ratio_avg_volume_30_365'] = df_new['avg_volume_30'] / df_new['avg_volume_365']\n df_new['ratio_avg_volume_90_365'] = df_new['avg_volume_90'] / df_new['avg_volume_365'] \n \n \n # standard deviation of prices\n df_new['std_price_5'] = df['close'].rolling(window=5).std().shift(1)\n df_new['std_price_30'] = df['close'].rolling(window=21).std().shift(1)\n df_new['std_price_90'] = df['close'].rolling(window=63).std().shift(1) \n df_new['std_price_365'] = df['close'].rolling(window=252).std().shift(1)\n \n # standard deviation ratio of prices \n 
df_new['ratio_std_price_5_30'] = df_new['std_price_5'] / df_new['std_price_30']\n df_new['ratio_std_price_5_90'] = df_new['std_price_5'] / df_new['std_price_90']\n df_new['ratio_std_price_5_365'] = df_new['std_price_5'] / df_new['std_price_365']\n df_new['ratio_std_price_30_90'] = df_new['std_price_30'] / df_new['std_price_90'] \n df_new['ratio_std_price_30_365'] = df_new['std_price_30'] / df_new['std_price_365'] \n df_new['ratio_std_price_90_365'] = df_new['std_price_90'] / df_new['std_price_365'] \n \n \n # standard deviation of volumes\n df_new['std_volume_5'] = df['volume'].rolling(window=5).std().shift(1)\n df_new['std_volume_30'] = df['volume'].rolling(window=21).std().shift(1)\n df_new['std_volume_90'] = df['volume'].rolling(window=63).std().shift(1)\n df_new['std_volume_365'] = df['volume'].rolling(window=252).std().shift(1)\n \n #standard deviation ratio of volumes\n df_new['ratio_std_volume_5_30'] = df_new['std_volume_5'] / df_new['std_volume_30']\n df_new['ratio_std_volume_5_90'] = df_new['std_volume_5'] / df_new['std_volume_90']\n df_new['ratio_std_volume_5_365'] = df_new['std_volume_5'] / df_new['std_volume_365'] \n df_new['ratio_std_volume_30_90'] = df_new['std_volume_30'] / df_new['std_volume_90']\n df_new['ratio_std_volume_30_365'] = df_new['std_volume_30'] / df_new['std_volume_365']\n df_new['ratio_std_volume_90_365'] = df_new['std_volume_90'] / df_new['std_volume_365'] \n \n # return\n df_new['return_1'] = ((df['close'] - df['close'].shift(1)) / df['close'].shift(1)).shift(1)\n df_new['return_5'] = ((df['close'] - df['close'].shift(5)) / df['close'].shift(5)).shift(1)\n df_new['return_30'] = ((df['close'] - df['close'].shift(21)) / df['close'].shift(21)).shift(1)\n df_new['return_90'] = ((df['close'] - df['close'].shift(63)) / df['close'].shift(63)).shift(1) \n df_new['return_365'] = ((df['close'] - df['close'].shift(252)) / df['close'].shift(252)).shift(1)\n \n #average of return\n df_new['moving_avg_5'] = df_new['return_1'].rolling(window=5).mean()\n df_new['moving_avg_30'] = df_new['return_1'].rolling(window=21).mean()\n df_new['moving_avg_90'] = df_new['return_1'].rolling(window=63).mean()\n df_new['moving_avg_365'] = df_new['return_1'].rolling(window=252).mean()\n \n # the target\n df_new['close'] = df['close']\n df_new = df_new.dropna(axis=0)\n return df_new", "def generate_features(self, df):\n df = df.reset_index()\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n return extract_features(df, column_id=\"id\", impute_function=impute,\n default_fc_parameters=self.extraction_settings)", "def create_date_data(gt_id,\n target_horizon,\n experiment,\n date_features=[\"mei\", \"mjo\",\n \"pca_sst_2010\", \"pca_icec_2010\",\n \"pca_wind_hgt_10_2010\",\n \"pca_wind_hgt_100_2010\",\n \"pca_wind_hgt_500_2010\",\n \"pca_wind_hgt_850_2010\"]):\n\n time_start = time.time()\n\n # --------\n # Prepare experiment cache directory and saved file names\n # --------\n\n # Name of cache directory for storing non-submission-date specific\n # intermediate files\n cache_dir = os.path.join('results', experiment, 'shared',\n '{}_{}'.format(gt_id, target_horizon))\n # e.g., cache_dir = 'results/regression/shared/contest_precip_34w'\n\n # if cache_dir doesn't exist, create it\n if not os.path.isdir(cache_dir):\n os.makedirs(cache_dir)\n\n # Filenames for data file to be stored in cache_dir\n date_data_file = os.path.join(\n cache_dir, \"date_data-{}_{}.h5\".format(gt_id, target_horizon))\n\n # --------\n # Creates date_data dataframe.\n # --------\n # Get number of days 
between start date of observation period used for prediction\n # (2 weeks behind) and start date of target period (2 or 4 weeks ahead)\n start_deltas = [get_start_delta(target_horizon, gt_id)\n for gt_id in date_features]\n\n # Load masked date features\n print \"Loading date features\"\n date_data = get_date_features(gt_ids=date_features, gt_shifts=start_deltas,\n first_year=get_first_year(gt_id))\n\n print \"Loading additional date features\"\n t = time.time()\n if 'mjo' in date_features:\n # Add cosine and sine transforms of MJO phase\n mjo_phase_name = 'phase_shift'+str(get_start_delta(target_horizon, 'mjo'))\n date_data['cos_'+mjo_phase_name] = np.cos((2*np.pi*date_data[mjo_phase_name])/8)\n date_data['sin_'+mjo_phase_name] = np.sin((2*np.pi*date_data[mjo_phase_name])/8)\n print \"Elapsed: {}s\".format(time.time() - t)\n # Save date features to disk\n print \"Saving date features to \"+date_data_file\n t = time.time()\n date_data.to_hdf(date_data_file, key=\"data\", mode=\"w\")\n subprocess.call(\"chmod a+w \"+date_data_file, shell=True)\n print \"Elapsed: {}s\".format(time.time() - t)\n\n print \"Finished generating date_data matrix.\"\n print \"Total time elapsed: {}s\".format(time.time()-time_start)\n return list(date_data)", "def features_customers(df_customers):\n for i in PREMIER_VALS:\n k = 'premier_' + str(i)\n df_customers[k] = np.where(df_customers['premier'] == i, 1, 0)\n\n df_customers['age'] = datetime.now().date().year - df_customers['yearOfBirth']\n df_customers['male'] = np.where(df_customers['gender'] == 'M', 1, 0)\n df_customers['female'] = np.where(df_customers['gender'] == 'F', 1, 0)\n df_customers['days_in_asos'] = (datetime.now().date() - df_customers['dateCreated']).dt.days\n\n logger.info(\"Features from the customers table: {shape} {dtypes}\"\n .format(shape=df_customers.shape, dtypes=df_customers.dtypes))\n return df_customers", "def finalizeFeatures(featureSet, path_out):\r\n features = featureSet.columns.tolist()\r\n features.remove('url')\r\n features.remove('country')\r\n features.remove('file extension')\r\n for f in features:\r\n featureSet[f] = featureSet[f].astype(int)\r\n\r\n # Data Imputation by filling NA to the country and file extension column\r\n featureSet['country']=featureSet['country'].fillna('None')\r\n featureSet['file extension']=featureSet['file extension'].fillna('None')\r\n logger.info('The NAs in feature columns country and file extension have been filled with string None.')\r\n \r\n # clean country code\r\n country = featureSet.country\r\n new_country = []\r\n for i in range(len(country)):\r\n c = str(country[i])\r\n if c.upper() in iso3166.countries_by_name:\r\n new_country.append(iso3166.countries_by_name[c.upper()][1])\r\n elif len(c) == 2 and not c.isupper():\r\n new_country.append(c.upper())\r\n elif len(c) != 2 and c != 'REDACTED FOR PRIVACY':\r\n new_country.append('None')\r\n else:\r\n new_country.append(c)\r\n featureSet['country'] = new_country\r\n\r\n ## Create a new feature called Risk Indicator\r\n c1 = featureSet['no of dots'] >= 1\r\n c2 = featureSet['no of hyphen'] >= 1\r\n c3 = featureSet['no of subdir'] >= 6\r\n c4 = featureSet['contains IP'] == 1\r\n c5 = featureSet['presence of Suspicious_TLD'] == 1\r\n\r\n featureSet['risk indicator'] = np.array([c1 | c2| c3| c4| c5]).astype(int).T\r\n logger.info('New feature Risk Indicator has been created!')\r\n featureSet.to_csv(path_out)\r\n logger.info('Feature Set has been saved to the data folder.')", "def classify_incidents(in_features, date_field, report_location, 
repeatdist,\n spatial_bands, temporal_bands, out_lines_dir,\n out_lines_name, *args):\n try:\n # Fix for potential issue with xlsx files as report locations\n if not path.isdir(report_location):\n report_location = path.dirname(report_location)\n\n # Build sorted lists of band values\n spatial_bands = [float(b) for b in spatial_bands.split(';')]\n temporal_bands = [float(b) for b in temporal_bands.split(';')]\n\n repeatdist = float(repeatdist)\n spatial_bands.append(repeatdist)\n\n spatial_bands = list(set(spatial_bands))\n temporal_bands = list(set(temporal_bands))\n\n spatial_bands.sort()\n temporal_bands.sort()\n\n arcpy.env.overwriteOutput = True\n\n # Report run time used for file names\n now = dt.strftime(dt.now(), \"%Y-%m-%d_%H-%M-%S\")\n now_nice = dt.strftime(dt.now(), \"%Y-%m-%d %H:%M:%S\")\n\n # Check for and delete existing fields necessary for classification\n reset_fields(in_features)\n\n # Get name of OID field\n oidname = arcpy.Describe(in_features).oidFieldName\n\n # Get sorted list of unique incident date values\n with arcpy.da.SearchCursor(in_features, date_field) as rows:\n date_vals = [row[0] for row in rows]\n\n date_vals = list(set(date_vals))\n date_vals.sort()\n\n # Range of incident dates\n min_date = date_vals[0]\n max_date = date_vals[-1]\n\n # Keep track of origins and nrs\n oids = []\n nrids = []\n rids = []\n\n # Connecting line segments and table rows\n new_lines = []\n new_rows = []\n\n # Build empty dictionary to hold type tallies\n type_counts = {}\n for sband in spatial_bands:\n type_counts[sband] = {}\n for tband in temporal_bands:\n type_counts[sband][tband] = {'oids': [],\n 'nrids': [],\n 'rids': []}\n\n # Value lists for half life calculations\n all_distances = {}\n for sband in spatial_bands:\n all_distances[sband] = []\n\n all_lives = {}\n for tband in temporal_bands:\n all_lives[tband] = []\n\n found_connections = []\n\n # Build table of all records within the max spatial band of anther feature\n near_table = arcpy.GenerateNearTable_analysis(in_features, in_features, search_radius=temporal_bands[-1], closest='ALL', method='GEODESIC')\n\n # Identify and process relevent near features\n with arcpy.da.SearchCursor(near_table, field_names=['IN_FID', 'NEAR_FID', 'NEAR_DIST']) as nearrows:\n\n # Process each identified connection within the spatial bands\n for nearrow in nearrows:\n dist = nearrow[2]\n if not dist <= spatial_bands[-1]:\n continue\n\n links= []\n\n # Find the two features that are part of the connection\n where_clause = \"\"\"{} in ({},{})\"\"\".format(oidname, nearrow[0], nearrow[1])\n fields = [oidname, date_field, z_value_field, 'SHAPE@X','SHAPE@Y']\n with arcpy.da.UpdateCursor(in_features, field_names=fields, where_clause=where_clause) as cur_link:\n for feat in cur_link:\n # Calculate the z values of each incident in the pair\n zval = feat[1] - min_date\n feat[2] = zval.days\n cur_link.updateRow(feat)\n links.append([feat[0], feat[1], feat[3], feat[4], feat[2]])\n\n # Identify which feature is the oldest and id it as the source\n if links[0][1] > links[1][1]:\n oid, odate, ox, oy, oz = links[1]\n fid, fdate, fx, fy, fz = links[0]\n\n else:\n oid, odate, ox, oy, oz = links[0]\n fid, fdate, fx, fy, fz = links[1]\n\n # test for new connection\n if (oid, fid) in found_connections:\n continue\n\n # Calculate the days between the two dates\n datediff = fdate - odate\n daydiff = datediff.days\n\n # only process rows within defined temporal bands\n if daydiff > temporal_bands[-1]:\n continue\n\n # Identify the spatial bands that are covered 
by this relationship and create a connecting line feature\n link_found = False\n for sband in spatial_bands:\n if dist <= sband:\n for tband in temporal_bands:\n if daydiff <= tband:\n if not link_found:\n # track distances and lives for half measures\n all_distances[sband].append(dist)\n all_lives[tband].append(daydiff)\n incident_sband = sband\n incident_tband = tband\n\n link_found = True\n\n # id classification\n if oid not in type_counts[sband][tband]['oids']:\n type_counts[sband][tband]['oids'].append(oid)\n if dist <= spatial_bands[0]:\n if fid not in type_counts[sband][tband]['rids']:\n type_counts[sband][tband]['rids'].append(fid)\n elif fid not in type_counts[sband][tband]['nrids']:\n type_counts[sband][tband]['nrids'].append(fid)\n\n if link_found:\n found_connections.append((oid, fid))\n\n # create connecting line from x, y, z values of two pts\n end = arcpy.Point(X=fx, Y=fy, Z=fz)\n start = arcpy.Point(X=ox, Y=oy, Z=oz)\n vertices = arcpy.Array([start, end])\n feature = arcpy.Polyline(vertices, None, True, False)\n new_lines.append([fid, oid, dist, daydiff, incident_sband, incident_tband, feature])\n\n # Delete near table\n arcpy.Delete_management(near_table)\n\n # Create feature class for connecting lines\n sr = arcpy.Describe(in_features).spatialReference\n connectors = arcpy.CreateFeatureclass_management(out_lines_dir,\n out_lines_name,\n 'POLYLINE',\n has_z='ENABLED',\n spatial_reference=sr)\n arcpy.AddField_management(connectors, 'FEATUREID', \"LONG\")\n arcpy.AddField_management(connectors, origin_feat_field, \"LONG\")\n arcpy.AddField_management(connectors, dist_orig_field, \"FLOAT\")\n arcpy.AddField_management(connectors, 'RPTDAYS', \"FLOAT\")\n arcpy.AddField_management(connectors, spatial_band_field, \"FLOAT\")\n arcpy.AddField_management(connectors, temporal_band_field, \"FLOAT\")\n\n # Insert connecting line features from the array of values\n fields = ['FEATUREID', origin_feat_field, dist_orig_field, 'RPTDAYS', spatial_band_field, temporal_band_field, 'SHAPE@']\n with arcpy.da.InsertCursor(connectors, fields) as rows:\n for new_line in new_lines:\n rows.insertRow(new_line)\n\n # Manage classification fields\n fieldnames = []\n for sband in spatial_bands:\n for tband in temporal_bands:\n fieldnames.append('s{}t{}'.format(int(sband), int(tband)))\n\n cur_fields = [f.name for f in arcpy.ListFields(in_features)]\n for fieldname in fieldnames:\n if fieldname in cur_fields:\n arcpy.DeleteField_management(in_features, fieldname)\n arcpy.AddField_management(in_features, fieldname, 'TEXT', field_length=2)\n\n # Classify & count incidents by type\n for sband in spatial_bands:\n for tband in temporal_bands:\n band = type_counts[sband][tband]\n type_counts[sband][tband]['oids'] = [id for id in band['oids'] if id not in band['nrids'] and id not in band['rids']]\n type_counts[sband][tband]['nrids'] = [id for id in band['nrids'] if id not in band['rids']]\n\n fields = [\"OID@\", date_field, z_value_field]\n fields.extend(fieldnames)\n\n with arcpy.da.UpdateCursor(in_features, fields) as rows:\n inc_count = 0\n for row in rows:\n inc_count += 1\n\n # calc z value if missing\n if not row[2]:\n zval = row[1] - min_date\n row[2] = zval.days\n\n classifications = []\n\n for sband in spatial_bands:\n for tband in temporal_bands:\n if row[0] in type_counts[sband][tband]['nrids']:\n classifications.append('NR')\n elif row[0] in type_counts[sband][tband]['rids']:\n classifications.append('R')\n elif row[0] in type_counts[sband][tband]['oids']:\n classifications.append('O')\n else:\n 
classifications.append(None)\n row[3:] = classifications\n\n rows.updateRow(row)\n\n # Build empty dictionary to hold spatial and temporal band tallies\n band_counts = {}\n for sband in spatial_bands:\n band_counts[sband] = {}\n for tband in temporal_bands:\n band_counts[sband][tband] = 0\n\n for sband in spatial_bands:\n for tband in temporal_bands:\n if sband == spatial_bands[0]:\n band_counts[sband][tband] = len(type_counts[sband][tband]['rids'])\n else:\n band_counts[sband][tband] = len(type_counts[sband][tband]['nrids'])\n\n # Get unit of feature class spatial reference system\n try:\n unit = units[sr.linearUnitName]\n except KeyError:\n unit = ''\n\n # Get half-life and half-distance\n test_distances = []\n half_distances = {}\n for sband in spatial_bands:\n test_distances.extend(all_distances[sband])\n test_distances.sort()\n if len(test_distances) > 0:\n half_distances[sband] = test_distances[int(len(test_distances)/2)]\n else:\n half_distances[sband] = 'Not Calculated'\n\n test_lives = []\n half_lives = {}\n for tband in temporal_bands:\n test_lives.extend(all_lives[tband])\n test_lives.sort()\n if len(test_lives) > 0:\n half_lives[tband] = test_lives[int(len(test_lives)/2)]\n else:\n half_lives[tband] = 'Not Calculated'\n\n # Build report content\n report_header = ('Repeat and Near Repeat Incident Summary\\n'\n 'Created {}\\n'.format(now_nice))\n\n data_info = ('Data Source: {}\\n'\n 'Incident Date Range: {} - {}\\n'\n '# Incidents Processed: {}'.format(in_features, min_date, max_date, inc_count))\n\n## inc_type_reports = ''\n## console_type_rpts = ''\n##\n## for sband in spatial_bands:\n## for tband in temporal_bands:\n## cnt_o = len(type_counts[sband][tband]['oids'])\n## cnt_n = len(type_counts[sband][tband]['nrids'])\n## cnt_r = len(type_counts[sband][tband]['rids'])\n##\n## perc_o = \"{:.1f}\".format(100.0*float(cnt_o)/inc_count)\n## perc_n = \"{:.1f}\".format(100.0*float(cnt_n)/inc_count)\n## perc_r = \"{:.1f}\".format(100.0*float(cnt_r)/inc_count)\n##\n## inc_type_reports += ('Count and percentage of each type of incident in spatial band {}{} and temporal band {} days\\n'\n## ', Count, Percentage\\n'\n## 'All Incidents,{}, 100\\n'\n## 'Originators,{},{}\\n'\n## 'Near Repeats,{},{}\\n'\n## 'Repeats,{},{}\\n\\n'.format(sband, unit, tband,\n## inc_count,\n## cnt_o, perc_o,\n## cnt_n, perc_n,\n## cnt_r, perc_r))\n## console_type_rpts += ('Count and percentage of each type of incident in spatial band {}{} and temporal band {} days\\n'\n## ' Count Percentage\\n'\n## 'All Incidents {:^10} {:^13}\\n'\n## 'Originators {:^10} {:^13}\\n'\n## 'Near Repeats {:^10} {:^13}\\n'\n## 'Repeats {:^10} {:^13}\\n\\n'.format(sband, unit, tband,\n## inc_count, 100,\n## cnt_o, perc_o,\n## cnt_n, perc_n,\n## cnt_r, perc_r))\n\n half_lives_str = 'Estimated incident half-life\\n'\n half_lives_str_console = 'Estimated incident half-life\\n'\n for tband in temporal_bands:\n half_lives_str += '{} days temporal band, {:.1f} days\\n'.format(tband, half_lives[tband])\n half_lives_str_console += '{} days temporal band: {:.1f} days\\n'.format(tband, half_lives[tband])\n\n half_distance_str = 'Estimated incident half-distance\\n'\n half_distance_str_console = 'Estimated incident half-distance\\n'\n for sband in spatial_bands[1:]:\n half_distance_str += '{0} {1} spatial band, {2:.1f} {1}\\n'.format(sband, unit, half_distances[sband])\n half_distance_str_console += '{0} {1} spatial band: {2:.1f} {1}\\n'.format(sband, unit, half_distances[sband])\n\n temp_band_strs = [\"<={} days\".format(b) for b in 
temporal_bands]\n temporal_band_labels = ','.join(temp_band_strs)\n console_tband_labels = ' '.join(['{:^12}'.format(bnd) for bnd in temp_band_strs])\n\n counts_title = 'Number of Repeat and Near-Repeat incidents per spatial and temporal band\\n'\n percent_title = 'Percentage of all incidents classified as Repeat or Near-Repeat and appearing in each spatial and temporal band\\n'\n\n counts_header = ',{}\\n'.format(temporal_band_labels)\n console_counts_header = ' {}'.format(console_tband_labels)\n\n percent_header = ',{}\\n'.format(temporal_band_labels)\n console_perc_header = ' {}'.format(console_tband_labels)\n\n counts_table = \"\"\n percent_table = \"\"\n console_count = \"\"\n console_perc = \"\"\n\n row_sum = [0 for tband in temporal_bands]\n\n for sband in spatial_bands:\n\n # get temporal bands and their incident counts\n vals = [band_counts[sband][tband] for tband in temporal_bands]\n\n # Get spatial band count in each temporal band\n # Sums include counts from smaller bands\n## row_counts = [vals[tband] for tband in temporal_bands]\n## try:\n## row_sums = [sum(row_counts[0:i]) for i in xrange(1,len(row_counts)+1)]\n## except:\n## row_sums = [sum(row_counts[0:i]) for i in range(1,len(row_counts)+1)]\n##\n## row_sum = [x + y for (x, y) in zip(row_sums, row_sum)]\n row_perc = [100.0 * float(val)/inc_count for val in vals]\n\n # append counts & percentages to the table\n if sband == spatial_bands[0]:\n counts_table += '<={} {},{}\\n'.format(sband, unit, ','.join([str(cnt) for cnt in vals]))\n console_count += '{:>25} {}\\n'.format('<={} {}'.format(sband, unit), ' '.join(['{:^12}'.format(cnt) for cnt in vals]))\n percent_table += '<={} {},{}\\n'.format(sband, unit, ','.join([\"{:.1f}\".format(prc) for prc in row_perc]))\n console_perc += '{:>25} {}\\n'.format('<={} {}'.format(sband, unit), ' '.join(['{:^12}'.format(\"{:.1f}\".format(prc)) for prc in row_perc]))\n else:\n counts_table += '>{} to {} {},{}\\n'.format(spatial_bands[0], sband, unit, ','.join([str(cnt) for cnt in vals]))\n console_count += '{:>25} {}\\n'.format('>{} to {} {}'.format(spatial_bands[0], sband, unit), ' '.join(['{:^12}'.format(cnt) for cnt in vals]))\n percent_table += '>{} to {} {},{}\\n'.format(spatial_bands[0], sband, unit, ','.join([\"{:.1f}\".format(prc) for prc in row_perc]))\n console_perc += '{:>25} {}\\n'.format('>{} to {} {}'.format(spatial_bands[0], sband, unit), ' '.join(['{:^12}'.format(\"{:.1f}\".format(prc)) for prc in row_perc]))\n\n # Write report\n reportname = path.join(report_location, \"{}_{}.csv\".format('Summary', now))\n with open(reportname, 'w') as report:\n\n report.write(report_header)\n report.write('\\n')\n report.write(data_info)\n report.write('\\n')\n report.write(half_distance_str)\n report.write('\\n')\n report.write(half_lives_str)\n report.write('\\n')\n## report.write(inc_type_reports)\n report.write(counts_title)\n report.write(counts_header)\n report.write(counts_table)\n report.write('\\n')\n report.write(percent_title)\n report.write(percent_header)\n report.write(percent_table)\n\n arcpy.SetParameterAsText(9, path.join(out_lines_dir, out_lines_name))\n arcpy.AddMessage(\"\\nView incident summary report: {}\\n\".format(reportname))\n\n arcpy.AddMessage(report_header)\n arcpy.AddMessage('')\n arcpy.AddMessage(data_info)\n arcpy.AddMessage('')\n arcpy.AddMessage(half_distance_str_console)\n arcpy.AddMessage('')\n arcpy.AddMessage(half_lives_str_console)\n arcpy.AddMessage('')\n## arcpy.AddMessage(console_type_rpts)\n arcpy.AddMessage(counts_title)\n 
arcpy.AddMessage(console_counts_header)\n arcpy.AddMessage(console_count)\n arcpy.AddMessage('')\n arcpy.AddMessage(percent_title)\n arcpy.AddMessage(console_perc_header)\n arcpy.AddMessage(console_perc)\n\n## print(\"\\nView incident summary report: {}\\n\".format(reportname))\n##\n## print(report_header)\n## print('')\n## print(data_info)\n## print('')\n## print(half_distance_str_console)\n## print('')\n## print(half_lives_str_console)\n## print('')\n#### arcpy.AddMessage(console_type_rpts)\n## print(counts_title)\n## print(console_counts_header)\n## print(console_count)\n## print('')\n## print(percent_title)\n## print(console_perc_header)\n## print(console_perc)\n\n except arcpy.ExecuteError:\n # Get the tool error messages\n msgs = arcpy.GetMessages()\n arcpy.AddError(msgs)\n print(msgs)\n\n except:\n # Return error messages for use in script tool or Python Window\n arcpy.AddError(str(sys.exc_info()[1]))\n\n # Print Python error messages for use in Python / Python Window\n print(str(sys.exc_info()[1]) + \"\\n\")", "def gen_main_df(add_list: list):\r\n # 由Bert 计算得来的 sentiment信息\r\n if 'sentiment' not in data_manipulator.used_measure_list:\r\n data_manipulator.used_measure_list.append('sentiment')\r\n sentiment = data_manipulator.read_in_file(data_manipulator.get_file_names(root, 'datasets', 'daily_svm_sentiment_6class' , 'csv')[0],\r\n 'date', ['0'], 'sentiment') # 'daily_svm_sentiment_2class' '0', '1', '2', '3', '4', '5'\r\n data_manipulator.add_column(sentiment)\r\n # 中国CPI指数\r\n if 'cpi' in add_list and 'cpi' not in data_manipulator.used_measure_list:\r\n data_manipulator.used_measure_list.append('cpi')\r\n cpi = data_manipulator.read_in_file(data_manipulator.get_file_names(root, 'datasets', 'CPI', 'csv')[0],\r\n '日期', ['最新值', '涨跌幅', '近3月涨跌幅'], 'CPI')\r\n data_manipulator.add_column(cpi)\r\n # 上海银行间同业拆放利率\r\n if 'shibor' in add_list and 'shibor' not in data_manipulator.used_measure_list:\r\n data_manipulator.used_measure_list.append('shibor')\r\n shibor = data_manipulator.read_in_file(data_manipulator.get_file_names(root, 'datasets', 'shibor', 'csv')[0],\r\n 'date', ['on', '1w', '2w', '1m', '3m'], 'Shibor')\r\n data_manipulator.add_column(shibor)\r\n # 上证综指\r\n if 'shangzheng' in add_list and 'shangzheng' not in data_manipulator.used_measure_list:\r\n data_manipulator.used_measure_list.append('shangzheng')\r\n shangzheng = data_manipulator.read_in_file(\r\n data_manipulator.get_file_names(root, 'datasets', 'ShangZheng', 'csv')[0],\r\n 'trade_date', ['open', 'high', 'low', 'close', 'pct_chg', 'vol', 'amount',\r\n 'total_mv', 'float_mv', 'total_share', 'float_share',\r\n 'free_share', 'turnover_rate', 'turnover_rate_f', 'pe',\r\n 'pe_ttm', 'pb'],\r\n 'ShangZheng')\r\n data_manipulator.add_column(shangzheng)\r\n data_manipulator.shift_columns(['ShangZheng_pct_chg'], (-1,),\r\n add=True) # name has changed to shift-1_ShangZheng_pct_chg\r\n data_manipulator.rank_df_column(['shift-1_ShangZheng_pct_chg'],\r\n rank_list=[-10, -1, -0.5, 0, 0.5, 1, 10]) # rank_list=[-10, 0, 10] [-10, -1, -0.5, 0, 0.5, 1, 10]\r\n shangzheng_30min = data_manipulator.read_in_file(\r\n data_manipulator.get_file_names(root, 'datasets', 'ShangZheng_index_30min', 'csv')[0],\r\n 'trade_time', ['open', 'high', 'low', 'close', 'pct_chg', 'vol', 'amount'],\r\n 'ShangZheng_30min')\r\n data_manipulator.news_df_add_column(shangzheng_30min)\r\n data_manipulator.shift_minute_columns(['ShangZheng_30min_pct_chg'], (-1,),\r\n add=True)\r\n data_manipulator.rank_minute_df_columns(['shift-1_ShangZheng_30min_pct_chg'],\r\n 
rank_list=[-10, -1, -0.5, 0, 0.5, 1, 10]) # rank_list=[-10, 0, 10] [-10, -1, -0.5, 0, 0.5, 1, 10]\r\n\r\n # M2 广义货币量\r\n if 'm2' in add_list and 'm2' not in data_manipulator.used_measure_list:\r\n data_manipulator.used_measure_list.append('m2')\r\n m2 = data_manipulator.read_in_file(data_manipulator.get_file_names(root, 'datasets', 'M2', 'csv')[0],\r\n '月份', ['M2数量(亿元)', 'M2同比增长', 'M2环比增长'], 'M2')\r\n m2 = data_manipulator.complement_df(m2, 'date')\r\n data_manipulator.add_column(m2)\r\n\r\n # 人民币美元汇率\r\n if 'rmb_usd' in add_list and 'rmb_usd' not in data_manipulator.used_measure_list:\r\n data_manipulator.used_measure_list.append('rmb_usd')\r\n rmb_usd = data_manipulator.read_in_file(data_manipulator.get_file_names(root, 'datasets', 'RMB_USD', 'csv')[0],\r\n 'trade_date',\r\n ['bid_open', 'bid_close', 'bid_high', 'bid_low', 'ask_open',\r\n 'ask_close', 'ask_high', 'ask_low', 'tick_qty'], 'exchange')\r\n data_manipulator.add_column(rmb_usd)\r\n\r\n # 沪港通 沪深通 到岸 离岸资金流\r\n if 'fund_flow' in add_list and 'fund_flow' not in data_manipulator.used_measure_list:\r\n data_manipulator.used_measure_list.append('fund_flow')\r\n fund_flow = data_manipulator.read_in_file(\r\n data_manipulator.get_file_names(root, 'datasets', 'fund_flow', 'csv')[0],\r\n 'trade_date', ['north_money', 'south_money'], 'fund_flow')\r\n data_manipulator.add_column(fund_flow)\r\n\r\n # 债券回购日行情\r\n if 'repo' in add_list and 'repo' not in data_manipulator.used_measure_list:\r\n data_manipulator.used_measure_list.append('repo')\r\n repo = data_manipulator.read_in_file(data_manipulator.get_file_names(root, 'datasets', 'repo', 'csv')[0],\r\n 'trade_date', ['repo_maturity', 'open', 'high', 'low', 'close',\r\n 'amount'], 'repo', data_manipulator.cut_time_string,\r\n (0, 10,))\r\n repo = data_manipulator.select_col_group_by(repo, 'repo_repo_maturity', ['GC001', 'GC007', 'GC014', 'GC028'],\r\n 'date')\r\n data_manipulator.add_column(repo)\r\n\r\n # 新浪新闻\r\n if 'sina_news' in add_list and 'sina_news' not in data_manipulator.used_measure_list:\r\n data_manipulator.used_measure_list.append('sina_news')\r\n columns_type = {'create_time': str, 'text': str}\r\n sina_news = data_manipulator.read_in_file(data_manipulator.get_file_names(root, 'datasets', 'sina', 'csv')[0],\r\n 'create_time', ['text', ], 'sina', dtypes=columns_type)\r\n data_manipulator.add_change_news('sina', (7, 9), columns_type, sina_news, time_col_name='create_time')\r\n data_manipulator.add_minute_change_news('sina', columns_type, sina_news, time_col_name='create_time')\r\n if 'scale' in add_list:\r\n data_manipulator.scaling_col()\r\n if 'clear' in add_list:\r\n data_manipulator.clear()", "def create_dataframe_for_training(data):\n feature_column_name = 'X'\n #data_cp = data[['label']].copy()\n for i, row in tqdm(data.iterrows(), total=len(data)):\n all_features = f'{row.claimant} {row.claim} {row.article_content}'\n data.loc[i, feature_column_name] = all_features\n\n return data[feature_column_name]", "def create_raw_data():\r\n for csv_file in glob.glob(raw_loc + 'ticket_data/PRR_*'):\r\n filestring =os.path.basename(csv_file)\r\n index_start = 1\r\n j = 0\r\n start = dt.datetime.now()\r\n print('{} file started at {}'.format(filestring, start.strftime(\"%H:%M\")))\r\n df = pd.read_csv(csv_file, encoding = 'utf-8', parse_dates = ['Tick Issue Date'])\r\n df = df.rename(columns = {c: c.replace(' ', '') for c in df.columns})\r\n try:\r\n df.to_sql('raw_ticket_data', con = conn, if_exists='append')\r\n except:\r\n print('File read error')\r\n\r\n\r\n print ('{} file 
finished in {:03.2f} minutes '.format(filestring, (dt.datetime.now()-start).seconds / 60))", "def gen_feats_file(data_feats,ids,feat_filename):\n if not os.path.isfile(feat_filename) :\n new_feats=np.empty((0,2))\n for iid in ids:\n print(iid)\n indices = [i for i, v in enumerate(data_feats[:,0]) if iid in v]\n new_feats=np.vstack((new_feats,data_feats[indices,:]))\n np.savetxt(feat_filename,new_feats,fmt=\"%s\")", "def get_srs_features(df):\n\n #test result classified by labels\n li = df.TestResultsCode.tolist()\n labels = [ item.split('_') for item in li ]\n for item in labels:\n if len(item)==4:\n add = item[0]+item[1]\n item = item.insert( 0, add )\n for item in labels:\n if 'not' in item:\n item.remove('not')\n if 'detected' in item:\n item.remove('detected')\n\n\n #one-hot encode the test results\n disease = [ la[0] for la in labels ]\n spread = [ la[1] for la in labels ]\n risk = [ la[2] for la in labels ]\n\n disease_encode = pd.Series( disease ).str.get_dummies()\n spread_encode = pd.Series( spread ).str.get_dummies()\n risk_encode = pd.Series( risk ).str.get_dummies()\n\n disease_encode = pd.DataFrame( disease_encode )\n spread_encode = pd.DataFrame( spread_encode )\n risk_encode = pd.DataFrame( risk_encode)\n\n #interate one hot encoding of test results back to df\n df=df.drop(['PatientID', 'Address', 'CurrentLocation'],axis=1)\n df2 = df\n df2 = df2.drop(columns = 'TestResultsCode')\n\n results = pd.concat( [risk_encode, spread_encode, disease_encode], axis=1 )\n results = results.drop(['NotSpreader', 'NotatRisk'], axis=1)\n\n from sklearn.model_selection import train_test_split\n\n X_train, X_val, y_train, y_val = train_test_split( df2, results, test_size=0.33, random_state=33 ) #tr is test results numerically coded\n X_val, X_test, y_val, y_test = train_test_split( X_val, y_val , test_size=0.4, random_state=33)\n\n #REMOVED LOCATION FROM FEATURES\n\n # choosing from those features\n cols =['AgeGroup','AvgHouseholdExpenseOnPresents','AvgHouseholdExpenseOnSocialGames',\n 'AvgHouseholdExpenseParkingTicketsPerYear','AvgMinSportsPerDay','AvgTimeOnSocialMedia','AvgTimeOnStuding','BMI',\n 'DisciplineScore','HappinessScore','Job','NrCousins','StepsPerYear','SyndromeClass','TimeOnSocialActivities']\n\n X_train_sfs = X_train[cols]\n X_train_sfs = X_train_sfs.fillna(X_train_sfs.mean())\n\n from sklearn.neighbors import KNeighborsClassifier\n from mlxtend.feature_selection import SequentialFeatureSelector as SFS #import from mlxtend library\n knn = KNeighborsClassifier(n_neighbors=2) # ml_algo used = knn\n sfs = SFS(knn,\n k_features=10,\n forward=True, # if forward = True then SFS otherwise SBS\n floating=False,\n verbose=2,\n scoring='accuracy'\n )\n\n\n #after applying sfs fit the data:\n sfs.fit(X_train_sfs, y_train)\n\n return sfs.k_feature_names_" ]
[ "0.6386668", "0.6208587", "0.62022436", "0.60953605", "0.60796374", "0.60608006", "0.6022764", "0.5993953", "0.5985361", "0.59586924", "0.5820426", "0.5809583", "0.58089864", "0.58070034", "0.5787359", "0.575435", "0.57058054", "0.56917906", "0.56418794", "0.56213087", "0.5602896", "0.5580139", "0.55740535", "0.5564288", "0.5540875", "0.5534619", "0.55277455", "0.5516493", "0.5511482", "0.5505087" ]
0.62781596
1
Builds RFM scores for each customer and encodes the scores. When this method is called during the data_model building step, the dataframe containing the new RFM features is dumped into a file.
def data_transform_rfm(self) : is_built_step = False if self._encoder_rfm is None: is_built_step = True #------------------------------------------------------------------------- # RFM feature is built #------------------------------------------------------------------------- ser_invoice_date = self._df_invoice_line.InvoiceDate self.df_invoice_line, df_RFM, self.df_RFM_quantiles, self._day_now \ = p5_util.p5_df_rfm_build(self.df_invoice_line, day_now = self._day_now\ , df_RFM_threshold=self.df_RFM_quantiles) self._df_invoice_line.InvoiceDate = ser_invoice_date #------------------------------------------------------------------------- # RFM score is added to dataframe #------------------------------------------------------------------------- df_merged = pd.merge(self.df_invoice_line\ , df_RFM[['CustomerID','RFM']], how='left', on=['CustomerID']) self._df_invoice_line \ = pd.DataFrame(df_merged.values, index = self._df_invoice_line.index\ , columns=df_merged.columns) #self._df_invoice_line \ #= pd.concat([ self.df_invoice_line,df_RFM[['CustomerID','RFM']] ], axis=1\ #,join='inner') #------------------------------------------------------------------------- # RFM encoding #------------------------------------------------------------------------- self._encoder_rfm, df_RFM_encoded \ = p5_util.df_rfm_one_hot_encode(df_RFM,'RFM', encoder=self._encoder_rfm) #------------------------------------------------------------------------- # Encoded RFM features are renamed #------------------------------------------------------------------------- df_customers_rfm, list_col_unchanged \ = p5_util.df_rename_columns(df_RFM_encoded, df_RFM_encoded.columns\ , 'w_rfm_') self.strprint("df_customers_rfm =" +str(df_customers_rfm.shape)) #------------------------------------------------------------------------- # dataframe with RFM encoded values per customer is dumped #------------------------------------------------------------------------- if is_built_step is True: p5_util.object_dump(df_customers_rfm, self.df_customers_rfm_fileName) else : self._df_customers_rfm = df_customers_rfm.copy() return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def df_customers_fileRead(self):\n \n #-------------------------------------------------------------------------\n # RFM features are restored\n #-------------------------------------------------------------------------\n df_customers_rfm \\\n = p5_util.object_load(self.df_customers_rfm_fileName)\n self.strprint(\"RFM features : \"+str(df_customers_rfm.shape))\n \n #-------------------------------------------------------------------------\n # Time features are restored\n #-------------------------------------------------------------------------\n df_customers_timeFeature \\\n = p5_util.object_load(self._df_customers_timeFeature_fileName)\n self.strprint(\"Time features : \"+str(df_customers_timeFeature.shape))\n \n #-------------------------------------------------------------------------\n # NLP features are restored\n #-------------------------------------------------------------------------\n df_customers_nlp = p5_util.object_load(self._df_customers_nlp_fileName)\n self.strprint(\"NLP features : \"+str(df_customers_nlp.shape))\n\n if False:\n df_customers_rfm = self._df_customers_rfm.copy()\n df_customers_timeFeature = self._df_customers_timeFeature.copy()\n df_customers_nlp = self._df_customers_pca_nlp.copy()\n\n #-------------------------------------------------------------------------\n # Dataframe are aggregated; note that indexes are customerID.\n #-------------------------------------------------------------------------\n df_customers = pd.DataFrame()\n\n df_customers = pd.concat([df_customers,df_customers_rfm], axis=1)\n\n df_customers = pd.concat([df_customers,df_customers_timeFeature]\\\n , join='inner', axis=1)\n\n df_customers = pd.concat([df_customers,df_customers_nlp]\\\n , join='inner', axis=1)\n\n self.strprint(\"All features : \"+str(df_customers.shape))\n\n #----------------------------------------------------------------------\n # Dataframe is dumped into a file\n #----------------------------------------------------------------------\n p5_util.object_dump(df_customers, self._df_customers_fileName)\n if False:\n #----------------------------------------------------------------------\n # Dataframe is copied as an attribute\n #----------------------------------------------------------------------\n self._df_customers = df_customers.copy()\n \n return", "def df_customers_features_build(self):\n\n df_customers_rfm = self._df_customers_rfm.copy()\n df_customers_timeFeature = self._df_customers_timeFeature.copy()\n df_customers_nlp = self._df_customers_pca_nlp.copy()\n\n #-------------------------------------------------------------------------\n # Dataframe are aggregated; note that indexes are customerID.\n #-------------------------------------------------------------------------\n df_customers = pd.DataFrame()\n\n df_customers = pd.concat([df_customers,df_customers_rfm], axis=1)\n\n df_customers = pd.concat([df_customers,df_customers_timeFeature]\\\n , join='inner', axis=1)\n\n df_customers = pd.concat([df_customers,df_customers_nlp]\\\n , join='inner', axis=1)\n \n self.strprint(\"All features : \"+str(df_customers.shape))\n self._df_customers = df_customers.copy()\n return", "def collect_data(self):\n categories = []\n list_of_feature_lists = []\n feature_sets = set()\n with open(self.csv_path, \"r\") as f:\n reader = csv.DictReader(f)\n # collecting some stats for FDR calculation:\n self.PSM_count = 0\n self.decoy_count = 0\n\n if self[\"dump_svm_matrix\"]:\n self.init_svm_matrix_dump()\n additional_matrix_info = []\n\n for i, row in enumerate(\n sorted(\n 
reader,\n reverse=self[\"bigger_scores_better\"],\n key=lambda d: float(d[self.col_for_sorting]),\n )\n ):\n\n features = self.row_to_features(row)\n\n if tuple(features) in feature_sets:\n continue\n feature_sets.add(tuple(features))\n\n category, psm_FDR = self.get_psm_category(row)\n\n list_of_feature_lists.append(features)\n categories.append(category)\n\n if self[\"dump_svm_matrix\"]:\n label = -1 if row_is_decoy(row) else 1\n sequence = \"{0}.{1}#{2}.{3}\".format(\n row[\"Sequence Pre AA\"].strip(),\n row[\"Sequence\"].strip(),\n row[\"Modifications\"].strip(),\n row[\"Sequence Post AA\"].strip(),\n )\n additional_matrix_info.append(\n {\n \"psm_id\": row[\"Spectrum Title\"].strip(),\n \"label\": label,\n \"scannr\": row[\"Spectrum Title\"].strip().split(\".\")[-2],\n \"peptide\": sequence,\n \"proteins\": self.parse_protein_ids(row[\"Protein ID\"]),\n }\n )\n\n if i % 1000 == 0:\n score_val = float(row[self.col_for_sorting])\n msg = (\n \"Generating feature matrix from input csv \"\n \"(line ~{0}) with score {1} and FDR \"\n \"{2}\".format(i, score_val, psm_FDR)\n )\n print(msg, end=\"\\r\")\n\n # All data points are collected in one big matrix, to make standardization possible\n print(\"\\nConverting feature matrix to NumPy array...\")\n X_raw = np.array(list_of_feature_lists, dtype=float)\n\n print(\"Replacing empty/NaN values with the mean of each column...\")\n self.nan_replacer = Imputer()\n self.nan_replacer.fit(X_raw)\n X_raw = self.nan_replacer.transform(X_raw)\n # Standardize input matrix to ease machine learning! Scaled data has zero mean and unit variance\n print(\"Standardizing input matrix...\")\n self.scaler = SCALER.fit(X_raw)\n self.X = self.scaler.transform(X_raw)\n self.categories = np.array(categories)\n print()\n\n if self[\"dump_svm_matrix\"]:\n print(\"Dumping SVM matrix to\", self[\"dump_svm_matrix\"])\n\n for i, matrix_row in enumerate(self.X):\n matrix_row_info = additional_matrix_info[i]\n self.dump_svm_matrix_row(\n row=list(matrix_row),\n psm_id=matrix_row_info[\"psm_id\"],\n label=matrix_row_info[\"label\"],\n scannr=matrix_row_info[\"scannr\"],\n peptide=matrix_row_info[\"peptide\"],\n proteins=matrix_row_info[\"proteins\"],\n )\n\n print(\"Dumped SVM matrix to\", self[\"dump_svm_matrix\"])\n return", "def __train__(self):\n if (self.type_camf == 'CAMF_CI'):\n #users, items, context, ratings\n ci = camf_ci.CI_class(self.__users_array__, self.__items_array__, self.__context_array__, self.__ratings__, self.fold, self.lr, self.factors)\n predictions, losses = ci.fit()\n elif (self.type_camf == 'CAMF_CU'):\n cu = camf_cu.CU_class(self.__users_array__, self.__items_array__, self.__context_array__, self.__ratings__, self.fold, self.lr, self.factors)\n predictions, losses = cu.fit()\n elif (self.type_camf == 'CAMF_C'):\n c = camf_c.C_class(self.__users_array__, self.__items_array__, self.__context_array__, self.__ratings__, self.fold, self.lr, self.factors)\n predictions, losses = c.fit()\n\n dummy_pred = np.zeros((predictions.shape))\n for r, pred_array in enumerate(predictions):\n for c, pred in enumerate(pred_array):\n dummy_pred[r][c] = self.__check_ratings__(pred)\n predictions = dummy_pred\n #save a plot with a loss function\n plots = prs.PlotRSData()\n #print(losses)\n plots.plot_loss_cars(losses, self.type_camf, self.__save_prefix__+\"_loop\"+str(self.loop))\n pd.DataFrame(losses).to_csv(\"./RecSys/out/CAMF/train/\"+self.type_camf+\"/\" + self.__save_prefix__ +\"losses_loop\"+str(self.loop)+\".csv\")\n print('Saving the feature matrix...')\n # set 
predictions back to the pivot table\n self.__utility_saved_training__(predictions) \n # save results\n self.utility_predictions.to_csv(\"./RecSys/out/CAMF/train/\"+self.type_camf+\"/\" + self.__save_prefix__ + \"_SGD_predictions_loop\"+str(self.loop)+\".csv\")", "def build(self):\n list_of_mafs = []\n maf_generator = self.get_dataframe()\n\n for maf_as_dict in maf_generator:\n list_of_mafs.extend(maf_as_dict)\n\n reporting_path = os.path.join(app.config.get('REPORTING_ROOT_PATH'), app.config.get('REPORTING_PATH'), 'global')\n combined_maf = None\n try:\n combined_maf = pandas.DataFrame(list_of_mafs)\n except Exception as e:\n logger.error(f'Problem creating dataframe from list of dicts: {str(e)}')\n try:\n combined_maf.to_csv(\n os.path.join(reporting_path, f'{self.method}_combined_maf.tsv'),\n sep=\"\\t\",\n encoding='utf-8',\n index='false'\n )\n except Exception as e:\n # bad practice here catching base exception, but the pandas documentation did not reveal what errors or\n # exceptions to expect\n logger.error(f'Problem writing the combined maf file to csv:{str(e)}')\n abort(500)", "def data_transform_timeFeature(self):\n #-------------------------------------------------------------------------\n # All new features are built into separate dataframes \n # and each of them are dumped into a separate file.\n #-------------------------------------------------------------------------\n self.strprint(\"self.df_invoice_line : \"+str(self.df_invoice_line.shape))\n \n self._dict_timeFeature_encoder, df_customers_timeFeature \\\n = p5_util.time_list_feature_build(self.df_invoice_line\\\n , self._list_new_feature, dict_encoder = self._dict_timeFeature_encoder\\\n ,is_verbose=self.is_verbose)\n \n #-------------------------------------------------------------------------\n # New time features are aggregated into a single dataframe.\n # Values are scaled.\n #-------------------------------------------------------------------------\n df_customers_timeFeature, self._std_scaler_timeFeature \\\n = p5_util.time_list_feature_restore(self._list_new_feature \\\n , std_scale = self._std_scaler_timeFeature\\\n , df_timeFeature = df_customers_timeFeature, is_verbose = self.is_verbose)\n\n self.strprint(\"df_customers_timeFeature : \"+str(df_customers_timeFeature.shape))\n \n #-------------------------------------------------------------------------\n # Dimension reduction thanks to PCA\n #-------------------------------------------------------------------------\n n_dim=30\n root_name = 'time_pca_'\n # Column CustomerID is used into df_pca_reduce\n df_customers_timeFeature['CustomerID'] = df_customers_timeFeature.index\n \n df_customers_timeFeature, pca_timeFeature \\\n = p5_util.df_pca_reduce(df_customers_timeFeature, n_dim, root_name\\\n , p_is_scale=False, pca = self._pca_timeFeature)\n\n self.strprint(df_customers_timeFeature.shape)\n \n if self._pca_timeFeature is None:\n #----------------------------------------------------------------------\n # Data-model is in built process with part of data-set.\n #----------------------------------------------------------------------\n self._pca_timeFeature = pca_timeFeature\n p5_util.object_dump(df_customers_timeFeature\\\n , self._df_customers_timeFeature_fileName)\n else:\n #----------------------------------------------------------------------\n # Data-model is already built and this method is called \n # for a customer classification.\n #----------------------------------------------------------------------\n self._df_customers_timeFeature = 
df_customers_timeFeature.copy()\n return", "def __init__(self, sc, dataset_path):\n\n logger.info(\"Starting up the Recommendation Engine: \")\n\n self.sc = sc\n\n\t#Load cusomer data for later use\n\t\n logger.info(\"Loading Customer data...\")\n customer_file_path = os.path.join(dataset_path, 'tpo_customer.csv')\n customer_raw_RDD = self.sc.textFile(customer_file_path)\n customer_raw_data_header = customer_raw_RDD.take(1)[0]\n self.customer_RDD = customer_raw_RDD.filter(lambda line: line!=customer_raw_data_header)\\\n .map(lambda line: line.split(\",\")).map(lambda tokens: ((tokens[0]),(tokens[1]),(tokens[2]),(tokens[3]),(tokens[4]),(tokens[5]),(tokens[6]),(tokens[7]))).cache()\n\tlogger.info(\"Loading Customer data success...\")\n\t#CUSTOMCUSTOMER_NAME,CUSTOMER_ADDRESS1,CUSTOMER_ADDRESS2,CUSTOMER_CITY,CUSTOMER_STATE,CUSTOMER_COUNTRY,CUSTOMER_ZIPCODE,CREATED_BY,CREATION_DATE,LAST_UPDATED_BY,LAST_UPDATE_DATE\n \n\n\n\t\n\t#Load turbine data for later use\t\n logger.info(\"Loading Turbine data...\")\n turbine_file_path = os.path.join(dataset_path, 'test_tpo_unit_config.csv')\n turbine_raw_RDD = self.sc.textFile(turbine_file_path)\n turbine_raw_data_header = turbine_raw_RDD.take(1)[0]\n self.turbine_RDD = turbine_raw_RDD.filter(lambda line: line!=turbine_raw_data_header)\\\n .map(lambda line: line.split(\",\")).map(lambda tokens: ((tokens[0]),(tokens[1]),(tokens[2]),(tokens[3]),(tokens[5]),(tokens[34]),(tokens[51]),(tokens[35]))).cache()\n\tlogger.info(\"Loading Turbine data success...\")\n \n\t\n\t\n\t\n\t#Load site data for later use\t\n logger.info(\"Loading Site data...\")\n site_file_path = os.path.join(dataset_path, 'tpo_site.csv')\n site_raw_RDD = self.sc.textFile(site_file_path)\n site_raw_data_header = site_raw_RDD.take(1)[0]\n self.site_RDD = site_raw_RDD.filter(lambda line: line!=site_raw_data_header)\\\n .map(lambda line: line.split(\",\")).map(lambda tokens: ((tokens[0]),(tokens[1]),(tokens[2]),(tokens[3]),(tokens[4]),(tokens[5]),(tokens[6]),(tokens[7]),(tokens[16]))).cache()\n\tlogger.info(\"Loading Site data success...\")\n\t\n\n\n\n\t# Load ratings data for later use\n logger.info(\"Loading Ratings data...\")\n ratings_file_path = os.path.join(dataset_path, 'ratings.csv')\n ratings_raw_RDD = self.sc.textFile(ratings_file_path)\n ratings_raw_data_header = ratings_raw_RDD.take(1)[0]\n self.ratings_RDD = ratings_raw_RDD.filter(lambda line: line!=ratings_raw_data_header)\\\n .map(lambda line: line.split(\",\")).map(lambda tokens: (int(tokens[0]),int(tokens[1]),float(tokens[2]))).cache()\n # Load movies data for later use\n logger.info(\"Loading Movies data...\")\n movies_file_path = os.path.join(dataset_path, 'movies.csv')\n movies_raw_RDD = self.sc.textFile(movies_file_path)\n movies_raw_data_header = movies_raw_RDD.take(1)[0]\n self.movies_RDD = movies_raw_RDD.filter(lambda line: line!=movies_raw_data_header)\\\n .map(lambda line: line.split(\",\")).map(lambda tokens: (int(tokens[0]),tokens[1],tokens[2])).cache()\n self.movies_titles_RDD = self.movies_RDD.map(lambda x: (int(x[0]),x[1])).cache()\n # Pre-calculate movies ratings counts\n self.__count_and_average_ratings()\n\n # Train the model\n self.rank = 8\n self.seed = 5L\n self.iterations = 10\n self.regularization_parameter = 0.1\n self.__train_model()", "def features_customers(df_customers):\n for i in PREMIER_VALS:\n k = 'premier_' + str(i)\n df_customers[k] = np.where(df_customers['premier'] == i, 1, 0)\n\n df_customers['age'] = datetime.now().date().year - df_customers['yearOfBirth']\n df_customers['male'] = 
np.where(df_customers['gender'] == 'M', 1, 0)\n df_customers['female'] = np.where(df_customers['gender'] == 'F', 1, 0)\n df_customers['days_in_asos'] = (datetime.now().date() - df_customers['dateCreated']).dt.days\n\n logger.info(\"Features from the customers table: {shape} {dtypes}\"\n .format(shape=df_customers.shape, dtypes=df_customers.dtypes))\n return df_customers", "def make_submission_file(w, unused_features, filename=\"prediction.csv\"):\n\n # load test datasets\n print_banner(\"7. Read test dataset from higgs-data/test.csv\") \n test_y, test_x, ind = load_csv_data('higgs-data/test.csv')\n\n # Construct Matrix Output with values of one\n y_pred = np.ones(len(test_y))\n\n # Split test dataset based\n print_banner(\"8. Split the test dataset into 8 subsets\") \n test_sets_x, _, indices = create_subsets(test_x, test_y)\n\n # Remove features of test datasets based on PRI_JET_NUM and DER_MASS_MMC\n print_banner(\"9. Remove features in each test subset based on PRI_JET_NUM and DER_MASS_MMC\")\n test_sets_x = remove_features(test_sets_x, unused_features) \n\n # Iterate through the test subsets with their models accordingly\n print_banner(\"10. Predict each test subset using their corresponding model\") \n for x, w, index in zip(test_sets_x, w, indices):\n\n # Perform z-score standardization and expand matrix features with logarithmic & polynomial & cross_term & square root basis function\n stand_x = generate_features(x, 2, True, with_log=True, with_sqrt=True, cross_terms=True)\n\n # Get the prediction\n y_pred[index] = predict_labels(w, stand_x)\n\n print_banner(\" Predicting subset: DONE\") \n \n # Creating submission file\n print_banner(\"11. Making final submission file with csv format\") \n create_csv_submission(ind, y_pred, filename)", "def run(self, dataset_path):\n features = self._generate_features(self._feature_extractors)\n features.to_csv(dataset_path)", "def pre_processing_(data_df , serialized_objects):\n max_recency_acc_dig = serialized_objects['max_recency_acc_dig'] # These values are taken from trained model values\n max_recency_dig_2yr = serialized_objects['max_recency_dig_2yr'] # These values are taken from trained model values\n max_acc_recency_mf = serialized_objects['max_acc_recency_mf'] #These are values imported in training dataset. 
Same values needs to be used to impute missing values in unseen data\n\n data_df = data_df.na.fill({\n 'recency_acc_dig' : max_recency_acc_dig, # Filling missing values\n 'recency_dig_2yr' : max_recency_dig_2yr,\n 'acc_recency_mf' : max_acc_recency_mf\n })\n\n freq_acc_upg_2yrs_split = [-float('inf'), 0, 1, 2, float('inf')]\n bucketizer_freq_acc_upg_2yrs = Bucketizer(splits=freq_acc_upg_2yrs_split, inputCol='freq_acc_upg_acc_2yrs', outputCol='freq_acc_upg_acc_2yrs_bkt')\n data_df = bucketizer_freq_acc_upg_2yrs.setHandleInvalid('keep').transform(data_df) # Binning the freq_acc_upg_acc_2yrs column\n\n tot_purchase_split = [-float('inf'), 0, 1, 2, 3, float('inf')]\n bucketizer_tot_purchase = Bucketizer(splits=tot_purchase_split, inputCol='tot_accsry_purchse', outputCol='tot_accsry_purchse_bkt')\n data_df = bucketizer_tot_purchase.setHandleInvalid('keep').transform(data_df) # Binning the tot_accsry_purchse column\n\n del_cols_new = ['freq_acc_upg_acc_2yrs', 'tot_accsry_purchse']\n data_df = data_df.drop(*del_cols_new) # Dropping the older continuous columns\n return data_df", "def generateMatrix(self):\n if self.tokenWeights and self.extraFeatures:\n nFeatures = self.wordId + self.wordId2 + len(self.EXTRA_WEIGHTS_LABELS)\n logging.info('Exporting TOKEN WEIGHTS AND EXTRA FEATURES %dx%d'%(self.docId, nFeatures))\n mtrx = np.zeros((self.docId, nFeatures))\n \n for docId, doc in self.documents.iteritems():\n # iterate through 1st sentence\n for wId, val in doc['s1'].iteritems():\n mtrx[docId, wId] = val\n # then iterate thru 2nd sentence, store on 2ND PARTITION\n for wId, val in doc['s2'].iteritems():\n mtrx[docId, self.wordId + wId] = val\n # finally extra features values stored at the end of the vector\n for label, val in doc['extraFeatures'].iteritems():\n mtrx[docId, self.wordId + self.wordId2 + self.EXTRA_WEIGHTS_LABELS.index(label)] = val\n\n elif self.tokenWeights and not self.extraFeatures:\n nFeatures = self.wordId + self.wordId2\n logging.info('Exporting TOKEN WEIGHTS %dx%d'%(self.docId, nFeatures))\n mtrx = np.zeros((self.docId, nFeatures))\n \n for docId, doc in self.documents.iteritems():\n # iterate through 1st sentence\n for wId, val in doc['s1'].iteritems():\n mtrx[docId, wId] = val\n # then iterate thru 2nd sentence, store on 2ND PARTITION\n for wId, val in doc['s2'].iteritems():\n mtrx[docId, self.wordId + wId] = val\n else:\n nFeatures = len(self.EXTRA_WEIGHTS_LABELS)\n logging.info('Exporting EXTRA FEATURES %dx%d'%(self.docId, nFeatures))\n mtrx = np.zeros((self.docId, nFeatures))\n \n for docId, doc in self.documents.iteritems():\n for label, val in doc['extraFeatures'].iteritems():\n mtrx[docId, self.EXTRA_WEIGHTS_LABELS.index(label)] = val\n logging.info('Matrix generated')\n logging.info(mtrx.shape)\n return mtrx", "def make_dataset(interim_file_path, processed_file_path, weights, version):\n qws_wsrf, qws_complete_numpy_array = src.dataset.compute_wsrf.compute_wsrf(interim_file_path, weights)\n # qws_complete_numpy_array_temp = np.append(qws_complete_numpy_array, qws_wsrf[:, np.newaxis], axis=1)\n qws_wsrf_level = np.array([])\n for score in qws_wsrf:\n if(score > 0.78):\n level = 1\n elif(score > 0.7):\n level = 2\n elif(score > 0.65):\n level = 3\n else:\n level = 4\n score = np.append(score, level)\n qws_wsrf_level = np.append(qws_wsrf_level, score)\n qws_wsrf_level = qws_wsrf_level.reshape(qws_wsrf.shape[0], 2)\n if(version == 1):\n qws_complete_numpy_array[:, 9:11] = qws_wsrf_level\n elif(version == 2):\n qws_complete_numpy_array = np.hstack((qws_complete_numpy_array, 
np.zeros((qws_wsrf.shape[0], 2))))\n qws_complete_numpy_array[:, 11:13] = qws_complete_numpy_array[:, 9:11]\n qws_complete_numpy_array[:, 9:11] = qws_wsrf_level\n else:\n print(\"Version has to be either 1 or 2\")\n\n qws_complete_dataframe_new = pd.DataFrame(qws_complete_numpy_array)\n qws_complete_dataframe_new = qws_complete_dataframe_new.astype({10: int})\n qws_complete_dataframe_new.to_csv(processed_file_path, header=False, index=False)", "def create_weka_mfcc_13():\n global ARGS\n\n ## ten thu muc can trich chon vector dac trung (RLS, LMS, NLMS, Kalman, Non)\n name = '';\n fout = open('weka/MFCC78_TUNNING_{}_dataset.arff'.format(name), 'w')\n fout.write('@RELATION {}_dataset\\n\\n'.format(name))\n\n fout.write('@ATTRIBUTE MEAN_MFCC1\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCC2\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCC3\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCC4\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCC5\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCC6\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCC7\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCC8\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCC9\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCC10\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCC11\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCC12\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCC13\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCD1\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCD2\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCD3\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCD4\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCD5\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCD6\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCD7\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCD8\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCD9\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCD10\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCD11\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCD12\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCD13\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCDD1\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCDD2\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCDD3\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCDD4\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCDD5\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCDD6\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCDD7\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCDD8\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCDD9\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCDD10\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCDD11\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCDD12\tREAL\\n')\n fout.write('@ATTRIBUTE MEAN_MFCCDD13\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCC1\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCC2\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCC3\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCC4\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCC5\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCC6\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCC7\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCC8\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCC9\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCC10\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCC11\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCC12\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCC13\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCD1\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCD2\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCD3\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCD4\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCD5\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCD6\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCD7\tREAL\\n')\n fout.write('@ATTRIBUTE 
STD_MFCCD8\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCD9\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCD10\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCD11\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCD12\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCD13\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCDD1\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCDD2\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCDD3\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCDD4\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCDD5\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCDD6\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCDD7\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCDD8\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCDD9\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCDD10\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCDD11\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCDD12\tREAL\\n')\n fout.write('@ATTRIBUTE STD_MFCCDD13\tREAL\\n')\n fout.write('@ATTRIBUTE class \t{'+ARGS.labels+'}\\n\\n')\n \n fout.write('@DATA\\n')\n\n ## cua so\n windowing = Windowing(type='hamming',\n size=1104,\n zeroPhase=False)\n \n ## quang pho\n spectrum = Spectrum(size=1104)\n\n ##khoi tao MFCC\n mfcc = MFCC(highFrequencyBound=4000, ## gioi han tren cua tan so\n inputSize=201, \t\t\t ## kich thuoc pho dau vao\n lowFrequencyBound=0,\t ## gioi han duoi cua tan so\n numberBands=40,\t\t\t ## so luong cac dai Mels trong bo loc\n numberCoefficients=13, ## so luong dau ra cac he so Mel\n sampleRate=16000)\t\t ## tan so lay mau\n\n for label in ARGS.labels.split(','): ## duyet cac thu muc giong voi ten nhan\n\n ## dia chi thu muc\n dir = os.path.join(ARGS.dir, label)\n\n logging.info('Access folder <{}>'.format(dir))\n\n for file in sorted(os.listdir(dir)):\n\n \t## duyet cac file .wav\n if file.endswith('.wav'):\n logging.info('Process <{}>'.format(file))\n path = os.path.join(dir, file)\n \n ## doc file am thanh\n loader = MonoLoader(filename=path, sampleRate=ARGS.sampleRate)\n audio = loader()\n cnt = 0\n\n for window in FrameGenerator(audio, \n frameSize=ARGS.window_length*ARGS.sampleRate/1000, \n hopSize=ARGS.window_stride*ARGS.sampleRate/1000, \n startFromZero=True):\n mfccs = []\n for frame in FrameGenerator(window, \n frameSize=ARGS.frame_length*ARGS.sampleRate/1000, \n hopSize=ARGS.frame_stride*ARGS.sampleRate/1000, \n startFromZero=True):\n s = spectrum(windowing(frame))\n\n _, m = mfcc(s)\n\n m_delta = librosa.feature.delta(m, order=1) ## dao ham bac 1\n m_delta_delta = librosa.feature.delta(m, order=2) ## dao ham bac 2\n\n m_all = np.concatenate((m, m_delta, m_delta_delta), axis=0) ## them vao chuoi\n mfccs.append(m_all)\n mfccs = np.array(mfccs)\n mfccs_mean = np.mean(mfccs, axis=0)\n mfccs_std = np.std(mfccs, axis=0)\n feat = np.concatenate((mfccs_mean, mfccs_std), axis=0).tolist()\n str_feat = [str(x) for x in feat]\n line = ','.join(str_feat)+','+label\n fout.write(line+'\\n')\n cnt = cnt+1\n logging.info('{} samples'.format(cnt))", "def process_dataset(self):\n\n logger.info('\\n')\n logger.info('=' * 40)\n logger.info('=\\t DeepRank Data Set')\n logger.info('=')\n logger.info('=\\t Training data')\n for f in self.train_database:\n logger.info(f'=\\t -> {f}')\n logger.info('=')\n if self.valid_database:\n logger.info('=\\t Validation data')\n for f in self.valid_database:\n logger.info(f'=\\t -> {f}')\n logger.info('=')\n if self.test_database:\n logger.info('=\\t Test data')\n for f in self.test_database:\n logger.info(f'=\\t -> {f}')\n logger.info('=')\n logger.info('=' * 40 + '\\n')\n sys.stdout.flush()\n\n # check if the files are ok\n 
self.check_hdf5_files(self.train_database)\n\n if self.valid_database:\n self.valid_database = self.check_hdf5_files(\n self.valid_database)\n\n if self.test_database:\n self.test_database = self.check_hdf5_files(\n self.test_database)\n\n # create the indexing system\n # alows to associate each mol to an index\n # and get fname and mol name from the index\n self.create_index_molecules()\n\n # get the actual feature name\n if self.mapfly:\n self.get_raw_feature_name()\n else:\n self.get_mapped_feature_name()\n\n # get the pairing\n self.get_pairing_feature()\n\n # get grid shape\n self.get_grid_shape()\n\n # get the input shape\n self.get_input_shape()\n\n # get renormalization factor\n if self.normalize_features or self.normalize_targets or self.clip_features:\n if self.mapfly:\n self.compute_norm()\n else:\n self.get_norm()\n\n logger.info('\\n')\n logger.info(\" Data Set Info:\")\n logger.info(\n f' Augmentation : {self.use_rotation} rotations')\n logger.info(\n f' Training set : {self.ntrain} conformations')\n logger.info(\n f' Validation set : {self.nvalid} conformations')\n logger.info(\n f' Test set : {self.ntest} conformations')\n logger.info(f' Number of channels : {self.input_shape[0]}')\n logger.info(f' Grid Size : {self.data_shape[1]}, '\n f'{self.data_shape[2]}, {self.data_shape[3]}')\n sys.stdout.flush()", "def CCF_toExcel(self, data_set, ccf_inputs):\n file_name = self.file_path(target_filename=\"LEICode_CCF_ModelID_EndOfObservationPeriod_versionNumber.xlsx\")\n oxl = openpyxl.load_workbook(file_name)\n\n # Information missing from test results:\n start_date\t = datetime.date(2007, 1, 1)\n end_date\t = datetime.date(2015, 1, 1)\n nb_customer = len(data_set.id.unique())\n grade_nb = data_set.Bin_CCF.unique()\n grade_name = []\n grade_counts = []\n avCCFE_perGrade = []\n avCCFR_perGrade = []\n minCCFR_perGrade = []\n maxCCFR_perGrade = []\n q5CCFR_perGrade = []\n q10CCFR_perGrade = []\n q25CCFR_perGrade = []\n q50CCFR_perGrade = []\n q75CCFR_perGrade = []\n q90CCFR_perGrade = []\n q95CCFR_perGrade = []\n for g in range(1, len(grade_nb) + 1):\n grade_name.append( self.grade_mapping(grade_num = g) )\n grade_counts.append( data_set[data_set.Default_Binary == 1][\"Bin_CCF\"].value_counts()[g] )\n avCCFE_perGrade.append( data_set.groupby(\"Bin_CCF\").CCF.mean()[g] )\n avCCFR_perGrade.append( data_set.groupby(\"Bin_CCF\").CCF_realised.mean()[g] )\n minCCFR_perGrade.append( data_set.groupby(\"Bin_CCF\").CCF_realised.min()[g])\n maxCCFR_perGrade.append( data_set.groupby(\"Bin_CCF\").CCF_realised.max()[g])\n q5CCFR_perGrade.append( data_set.groupby(\"Bin_CCF\").CCF_realised.quantile(0.05)[g])\n q10CCFR_perGrade.append( data_set.groupby(\"Bin_CCF\").CCF_realised.quantile(0.10)[g])\n q25CCFR_perGrade.append( data_set.groupby(\"Bin_CCF\").CCF_realised.quantile(0.25)[g])\n q50CCFR_perGrade.append( data_set.groupby(\"Bin_CCF\").CCF_realised.quantile(0.50)[g])\n q75CCFR_perGrade.append( data_set.groupby(\"Bin_CCF\").CCF_realised.quantile(0.75)[g])\n q90CCFR_perGrade.append( data_set.groupby(\"Bin_CCF\").CCF_realised.quantile(0.90)[g])\n q95CCFR_perGrade.append( data_set.groupby(\"Bin_CCF\").CCF_realised.quantile(0.95)[g])\n\n bcktesting_ccf_ptf = [\"N/A\", #Name of facility grade/pool or segment\n len(data_set.id.unique()), # Number of facilities (R)\n data_set.CCF.mean(), # Average estimated CCF (CCF^E)\n data_set.CCF_realised.mean(), # Average realised CCF (CCF^R)\n 0.0, # Floor used (if applicable)\n 0.0, # Number of CCF realisations floored\n data_set.CCF_realised.min(), # Minimum 
CCF^R\n data_set.CCF_realised.quantile(0.05), # Quantiles\n data_set.CCF_realised.quantile(0.10), #\n data_set.CCF_realised.quantile(0.25), #\n data_set.CCF_realised.quantile(0.50), #\n data_set.CCF_realised.quantile(0.75), #\n data_set.CCF_realised.quantile(0.90), #\n data_set.CCF_realised.quantile(0.95), #\n data_set.CCF_realised.max(), # Maximum CCF^R\n 0 # Exposure-weighted average of CCF^R (to be created)\n ]\n\n # Predictive ability\n ## CCF back-testing using a t-test (§ 2.9.3.1) - sheet 3.1\n wbk31 = oxl.get_sheet_by_name(\"3.1\")\n # Grade Lvl\n self.array_toExcel(wb=wbk31, stat_array = grade_name, row_pos=10, col_pos=4, row_wise=True)\n self.array_toExcel(wb=wbk31, stat_array = grade_counts, row_pos=10, col_pos=5, row_wise=True)\n self.array_toExcel(wb=wbk31, stat_array = avCCFE_perGrade, row_pos=10, col_pos=6, row_wise=True)\n self.array_toExcel(wb=wbk31, stat_array = avCCFR_perGrade, row_pos=10, col_pos=7, row_wise=True)\n self.array_toExcel(wb=wbk31, stat_array=[0] * 7, row_pos=10, col_pos=8, row_wise=True) # Floor used (if applicable)\n self.array_toExcel(wb=wbk31, stat_array=[0] * 7, row_pos=10, col_pos=9, row_wise=True) # Number of CCF realisations floored\n self.array_toExcel(wb=wbk31, stat_array= minCCFR_perGrade, row_pos=10, col_pos=10, row_wise=True)\n self.array_toExcel(wb=wbk31, stat_array= maxCCFR_perGrade, row_pos=10, col_pos=18, row_wise=True)\n self.array_toExcel(wb=wbk31, stat_array=[0] * 7, row_pos=10, col_pos=19, row_wise=True) # Exposure-weighted average of CCF^R (to be created)\n self.array_toExcel(wb=wbk31, stat_array= q5CCFR_perGrade, row_pos=10, col_pos=11, row_wise=True)\n self.array_toExcel(wb=wbk31, stat_array= q10CCFR_perGrade, row_pos=10, col_pos=12, row_wise=True)\n self.array_toExcel(wb=wbk31, stat_array= q25CCFR_perGrade, row_pos=10, col_pos=13, row_wise=True)\n self.array_toExcel(wb=wbk31, stat_array= q50CCFR_perGrade, row_pos=10, col_pos=14, row_wise=True)\n self.array_toExcel(wb=wbk31, stat_array= q75CCFR_perGrade, row_pos=10, col_pos=15, row_wise=True)\n self.array_toExcel(wb=wbk31, stat_array= q90CCFR_perGrade, row_pos=10, col_pos=16, row_wise=True)\n self.array_toExcel(wb=wbk31, stat_array= q95CCFR_perGrade, row_pos=10, col_pos=17, row_wise=True)\n self.array_toExcel(wb=wbk31, stat_array= [0] * 7, row_pos=10, col_pos=23, row_wise=True) # Number of facilities excluded due to outlier handling (set to zero)\n\n # Ptf Lvl\n self.df_toExcel(wb=wbk31, df = pd.DataFrame(ccf_inputs[\"predictive_ability\"][1]).T, row_pos=10, col_pos=20)\n self.array_toExcel(wb=wbk31, stat_array=ccf_inputs[\"predictive_ability\"][0], row_pos=8, col_pos=20, row_wise=False)\n self.array_toExcel(wb=wbk31, stat_array=bcktesting_ccf_ptf, row_pos=8, col_pos=4, row_wise=False)\n wbk31.cell(row=8, column=23).value = 0 # Number of facilities excluded due to outlier handling\n\n # Discriminatory Power\n ## Current gAUC vs gAUC at initial validation/development (§ 2.9.3.1) - sheet 4.0\n wbk40 = oxl.get_sheet_by_name(\"4.0\")\n self.array_toExcel(wb=wbk40, stat_array=ccf_inputs[\"AUC\"][:-1], row_pos=7, col_pos=4, row_wise=False)\n wbk40.cell(row= 7, column= 10).value = start_date # start date\n wbk40.cell(row=7, column=11).value = end_date # end date\n wbk40.cell(row=7, column=12).value = nb_customer # nb of customers\n wbk40.cell(row=7, column=13).value = ccf_inputs[\"AUC\"][-1] # Variance (gAUC_init)\n\n # Save file\n oxl.save(file_name)\n oxl.close()\n return \"CCF results saved to Excel.\"", "def submission(self):\n\n\t\tprobas = self.y_pred / self.count_models\n\n\t\tsub = 
pd.DataFrame({'id':self.X_test.PostId, 'OpenStatus':probas}).set_index('id')\n\t\tsub.to_csv('sub.csv')", "def write_to_csv(self, name_suffix = ''):\n f_path = os.path.join(self.root_dir, 'res' + name_suffix + '.csv')\n field_names = [] # the first field in CSV is 'obj_val'\n\n # put the keys in the cost, prim_var_change, dual_var_change and fea_conditions as field names if any\n for key in self.cost.keys():\n field_names.append(key)\n for key in self.cost_change.keys():\n field_names.append(key)\n for key in self.prim_var_change.keys():\n field_names.append(key)\n for key in self.dual_var_change.keys():\n field_names.append(key)\n for key in self.fea_conditions.keys():\n field_names.append(key)\n\n\tprint f_path\n\n with open(f_path, mode = 'wb') as csv_file: # open the file, if not exist, create it\n writer = csv.DictWriter(csv_file, fieldnames = field_names) # create a writer which maps the dictionaries onto output rows in CSV\n writer.writeheader() # write the field names to the header\n temp_dict = {} # create a temporary dict used to output rows\n row_max = self.get_iter_num() # get the max iters which indicates the number of rows in CSV\n print ('number of rows: ' + str(row_max))\n #print (field_names)\n for row in range(row_max + 1):\n temp_dict.clear() # clear all items\n start_idx = 0\n for i in range(len(self.cost)):\n field = field_names[start_idx + i]\n\t\t if row > len(self.cost[field]) - 1:\n\t\t\ttemp_dict[field] = ''\n\t\t else: temp_dict[field] = self.get_cost_val(field, row)\n\n start_idx = start_idx + len(self.cost) # the start pos of fields in field_names for prim_var_change\n for i in range(len(self.cost_change)): # for each cost_change\n field = field_names[start_idx + i]\n if row == 0: # for row 0 (iter 0), we will set '/' to the change of primal variables\n temp_dict[field] = '/'\n elif row > len(self.cost_change[field]) - 1:\n\t\t\t temp_dict[field] = ''\n\t\t else:\n temp_dict[field] = self.get_cost_change_value(field, row - 1)\n\n\n start_idx = start_idx + len(self.cost_change)\n for i in range(len(self.prim_var_change)): # for each prim_var_change\n field = field_names[start_idx + i]\n if row == 0: # for row 0 (iter 0), we will set '/' to the change of primal variables\n temp_dict[field] = '/'\n\t\t elif row > len(self.prim_var_change[field]) - 1:\n\t\t\ttemp_dict[field] = ''\n else:\n temp_dict[field] = self.get_prim_change_value(field, row - 1)\n\n start_idx = start_idx + len(self.prim_var_change) # go to the start pos of fields in field_names for dual_var_change\n for i in range(len(self.dual_var_change)): # for each dual_var_change\n field = field_names[start_idx + i]\n if row == 0: # for row 0 (iter 0), we will set '/' to the change of dual variables\n temp_dict[field] = '/'\n elif row > len(self.dual_var_change[field]) - 1:\n\t\t\ttemp_dict[field] = '' \n\t\t else:\n temp_dict[field] = self.get_dual_change_value(field, row - 1)\n\n start_idx = start_idx + len(self.dual_var_change) # go the the start pos of fields in field_names for fea_conditions\n for i in range(len(self.fea_conditions)): # for each fea_condition\n field = field_names[start_idx + i]\n\t\t if row > len(self.fea_conditions[field]) - 1:\n\t\t\ttemp_dict[field] = ''\n else: temp_dict[field] = self.get_fea_condition_value(field, row)\n\n writer.writerow(temp_dict)\n\n # we also save the value of primal values if not saved\n if not self.pdv_to_csv:\n self.save_last_prims()", "def produce_init(filename):\n training_dataset = pd.read_csv(f'../Modified Data/{filename}')\n test_dataset = 
pd.read_csv(f'../Raw Data/test.csv')\n features = list(training_dataset.columns)\n features.remove('SalePrice')\n predict_feature = ['SalePrice']\n\n # Produce Test Data\n test_X = test_dataset.loc[:, features]\n ids_test = test_dataset.loc[:, 'Id']\n\n for column in features:\n if str(training_dataset.loc[:, column].dtype) == 'object':\n # Initialize encoder\n labelencoder = LabelEncoder()\n # Encode Train Data\n training_dataset.loc[:, column] = training_dataset.loc[:, column].fillna('Missing')\n training_dataset.loc[:, column] = pd.Series(labelencoder.fit_transform(training_dataset.loc[:, column]))\n # Encode Test Data\n test_X.loc[:, column] = test_X.loc[:, column].fillna('Missing')\n test_X.loc[:, column] = pd.Series(labelencoder.fit_transform(test_X.loc[:, column]))\n else:\n # Fix missing values for train data\n training_dataset.loc[:, column] = training_dataset.loc[:, column].fillna(int(training_dataset.loc[:, column].mean()))\n # Fix missing values for test data\n test_X.loc[:, column] = test_X.loc[:, column].fillna(int(test_X.loc[:, column].mean()))\n\n return training_dataset, test_X, ids_test", "def create_features_customer(profile, transcript_training):\n\n # create avg/min/max amount features. Need to calculate amount features from transcript\n # because transcript_training only contains transactions for offer received and viewed.\n # such transactions do not have amount associated\n\n query = \"\"\"\n SELECT a.person, min(amount) as min_amount, max(amount) as max_amount, avg(amount) as avg_amount\n FROM transcript a\n JOIN transcript_quantile b\n ON a.person = b.person \n WHERE a.time <= b.received_time\n GROUP BY a.person\n \"\"\"\n\n profile_amount = u.read_dataframe_from_sql(query).set_index('person')\n\n # create avg/min/max amount duration_view\n profile_duration = create_features_using_groupby(transcript_training\\\n \t, 'profile', 'duration_view')\n\n # create view rate (average of label)\n profile_view_rate = create_features_using_groupby(transcript_training, 'profile', 'label'\\\n \t, minimum=False, maximum=False)\n profile_view_rate.columns=['view_rate_profile']\n\n # create trx rate (count of transactions per person/(max received time - min received time))\n profile_trx_rate = (transcript_training.groupby('person').size()*100\\\n /(transcript_training.groupby('person')['received_time'].max() \\\n - transcript_training.groupby('person')['received_time'].min())).reset_index()\n profile_trx_rate.columns = ['person', 'avg_trx_cnt']\n # set trx rate = 1 if max received time == min received time\n profile_trx_rate.loc[profile_trx_rate['avg_trx_cnt']==np.inf, 'avg_trx_cnt'] = 1\n profile_trx_rate = profile_trx_rate.set_index('person')\n\n profile_feat = profile_amount.join(profile_duration)\\\n .join(profile_view_rate).join(profile_trx_rate)\n\n assert pd.merge(profile, profile_feat, how='left', left_index=True, right_index=True).shape[0] == profile.shape[0]\\\n , \"rows do not match with original data (profile)\"\n\n profile = pd.merge(profile, profile_feat, how='left', left_index=True, right_index=True)\n\n return profile", "def create_feables(matches, fifa_stats, bookkeepers, verbose=True):\n\n if verbose:\n print(\"Generating match features...\")\n start = time()\n\n # Get match features for all matches (apply to each row)\n match_stats = matches.apply(lambda match: get_match_features(match, matches), axis=1)\n\n # Create dummies for league ID feature\n # deleting this as i am only looking at EPL\n # dummies = 
pd.get_dummies(match_stats['league_id']).rename(columns=lambda x: 'League_' + str(x))\n # match_stats = pd.concat([match_stats, dummies], axis=1)\n match_stats.drop(['league_id'], inplace=True, axis=1)\n\n end = time()\n if verbose:\n print(\"Match features generated in {:.1f} minutes\".format((end - start) / 60))\n\n if verbose:\n print(\"Generating match labels...\")\n start = time()\n\n # Create match labels\n labels = matches.apply(get_match_label, axis=1)\n end = time()\n if verbose:\n print(\"Match labels generated in {:.1f} minutes\".format((end - start) / 60))\n\n # if verbose == True:\n # print(\"Generating bookkeeper data...\")\n # start = time()\n # Get bookkeeper quotas for all matches\n # bk_data = get_bookkeeper_data(matches, bookkeepers, horizontal=True)\n # bk_data.loc[:, 'match_api_id'] = matches.loc[:, 'match_api_id']\n # end = time()\n # if verbose == True:\n # print(\"Bookkeeper data generated in {:.1f} minutes\".format((end - start) / 60))\n\n # Merges features and labels into one frame\n features = pd.merge(match_stats, fifa_stats, on='match_api_id', how='left')\n # features = pd.merge(features, bk_data, on='match_api_id', how='left')\n # features = match_stats\n feables = pd.merge(features, labels, on='match_api_id', how='left')\n\n # Drop NA values\n feables.dropna(inplace=True)\n\n # Return preprocessed data\n return feables", "def build_enru_custom_ft(self):\n eval_data_file = self.data_dir + '/' + enru_newscomm\n eval_data = tf.data.experimental.CsvDataset(\n [eval_data_file],\n record_defaults=[tf.string, tf.string],\n compression_type='GZIP',\n field_delim='\\t',\n use_quote_delim=False)\n train_data = eval_data.skip(3000).take(6000)\n eval_data = eval_data.take(3000)\n\n eval_data = eval_data.cache()\n train_data = train_data.cache()\n def to_features_dict(eng, rus):\n return {'inputs': eng, 'targets': rus}\n\n train_data = train_data.map(to_features_dict)\n eval_data = eval_data.map(to_features_dict)\n\n self.default_builder_obj = None\n\n return train_data, eval_data", "def create_matrix(ratings_df, jokes_df):\r\n \"\"\" note: empty entries are populated with zeros \"\"\"\r\n\r\n matrix_handler = matrix_object()\r\n\r\n num_joke_features = 5\r\n\r\n ''' add all joke features '''\r\n for row_idx in range(0, jokes_df.shape[0]):\r\n joke_idx = int(jokes_df.iloc[row_idx][\"Idx\"])\r\n isAggressive = jokes_df.iloc[row_idx][\"isAggressive\"]\r\n isIncongruence = jokes_df.iloc[row_idx][\"isIncongruence\"]\r\n generation = jokes_df.iloc[row_idx][\"Generation\"]\r\n isMillenial = (generation == \"Millenial\")\r\n isGenX = (generation == \"Gen X\")\r\n isGenZ = (generation == \"Gen Z\")\r\n\r\n if(int(isMillenial) == 1.0 and int(isGenX) == 1.0):\r\n raise Valueerror()\r\n\r\n matrix_handler.add_value(joke_idx - 1, 0, int(isAggressive))\r\n matrix_handler.add_value(joke_idx - 1, 1, int(isIncongruence))\r\n matrix_handler.add_value(joke_idx - 1, 2, int(isMillenial))\r\n matrix_handler.add_value(joke_idx - 1, 3, int(isGenX))\r\n matrix_handler.add_value(joke_idx - 1, 4, int(isGenZ))\r\n\r\n ''' add all ratings '''\r\n for row_idx in range(0, ratings_df.shape[0]):\r\n for joke_idx in range(1, 122):\r\n col_name = \"joke\" + str(joke_idx)\r\n matrix_handler.add_value(joke_idx - 1, row_idx + num_joke_features, ratings_df.iloc[row_idx][col_name])\r\n\r\n matrix = matrix_handler.compile_matrix()\r\n new_df = matrix_handler.to_df(matrix)\r\n\r\n return matrix, new_df", "def main():\n\n logger.info('Process initiated - Building dataset')\n\n if os.path.isfile(train_path) and 
os.path.isfile(test_path):\n logger.info('Loading pickled data')\n return pd.read_pickle(train_path), pd.read_pickle(test_path)\n\n logger.info('Reading COSMIC Cancer Gene Census')\n gene_census = cancer_gene_census()\n gene_census.extend(civic_cancer_genes())\n\n gene_census = set(gene_census)\n\n training_data = pd.DataFrame()\n testing_data = pd.DataFrame()\n\n for cancer_type in cancer_types:\n data_file_name = cancer_type + \".meth.by_mean.data.txt\"\n data_file_location = os.path.join(data_location, data_file_name)\n\n logger.info('Reading Methylation data for {}'.format(cancer_type))\n\n methyl_data = pd.read_csv(data_file_location, delimiter='\\t', skiprows=[1], index_col=0)\n\n logger.info(\n 'Number of Genes: {0} | Number of Patients: {1}'.format(methyl_data.shape[0], methyl_data.shape[1]))\n logger.info('Preprocessing Methylation data')\n\n methyl_data = genes_feature_selection(methyl_data, gene_census)\n\n logger.info('Number of Genes after processing: {0}\\n'.format(methyl_data.shape[0]))\n\n methyl_data = add_classification_label(methyl_data)\n methyl_data = methyl_data.transpose()\n\n normal_cases = methyl_data[methyl_data['Tumor'] == 0]\n logger.info(normal_cases.shape)\n train_normal_cases = normal_cases.sample(frac=0.7, random_state=200)\n logger.info(train_normal_cases.shape)\n test_normal_cases = normal_cases.drop(train_normal_cases.index)\n logger.info(train_normal_cases.shape)\n\n tumor_cases = methyl_data[methyl_data['Tumor'] != 0]\n logger.info(tumor_cases.shape)\n train_tumor_cases = tumor_cases.sample(frac=0.7, random_state=200)\n logger.info(train_tumor_cases.shape)\n\n test_tumor_cases = tumor_cases.drop(train_tumor_cases.index)\n logger.info(test_tumor_cases.shape)\n\n training_data = training_data.append(train_normal_cases)\n training_data = training_data.append(train_tumor_cases)\n\n testing_data = testing_data.append(test_normal_cases)\n testing_data = testing_data.append(test_tumor_cases)\n\n training_data = training_data.sample(frac=1)\n testing_data = testing_data.sample(frac=1)\n\n logger.info('Pickling training and testing data')\n training_data.to_pickle(train_path)\n testing_data.to_pickle(test_path)\n\n logger.info('Processing completed!')\n visualize_data(training_data)\n\n return training_data, testing_data", "def main():\n\n if os.path.exists(os.path.join(PROCESSED_PATH,\n 'all_posts_data.csv')):\n print(\"-- all_posts_data.csv found locally - delete interm files if rerun needed\")\n total_df = pd.read_csv(PROCESSED_PATH / 'all_posts_data.csv')\n else:\n training_post_filenames = glob.glob(os.path.join(DATA_DIR,\n 'raw',\n 'clpsych16-data',\n 'data',\n 'training',\n 'posts', '*.xml'))\n dev_post_filenames = glob.glob(os.path.join(DATA_DIR,\n 'raw',\n 'clpsych16-data',\n 'data',\n 'testing',\n 'posts', '*.xml'))\n\n new_posts2017 = glob.glob(os.path.join(DATA_DIR,\n 'raw',\n 'clpsych17-test',\n 'posts', '*.xml'))\n\n training_labels = os.path.join(DATA_DIR,\n 'raw',\n 'clpsych16-data',\n 'data',\n 'training',\n 'labels.tsv')\n dev_labels = os.path.join(DATA_DIR,\n 'raw',\n 'clpsych16-data',\n 'data',\n 'testing',\n 'labels.tsv')\n\n training_df = create_posts_df(training_post_filenames)\n dev_df = create_posts_df(dev_post_filenames)\n new_df = create_posts_df(new_posts2017)\n\n training_df['corpus_source'] = '2016train_2017train'\n dev_df['corpus_source'] = '2016test_2017train'\n new_df['corpus_source'] = '2017test'\n\n training_df = merge_post_labels(training_df, training_labels)\n dev_df = merge_post_labels(dev_df, dev_labels)\n\n training_df = 
merge_author_ranks(training_df)\n dev_df = merge_author_ranks(dev_df)\n new_df = merge_author_ranks(new_df)\n\n total_df = pd.concat([training_df, dev_df, new_df])\n\n test_labels = os.path.join(DATA_DIR,\n 'raw',\n 'clpsych17-test',\n 'test_ids.tsv')\n total_df.reset_index(inplace=True)\n total_df = merge_test_ids(total_df, test_labels)\n label_file = os.path.join(DATA_DIR,\n 'raw',\n 'clpsych17-test-labels.tsv')\n merge_ground_truth(total_df, label_file)\n output_path = PROCESSED_PATH / 'all_posts_data.csv'\n \n\n # clean body of text\n total_df['cleaned_body'], total_df['contained_quote'] = zip(*total_df['body'].apply(process_body))\n total_df['images'] = total_df['body'].apply(process_images)\n\n print('--Writing data to {}--'.format(output_path))\n total_df.to_csv(output_path, index=False)\n\n sentences_df = total_df.loc[:, ['post_id', 'cleaned_body', 'label', 'predict_me']]\n # the following will split posts into sentences and write out to a separate csv\n split_to_sentences(sentences_df)", "def features_orders(df_customers, df_receipts):\n df_customers.sort_values(by=['customerId2'], ascending=[True], inplace=True)\n # total amount of all the orders of a cusrtomer\n df_customers = _add_column(df_customers, df_receipts.groupby('customerId2')['price'].sum(), 'sum_orders')\n # the min amount paid in one receipt by a customer\n df_customers = _add_column(df_customers,\n df_receipts.groupby(['customerId2', 'receiptId'], as_index=False)[\n 'price'].sum().groupby('customerId2').min()['price'], 'min_order')\n # the mean amount paid per receipt by a customer\n df_customers = _add_column(df_customers,\n df_receipts.groupby(['customerId2', 'receiptId'], as_index=False)['price'].sum()\n .groupby('customerId2').mean()['price'], 'mean_order')\n # the max amount paid per receipt by a customer\n df_customers = _add_column(df_customers,\n df_receipts.groupby(['customerId2', 'receiptId'], as_index=False)['price'].sum()\n .groupby('customerId2').max()['price'], 'max_order')\n # the number of orders\n df_customers = _add_column(df_customers,\n df_receipts.groupby(['customerId2', 'receiptId'], as_index=False)['price'].sum()\n .groupby('customerId2').count()['price'], 'count_orders')\n\n # the total amount of items bought by a user\n df_customers = _add_column(df_customers,\n df_receipts.groupby('customerId2')['itemQty'].sum(), 'sum_itemQty')\n # the min amount of items bought by a user in a receipt\n df_customers = _add_column(df_customers,\n df_receipts.groupby(['customerId2', 'receiptId'], as_index=False)['itemQty'].sum()\n .groupby('customerId2').min()['itemQty'], 'min_itemQty')\n # the mean amount of items bought by a user in a receipt\n df_customers = _add_column(df_customers,\n df_receipts.groupby(['customerId2', 'receiptId'], as_index=False)['itemQty'].sum()\n .groupby('customerId2').mean()['itemQty'], 'mean_itemQty')\n # the max amount of items bought by a user in a receipt\n df_customers = _add_column(df_customers,\n df_receipts.groupby(['customerId2', 'receiptId'], as_index=False)['itemQty'].sum()\n .groupby('customerId2').max()['itemQty'], 'max_itemQty')\n # from which dividion type a user buys\n for i in DIVISIONID_VALS:\n k = 'divisionId_' + str(i)\n df_receipts[k] = np.where(df_receipts['divisionId'] == i, 1, 0)\n df_customers = _add_column(df_customers, df_receipts.groupby('customerId2')[k].sum(), k)\n # which source type a user uses to pay\n for i in SOURCEID_VALS:\n k = 'sourceId_' + str(i)\n df_receipts[k] = np.where(df_receipts['sourceId'] == i, 1, 0)\n df_customers = 
_add_column(df_customers, df_receipts.groupby('customerId2')[k].sum(), k)\n\n logger.info(\"Features from the returns table: {shape} {dtypes}\"\n .format(shape=df_customers.shape, dtypes=df_customers.dtypes))\n return df_customers", "def build_wmt_ft(self):\n train_files = [self.data_dir + '/' + wmt_train]\n eval_files = [self.data_dir + '/' + wmt_test]\n\n train_data = tf.data.experimental.CsvDataset(\n train_files,\n record_defaults=[tf.string, tf.string],\n field_delim='\\t',\n use_quote_delim=False)\n eval_data = tf.data.experimental.CsvDataset(\n eval_files,\n record_defaults=[tf.string, tf.string],\n field_delim='\\t',\n use_quote_delim=False)\n\n eval_data = eval_data.cache()\n train_data = train_data.cache() # only read once\n\n def to_features_dict(eng, rus):\n return {'inputs': eng, 'targets': rus}\n\n train_data = train_data.map(to_features_dict)\n eval_data = eval_data.map(to_features_dict)\n\n self.default_builder_obj = None\n return train_data, eval_data", "def prepare_class_data_for_prediction(dataframe, model_dict, user_keyword, task_name):\r\n test_tweets = dataframe.iloc[:, [0, 1, 2]]\r\n\r\n parent_dir = Path.cwd().parent\r\n pickle_dir = parent_dir.joinpath('default_results', 'pickle_files_feat_eng')\r\n feature_X_user = pd.DataFrame\r\n emo_X_test_dict = {}\r\n\r\n\r\n for emotion, model_prop in model_dict.items():\r\n preprocessed_X_user = Preprocessor.perform(test_tweets, emotion, user_keyword, task_name)\r\n feature_X_user = Feature_Transformer.perform(preprocessed_X_user, emotion, user_keyword, task_name)\r\n vectorizer = Dictionaries.vectorizer_dict[model_prop[2]]\r\n\r\n #Fit transform the vectorizer with the corresponding preprocessed training data\r\n if os.path.exists(pickle_dir.joinpath(emotion + '_c_train_preprocess_df.pkl')):\r\n preprocess_train_df = pd.read_pickle(pickle_dir.joinpath(emotion + '_c_train_preprocess_df.pkl'))\r\n train_vect = vectorizer.fit_transform(preprocess_train_df['preprocessed_text'].values)\r\n print(emotion + 'TRAIN', train_vect.shape)\r\n train_vect_df = pd.DataFrame(train_vect.toarray(), columns=vectorizer.get_feature_names())\r\n else:\r\n #If the file doesnt exist, exit the program with instructions\r\n print('\\nRequired files does not exist.\\n\\n Please, train the models first by running > Modelling.py')\r\n sys.exit(1)\r\n\r\n # Use the same vectorizer to transform test data and then perform the feature union\r\n vector_X = vectorizer.transform(preprocessed_X_user['preprocessed_text'].values)\r\n test_vect_df = pd.DataFrame(vector_X.toarray(), columns=vectorizer.get_feature_names())\r\n X_test = pd.concat([test_vect_df, feature_X_user], axis=1)\r\n emo_X_test_dict[emotion] = X_test\r\n print(emotion + 'TEST', test_vect_df.shape, X_test.shape)\r\n return emo_X_test_dict", "def get_features_and_target(self, trades_features: pd.DataFrame, trades_target: pd.DataFrame) -> pd.DataFrame:\n \n sf_groups = trades_features.drop_duplicates(subset=['sf_account_id', 'trade_date', 'sku']).groupby('sf_account_id')\n\n # calculate features\n feature_dfs = []\n if 'product_name' in self.feature_categories:\n feature_dfs += [sf_groups.product_name.value_counts().unstack().notnull()]\n if 'product_category' in self.feature_categories:\n feature_dfs += [sf_groups.product_category.value_counts().unstack().notnull()]\n if 'reporting_channel' in self.feature_categories:\n feature_dfs += [sf_groups.sub_reporting_channel.value_counts().unstack().notnull()]\n if 'recency' in self.feature_categories:\n feature_dfs += 
[(trades_features.trade_date_dt.max()-sf_groups.trade_date_dt.max()).dt.days.to_frame().rename(columns={'trade_date_dt':'recency'})]\n if 'frequency' in self.feature_categories:\n feature_dfs += [sf_groups.product_name.count().to_frame().rename(columns={'product_name':'frequency'})]\n if 'total_spend' in self.feature_categories:\n feature_dfs += [sf_groups.cost_float.sum().to_frame().rename(columns={'cost_float':'total_spend'})]\n\n # concat features\n customer_df = pd.concat(feature_dfs, axis=1, sort=False) # outer join on index\n\n # add target variable\n for target_variable in self.target_variables:\n if (trades_target.product_name == target_variable).any():\n customer_df['target_'+target_variable] = trades_target.groupby(['sf_account_id', 'product_name']).trade_date.any().unstack()[target_variable]\n else:\n customer_df['target_'+target_variable] = False\n\n # remove customers with no purchases before cut off\n customer_df = customer_df[customer_df[customer_df.columns[customer_df.columns != 'target']].any(axis=1)]\n\n # replace nans with False\n customer_df.fillna(False, inplace=True)\n\n return customer_df", "def write_features(self):\n num_features_per_file = math.ceil(len(self.features) / self.num_jobs)\n for idx in range(self.num_jobs):\n job_features = self.features[idx * num_features_per_file: (idx + 1) * num_features_per_file]\n features_filename = constants.INPUT_FEATURES_FILENAME.format(self.args.output_dir, idx)\n with open(features_filename, \"wb\") as features_file:\n cloudpickle.dump(job_features, features_file, protocol=pickle.DEFAULT_PROTOCOL)" ]
[ "0.70747465", "0.7067084", "0.62919354", "0.6007114", "0.58264214", "0.5718458", "0.560313", "0.56008935", "0.55872756", "0.55600125", "0.5532584", "0.5520967", "0.55154175", "0.5512787", "0.54379237", "0.53890693", "0.53649795", "0.5364341", "0.53604376", "0.53556436", "0.53533244", "0.53514385", "0.5339159", "0.53381556", "0.53314143", "0.5309661", "0.53057206", "0.52984893", "0.5298287", "0.52958536" ]
0.724281
0
Creates new features from the Description feature using NLTK, an NLP package. The NLP features are collected in a dataframe, and a PCA reduction is applied to this dataframe. Features from the dataframe are renamed with the root name w_nlp. When this method is called during the data_model building step, the dataframe holding the new NLP features is dumped to a file.
def data_transform_nlp(self): df_invoice_line = None is_build_step = False if self._vectorizer_nlp is None: is_build_step = True list_no_words=['SET','PACK'] df_invoice_line, csr_matrix_weights, self._vectorizer_nlp \ = p5_util.nlp_process(self.df_invoice_line\ , 'Description' , vectorizer= self._vectorizer_nlp\ , list_no_words=list_no_words, is_verbose= self.is_verbose) if df_invoice_line is None: self.strprint("***ERROR : NLP process interrupted!") return #------------------------------------------------------------------------- # NLP weights are cumulated (sumerized) per customer #------------------------------------------------------------------------- if csr_matrix_weights is None: csr_matrix_weights \ = p5_util.object_load('./data/matrix_weights_NLP.dump') else: pass self.strprint("df_invoice_line : "+str(df_invoice_line.shape)) self.dbg_df = df_invoice_line.copy() root_name = 'w_nlp_' self._df_w_nlp = p5_util.df_nlp_sum_per_customer(df_invoice_line\ , csr_matrix_weights, root_name) del(csr_matrix_weights) #------------------------------------------------------------------------- # Dimension reduction thanks to PCA #------------------------------------------------------------------------- self.strprint("self._df_w_nlp : "+str(self._df_w_nlp.shape)) root_name_pca = 'nlp_pca_' n_dim = self._nlp_pca_ndim df_customers_pca_nlp, self._pca_nlp \ = p5_util.df_pca_reduce(self._df_w_nlp, n_dim, root_name_pca\ , p_is_scale=False, pca=self._pca_nlp) self.strprint("df_customers_pca_nlp : " +str(df_customers_pca_nlp.shape)) #------------------------------------------------------------------------- # Backup of NLP features per customer #------------------------------------------------------------------------- if is_build_step is True: p5_util.object_dump(df_customers_pca_nlp\ , self._df_customers_nlp_fileName) else: self._df_customers_pca_nlp = df_customers_pca_nlp.copy() return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def feature_description_nlp(self):\n \n #-------------------------------------------------------------------------\n # Returned dataframe is aggregated with weights from self.vectorizer\n #-------------------------------------------------------------------------\n list_no_words=['SET','PACK']\n self.df_invoice_line, vectorizer, matrix_weights \\\n = p5_util.nlp_process(self.df_invoice_line,'Description'\\\n , vectorizer=self.vectorizer, list_no_words=list_no_words)\n\n #-------------------------------------------------------------------------\n # Each vectorized column 'x' is renamed w_nlp_i\n #-------------------------------------------------------------------------\n dict_matching_name = dict()\n for col in self.df_invoice_line.columns:\n if str(col).isdigit() is True:\n new_col_name = \"w_nlp_\"+str(col)\n dict_matching_name[col] = new_col_name\n \n self.df_invoice_line.rename(columns=dict_matching_name,inplace=True)\n #-------------------------------------------------------------------------\n # Description is droped from columns\n #-------------------------------------------------------------------------\n del(self.df_invoice_line['Description'])", "def featureprepare(self,df):\n try:\n df = self.featureselection(df)\n emp_len_dict= pickleload(self.dict_employ_len) # Load emp len\n df['emp_length'] = df['emp_length'].map(emp_len_dict)\n df['Long_emp_length'] = df['emp_length'].apply(lambda x: 'Yes' if x == 10 else 'No') # creating new feature\n df[\"emp_title\"].fillna('Missing', inplace=True)\n\n # Handling missing numerical value\n dict_Mean_var = pickleload(self.dict_Mean_var)\n for col, mean_val in dict_Mean_var.items():\n df[col].fillna(mean_val, inplace=True)\n\n # Handling rare values\n Freqlabels = pickleload(self.Freqlabels)\n for variable, frequent_labels in Freqlabels.items():\n df[variable] = np.where(df[variable].isin(frequent_labels), df[variable], 'Rare')\n\n # Encoding Categorical features\n x = pickleload(self.labelEncoder)\n for features, labels in x.items():\n df.loc[:, features] = labels.transform(df.loc[:, features])\n return df\n except Exception as e:\n self._Logger.error(\"Error in feature preparation: {}\".format(e))", "def generate_features(\n df: pd.DataFrame, spacy_model: str, language: str\n) -> pd.DataFrame:\n logging.info(\"Loading Spacy model...\")\n nlp = spacy.load(spacy_model)\n\n # Makes all tokens lowercase\n logging.info(\"Lowercase\")\n df[\"token_lower\"] = df[\"token\"].str.lower()\n\n logging.info(\"Lemma, pos\")\n spacy_pipe = nlp.pipe(df[\"token_lower\"].values, disable=[\"ner\", \"parser\"])\n features_gen = ((doc[0].lemma_, doc[0].pos_) for doc in spacy_pipe)\n df[\"lemma\"], df[\"pos\"] = zip(*features_gen)\n\n # Prepare stemmers\n logging.info(\"Loading Snowball Stemmer...\")\n snow = SnowballStemmer(language=language)\n\n logging.info(\"Snowball stemmer\")\n df[\"snowballStemmer\"] = df.apply(lambda row: snow.stem(row[\"token_lower\"]), axis=1)\n\n logging.info(\"Loading Porter Stemmer...\")\n port = PorterStemmer()\n\n logging.info(\"Porter stemmer\")\n df[\"porterStemmer\"] = df.apply(lambda row: port.stem(row[\"token_lower\"]), axis=1)\n\n # Adds columns with a binary if the word contains a possible negation prefix or suffix\n logging.info(\"Prefix\")\n df[\"possible_prefix\"] = df.apply(\n lambda row: possible_negation_prefix(row[\"token_lower\"]), axis=1\n )\n\n logging.info(\"Suffix\")\n df[\"possible_suffix\"] = df.apply(\n lambda row: possible_negation_suffix(row[\"token_lower\"]), axis=1\n )\n\n # Adds new columns for the 
previous and next lemma and pos-tag\n logging.info(\"Add prev/next shifts\")\n df[\"prev_Lemma\"] = df[\"lemma\"].shift(periods=1)\n df[\"next_Lemma\"] = df[\"lemma\"].shift(periods=-1)\n df[\"prev_pos\"] = df[\"pos\"].shift(periods=1)\n df[\"next_pos\"] = df[\"pos\"].shift(periods=-1)\n return df", "def construct_NLP_model(self, df=None):\n import review_processing as rp\n # get words\n if df is not None:\n nitems = df.shape[0]\n col_names = df.columns.values\n if self.review_col_name not in col_names or \\\n self.sentiment_col_name not in col_names:\n sys.exit('construct_NL_model: The name {0}/{1} cannot be found'.\n format(self.review_col_name, self.sentiment_col_name))\n review_list = df[self.review_col_name].values.tolist()\n meaningful_words = map(self.review_to_meaningful_words,\n review_list)\n # Get training sentiment values\n self.sentiment = df[self.sentiment_col_name].values\n\n else:\n if self.training_file_name is None:\n sys.exit('construct_NLP_model: traning file name does not '\n 'exist')\n else:\n suffix = os.path.splitext(self.training_file_name)[1][1:]\n if suffix == 'csv':\n df = pd.read_csv(self.training_file_name)\n if self.review_col_name not in col_names or \\\n self.sentiment_col_name not in col_names::\n sys.exit('construct_NL_model: The name {0}/{1} cannot '\n ' be found'.format(self.review_col_name,\n self.sentiment_col_name))\n nitems = df.shape[0]\n review_list = df[review_col_name].values.tolist()\n meaningful_words = map(self.review_to_meaningful_words,\n review_list)\n elif suffix == 'json':\n data_dict_list = rp.load_data(self.training_file_name)\n if self.review_col_name not in data_dict_list.keys():\n sys.exit('construct_NL_model: The name {0} cannot be '\n 'found'.format(review_col_name))\n review_list = map(lambda x: x[review_col_name],\n data_dict_list)\n meaningful_words = map(self.review_to_meaningful_words,\n review_list)\n else:\n sys.exit('construct_NLP_model: file type not supported '\n 'yet!')\n\n # Training process of Bag of Worlds\n if self.NLP_model == 'BagofWords':\n print('construct_NLP_model: Creating bag of words...')\n self.vectorizer = CountVectorizer(analyzer='word',\n tokenizer=None,\n preprocessor=None,\n stop_words=None,\n max_features=self.maxfeature)\n self.train_data_features = vectorizer.fit_transform(\n meaningful_words)\n self.train_data_features = train_data_features.toarray()\n\n # vocab = vectorizer.get_feature_names()\n # dist = np.sum(train_data_features, axis=0)\n # for tag, count in zip(vocab, dist):\n # print(count, tag)\n\n else:\n sys.exit('construct_NLP_model: NLP_model type not supported yet!')", "def df_lda_preprocessing(df, col_name, remove_stopwords=True, add_features=False):\n df['text'] = df[col_name] # Create a copy of the input col_name: text\n \n # df_clean_sting(df, 'text') # Clean the text from col_name # TEST FJERN RENGØRING\n\n # Test other way of handling strings\n df_simple_clean_string(df, 'text')\n\n if add_features:\n df_make_features_from_string(df, 'text') # Add features\n\n # This is a hack soly for the scope of this project to concat ThreadSubject\n # When the message is initiated by the Member\n if col_name == 'SignalMessageBodyClean':\n df_aka = df.copy(deep=True)\n # df_aka['text_1'] = df_aka['ThreadSubject']\n # df_clean_sting(df_aka, 'ThreadTopic')\n df_simple_clean_string(df_aka, 'ThreadTopic')\n\n df['text'] = (df['text'] +' '+df_aka['ThreadTopic']).where(df['IsFirstMessageInthread']==1,df['text'])\n\n df_get_tokens(df, 'text') # Returns col: tokenized_text\n\n # df_stem_words(df, 
'tokenized_text') # Returns col: stemmed_text\n\n df_bigrams(df, 'tokenized_text') # Returns bigrams\n df_trigrams(df, 'tokenized_text') # Returns trigrams\n\n df['ngrams'] = df['tokenized_text'] + df['bigrams'] + df['trigrams']\n\n if remove_stopwords:\n df_remove_stopwords(df, 'ngrams') # returns stopwords_removed", "def preprocess(self, df, maxlen = 169):\n \n vocabs = self.tk.word_index.keys()\n \n df1 = self.treat_na(df)\n df2 = self.remove_punc_sw(df1)\n df3 = self.remove_numbers(df2)\n df4 = self.lemma_pos(df3)\n df5 = self.bigram(df4)\n df6 = self.combine_bigrams(df5)\n \n new_docs = []\n \n for word_list in df6:\n \n if len(word_list) == 2 and word_list[0].lower() == 'noinfo' and word_list[1].lower() == 'noinfo':\n new_docs.append(list(np.zeros(maxlen)))\n \n else:\n new_word_list = []\n for word in word_list:\n if word not in vocabs:\n word = 'UNKNOWN_TOKEN'\n new_word_list.append(word)\n \n sequence = \" \".join(new_word_list)\n vectors = self.tk.texts_to_sequences([sequence])\n padded_vectors = pad_sequences(vectors, maxlen=maxlen, padding='post', truncating='post')\n \n new_docs.append(list(padded_vectors[0]))\n \n return new_docs", "def preprocess_feature(df):", "def generate_trajectories_feature(self):\n if self.df_feature is not None:\n return self.df_feature\n trajs_feature = [traj.get_basic_feature() for traj in self.trajectories]\n self.df_feature = pd.DataFrame(trajs_feature)\n self.df_feature[\"LABEL\"] = self.df[\"LABEL\"]\n return self.df_feature", "def create_new_features(self):\n train = self.train\n \n train['is_context'] = train['context_type'].isin(CONTEXT_TYPE_TEST)\n train['is_context_flow'] = train['listen_type'] * train['is_context']\n \n train['is_listened_context'] = train['is_listened'] * train['is_context']\n train['is_listened_flow'] = train['is_listened'] * train['listen_type']\n train['is_listened_context_flow'] = train['is_listened'] * train['is_context_flow']\n \n for feature in self.categorize_features:\n gby_feat = train.groupby(feature)\n new_features(train, gby_feat, feature, feature in self.listen_type_features, self.context_features, self.flow_features, self.fillna)\n \n # Variable combinations\n for feat1 in self.combo_features1:\n for feat2 in self.combo_features2:\n gby_feat = train.groupby([feat1, feat2])\n name = feat1 + '_' + feat2\n new_features(train, gby_feat, name, feat1 in self.listen_type_features, self.context_features, self.flow_features, self.fillna)", "def other_features_(tweet, cleaned_tweet):\n #print(\"WARNING>>>>>>>>>>>>>>>>> VADERSENTIMENT DISABLED\")\n sentiment = nlp.sentiment_analyzer.polarity_scores(tweet)\n\n words = cleaned_tweet #Get text only\n\n syllables = textstat.syllable_count(words) #count syllables in words\n num_chars = sum(len(w) for w in words) #num chars in words\n num_chars_total = len(tweet)\n num_terms = len(tweet.split())\n num_words = len(words.split())\n avg_syl = round(float((syllables+0.001))/float(num_words+0.001),4)\n num_unique_terms = len(set(words.split()))\n ###Modified FK grade, where avg words per sentence is just num words/1\n FKRA = round(float(0.39 * float(num_words)/1.0) + float(11.8 * avg_syl) - 15.59,1)\n ##Modified FRE score, where sentence fixed to 1\n FRE = round(206.835 - 1.015*(float(num_words)/1.0) - (84.6*float(avg_syl)),2)\n\n\n twitter_objs = count_twitter_objs(tweet) #Count #, @, and http://\n features = [FKRA, FRE, syllables, num_chars, num_chars_total, num_terms, num_words,\n num_unique_terms, sentiment['compound'],\n twitter_objs[2], twitter_objs[1],]\n #features = 
pandas.DataFrame(features)\n return features", "def __init__(self, df):\n # Creating content\n df['content_str'] = df['content'].map(lambda x: self.__word_joiner(x))\n text = df['content_str'].str.cat(sep=' ')\n df.drop('content_str', axis=1, inplace=True)\n\n self._generateBigrams(text)\n self._generateUnigrams(text)\n self.corpussize=len(Utilities.CVTokeniser(text))\n print(\"Feature_PMI: Corpus size:\",self.corpussize)", "def build_feature_transform():\n\n # These features can be parsed as numeric.\n numeric = HEADER.as_feature_indices(\n [\"review_count\", \"lat\", \"lng\", \"lat2\", \"lng2\"]\n )\n\n # These features contain a relatively small number of unique items.\n categorical = HEADER.as_feature_indices(\n [\"distance\", \"price_level\", \"review_count\", \"Sp1\", \"type\"]\n )\n\n # These features can be parsed as natural language.\n text = HEADER.as_feature_indices(\n [\n \"slug\", \"menu\", \"slug.1\", \"categories\", \"name\", \"url\", \"homeurl\",\n \"resource_id1\", \"resource_id2\"\n ]\n )\n\n numeric_processors = Pipeline(steps=[(\"robustimputer\", RobustImputer())])\n\n categorical_processors = Pipeline(\n steps=[\n (\"thresholdonehotencoder\", ThresholdOneHotEncoder(threshold=162))\n ]\n )\n\n text_processors = Pipeline(\n steps=[\n (\n \"multicolumntfidfvectorizer\",\n MultiColumnTfidfVectorizer(\n max_df=0.9977,\n min_df=0.0003137465824032988,\n analyzer=\"word\",\n max_features=10000\n )\n )\n ]\n )\n\n column_transformer = ColumnTransformer(\n transformers=[\n (\"numeric_processing\", numeric_processors, numeric\n ), (\"categorical_processing\", categorical_processors,\n categorical), (\"text_processing\", text_processors, text)\n ]\n )\n\n return Pipeline(\n steps=[\n (\"column_transformer\",\n column_transformer), (\"robustpca\", RobustPCA(n_components=88)),\n (\"robuststandardscaler\", RobustStandardScaler())\n ]\n )", "def transform_and_create_new_features(df):\n # 'GENDER' FEATURE MANAGEMENT\n # Transform 'Gender' feature (categorical) to numerical one\n df['Gender'] = df['Sex'].map({'female': 0, 'male': 1}).astype(int)\n\n # 'EMBARKED' FEATURE MANAGEMENT\n # 1st approach: df['Port'] = df['Embarked'].map({'C': 1, 'S': 2, 'Q': 3}).astype(int)\n # Extract from 'pycon UK Tutorial':\n # \"Replacing {C, S, Q} by {1, 2, 3} would seem to imply the ordering C < S < Q when in fact they are simply arranged\n # alphabetically. To avoid this problem, we create dummy variables. 
Essentially this involves creating new columns\n # to represent whether the passenger embarked at C with the value 1 if true, 0 otherwise.\"\n dummies_embarked = pd.get_dummies(df['Embarked'], prefix='Embarked')\n df = pd.concat([df, dummies_embarked], axis=1)\n\n # 'AGE' & 'FARE' FEATURES MANAGEMENT\n df = _transform_age_feature(df)\n df = _transform_fare_feature(df)\n\n # CREATION OF A NEW FEATURE: Family size + Alone or not ?\n df['Family'] = df['SibSp'] + df['Parch']\n df['Alone'] = 0\n df.loc[df['Family'] == 0, 'Alone'] = 1\n\n # Drop all columns that are now useless\n df = df.drop(['Sex', 'Age', 'Fare', 'Embarked', 'SibSp', 'Parch'], axis=1)\n print(df.head(10))\n\n return df", "def featurize(self, data):\n \n features = []\n\n # tokens = data.split()\n\n #Modification 1: Normalization: All lowercase\n #Removing this did not seem to have any performance boost\n #but it did nothing negative either\n data = data.lower()\n\n #Modification 2: Normalization: Tokenizing using NLTK\n #Keep this\n # tokens = word_tokenize(data)\n tokens = data.split()\n\n #Modification 3: Word List: Removing stop words using NLTK\n #Keep this\n stop_words = set(stopwords.words('english'))\n tokens_filtered = []\n\n for t in tokens:\n if t not in stop_words:\n tokens_filtered.append(t)\n\n tokens = tokens_filtered\n\n #Modification 4: Pre-Processing Lemmization using NLTK\n #Surprisingly does not appear to impact performance\n # for t in tokens:\n # t = self.wordnet_lemmatizer.lemmatize(t)\n\n capital = 0\n average_word_length = 5 #It's 4.7, but we'll use 5\n short_words = 0\n long_words = 0\n\n for t in tokens:\n\n #Feature 1: Bag of words\n features.append((t, True))\n\n if(t.isupper()):\n capital += 1\n\n #Feature 3: Long or short word counter, intentionally ignoring length 4\n #and 5 as those are close to average\n #Very important that stop words were removed\n if(len(t) > average_word_length):\n long_words += 1\n elif(len(t) < average_word_length - 1):\n short_words += 1\n \n #Feature 2: Lots of capital\n #Remove this. It only appears to be a rough count of sentence number vs.\n #Capturing any sentiment. 
Does not impact F1 score in given train/dev sets\n # if(capital > 2):\n # features.append((\"LOTS_OF_CAPITAL\", True))\n\n #Feature 3: Long or short words\n # if(long_words > short_words):\n # features.append((\"LOTS_OF_LONG_WORDS\", True))\n\n\n\n return features", "def clfFeature(feature, mode):\r\n \r\n feature_path = 'C:\\\\Users\\\\Tom\\\\Documents\\\\Informatiekunde\\\\Thesis\\\\features\\\\' + feature + '.txt'\r\n classlist = ['negative', 'positive']\r\n features = pd.DataFrame()\r\n\r\n for label in classlist:\r\n path = 'C:\\\\Users\\\\Tom\\\\Documents\\\\Informatiekunde\\\\Thesis\\\\data\\\\' + mode + '\\\\' + label + '\\\\'\r\n allFiles = glob.glob(path + \"*.txt\")\r\n for review in allFiles:\r\n title = review.strip('.txt').split('\\\\')[-1]\r\n file = open(review, 'r', encoding='utf8').read().lower()\r\n wordlist = []\r\n featreader = csv.reader(open(feature_path, 'r'), delimiter= '\\n')\r\n for word in featreader:\r\n if word[0] in file:\r\n wordlist.append(word[0])\r\n df = pd.DataFrame({'File': [title], feature.capitalize(): [', '.join(wordlist)]}).set_index('File')\r\n features = features.append(df)\r\n \r\n return features", "def propername_featurize(input_data,N, MinFreq,model_choice =\"NGram\"):\n def to_lowercase(text):\n return text.lower()\n\n def remove_URL(text):\n return re.sub(r\"http\\S+\", \"\", text)\n def remove_non_ascii(words):\n new_words = []\n for word in words:\n new_word = unicodedata.normalize('NFKD', word).encode('ascii', 'ignore').decode('utf-8', 'ignore')\n new_words.append(new_word)\n return new_words\n\n def tokenize(text):\n return text.split()\n def remove_stopwords(words):\n new_words = []\n for word in words:\n if word not in stop_word:\n new_words.append(word)\n return new_words\n def detokenize_words(words):\n separator = ' '\n return separator.join(words)\n def preprocess_text(df):\n df['text'] = df['text'].apply(to_lowercase)\n df['text'] = df['text'].apply(remove_URL)\n df['text'] = df['text'].apply(tokenize)\n df['text'] = df['text'].apply(remove_non_ascii)\n df['text'] = df['text'].apply(detokenize_words) \n return df\n def character_ngram(text_matrix, N, MinFreq): #array of non-tokenized text\n #tokenize\n all_tokenized_text = []\n #build all token\n flatten_tokenized_text = []\n for j in text_matrix:\n cur_text = \"\".join(j.split())\n cur_feature = []\n \n for i in range(N[0]-1,N[1]): \n \n for l in range(len(cur_text) - i):\n cur_feature.append(cur_text[l:l+i+1])\n \n all_tokenized_text.append(cur_feature)\n flatten_tokenized_text.extend(cur_feature)\n charfreq = {}\n for i in flatten_tokenized_text:\n if i not in charfreq.keys():\n charfreq[i] = 1\n else:\n charfreq[i] += 1\n selected_feature = []\n for i, item in charfreq.items():\n if item >= MinFreq:\n selected_feature.append(i)\n dim = len(selected_feature)\n encoded_matrix = []\n selected_feature = np.array(selected_feature)\n for i in all_tokenized_text:\n cur_text = np.array(i)\n cur_encoded = np.zeros(dim)\n cur_idx = []\n for j in range(len(cur_text)):\n idx = np.where(selected_feature == cur_text[j]) \n if len(idx[0]) != 0: \n cur_idx.append(idx[0][0])\n #binary character presence \n cur_encoded[cur_idx] = 1\n\n encoded_matrix.append(cur_encoded)\n encoded_matrix = np.array(encoded_matrix)\n\n return encoded_matrix, selected_feature\n def task_specific_featurize(feature_value):\n feature_dic = {\"contain_numerics\":[], \"contain_special_punc\":[],\"contain_inc\":[],\"Small_token_length\":[]}\n special_pun = \"&\\?-:%\"\n company_col = [\"co.\",\"inc.\"]\n def 
hasNumbers(string):\n return any(char.isdigit() for char in string)\n for i in text_feature:\n if hasNumbers(i):\n feature_dic[\"contain_numerics\"].append(1)\n else:\n feature_dic[\"contain_numerics\"].append(0)\n Spec_Punc = False\n for l in special_pun:\n if i.find(l) != -1:\n feature_dic[\"contain_special_punc\"].append(1)\n Spec_Punc = True\n break\n if Spec_Punc == False:\n feature_dic[\"contain_special_punc\"].append(0)\n Contain_Com = False\n for l in company_col:\n if i.find(l) != -1:\n feature_dic[\"contain_inc\"].append(1)\n Contain_Com = True\n break\n if Contain_Com == False:\n feature_dic[\"contain_inc\"].append(0)\n token_length = len(i.split())\n if token_length <= 1:\n feature_dic[\"Small_token_length\"].append(1)\n else:\n feature_dic[\"Small_token_length\"].append(0)\n\n encoded_matrix = pd.DataFrame(feature_dic).values\n selected_feature = list(feature_dic.keys()) \n return encoded_matrix, selected_feature\n # TODO: Implement featurization of input.\n matrix_processed = preprocess_text(input_data)\n text_feature = matrix_processed[[\"text\"]].values.flatten() \n if model_choice == \"NGram\":\n \n encoded_matrix, selected_feature = character_ngram(text_feature, N, MinFreq)\n elif model_choice == \"TS\":\n encoded_matrix, selected_feature = task_specific_featurize(text_feature)\n elif model_choice == \"Combined\":\n\n encoded_matrix_specific, selected_feature_specific = task_specific_featurize(text_feature) \n encoded_matrix_bow, selected_feature_bow = character_ngram(text_feature, N, MinFreq)\n encoded_matrix = np.hstack((encoded_matrix_bow,encoded_matrix_specific))\n selected_feature = list(selected_feature_bow)\n selected_feature.extend(selected_feature_specific)\n \n return encoded_matrix,selected_feature", "def text_feature_extract(df):\n return df", "def construct_df_topics(self, n_words=20):\n\n self.check_model()\n topic_keywords = []\n keywords = array(self.vectorizer.get_feature_names())\n\n for topic_weights in self.model.components_:\n top_keyword_locs = (-topic_weights).argsort()[:n_words]\n topic_keywords.append(keywords.take(top_keyword_locs))\n\n self.df_topic_keywords = pd.DataFrame(topic_keywords)\n self.df_topic_keywords.columns = ['Word ' + str(i) for i in range(self.df_topic_keywords.shape[1])]\n self.df_topic_keywords.index = ['Topic ' + str(i) for i in range(self.df_topic_keywords.shape[0])]", "def feature_extraction(inputFile, text, label):\r\n df = pd.read_csv(inputFile, encoding=\"utf8\")\r\n df[text].replace(np.nan, '', inplace=True)\r\n for idx, line in df.iterrows():\r\n try:\r\n words = line[text]\r\n newWords = ''.join(words.split())\r\n df.set_value(idx, text, newWords)\r\n except:\r\n pass\r\n tf = TfidfVectorizer(analyzer='char', encoding=\"utf8\", min_df=10)\r\n\r\n x = tf.fit_transform(df[text])\r\n x = x.toarray()\r\n print(x.shape)\r\n y = df[label]\r\n\r\n return x, y", "def extract_features_only(self, text):\n \n featurelist = []\n \n sentences = util.sentence_tokenize(text)\n taggedSentences = [] \n for sentnumber, sentence0 in enumerate(sentences):\n \n sentence = self.clean_text(sentence0)\n \n # tokenize each sentence to have a list of words to be processed\n tokens = nltk.word_tokenize(sentence)\n #run the above procedure\n sentence_to_parse = self.get_untagged(tokens)\n \n # Save tagged sentences for later computing of expose date\n taggedSentences.append(sentence_to_parse)\n \n #only if the cleaned sentence is NOT empty we parse it\n if sentence_to_parse!=[]:\n tree = self.cp.parse(sentence_to_parse)\n tree1 = 
self.cp1.parse(sentence_to_parse)\n \n# new_sentence_to_parse = ','.join([' '.join(nltk.tag.untag(subtree.leaves())) + ' ' for subtree in tree.subtrees() if subtree.node in self.st_filter])\n new_sentence_to_parse = ','.join([' '.join(nltk.tag.untag(subtree.leaves())) + ' ' for subtree in tree.subtrees() if subtree.label() in self.st_filter])\n\n #here we delete the dash and replace it with whitespace to convert post-vac to post vac\n new_sentence_to_parse = new_sentence_to_parse.replace(', ,', ',')\n #here we delete the dash and replace it with whitespace to convert post-vac to post vac\n new_sentence_to_parse = new_sentence_to_parse.replace(',', ', ')\n\n new_sentence_to_parse = nltk.word_tokenize(new_sentence_to_parse)\n\n #run the above procedure\n new_sentence_to_parse = self.get_untagged(new_sentence_to_parse)\n \n if new_sentence_to_parse!=[]:\n tree2 = self.cp.parse(new_sentence_to_parse)\n for subtree in tree2.subtrees():\n if subtree.label() in self.st_filter: \n featString = self.massage_features(subtree)\n featurelist.append((subtree.label(), featString, sentnumber, subtree.leaves()))\n \n for subtree in tree1.subtrees():\n if subtree.label() in self.labels_gram1:\n featString = self.massage_features(subtree)\n featurelist.append((subtree.label(), featString, sentnumber, subtree.leaves()))\n\n self.sentences = sentences\n \n n = len(sentences)\n locsSentStarts = [-1] * n\n curpt = 0\n for i in range(n):\n pos = text[curpt:].find(sentences[i])\n locsSentStarts[i] = pos + curpt\n curpt = locsSentStarts[i] + len(sentences[i])\n self.sentence_startPos = locsSentStarts\n \n featObjList = self.initialize_feature_obj_list(featurelist)\n \n featList = [(feat.getType(), feat.getStartPos(), feat.getEndPos(), feat.getString()) for feat in featObjList]\n return featList", "def generate_features(self, df):\n df = df.reset_index()\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n return extract_features(df, column_id=\"id\", impute_function=impute,\n default_fc_parameters=self.extraction_settings)", "def creating_feature_vector():\r\n\twordlist = []\r\n\tlabel = \"\"\r\n\tfw = open(\"feature_vector.txt\", \"w+\", encoding = \"utf-8\")\r\n\twith open(\"D:\\\\Python_Prac\\\\wordstag\\\\modules\\\\HI_EN_TRAIN.txt\", \"r\", encoding = \"utf-8\") as f:\r\n\t\tfor line in f:\r\n\t\t\twordlist.append(line)\r\n\t\tfor index, line in enumerate(wordlist):\r\n\t\t\tif line == \"\\n\":\r\n\t\t\t\tcontinue\r\n\t\t\tcontext = line.split(\"\\t\")\r\n\t\t\tlabel = context[1]\r\n\t\t\tfeature_vector = label+\" \"\r\n\t\t\tngram_vector = ngram_frequency(str(context[0]))\r\n\t\t\tfor vector in ngram_vector:\r\n\t\t\t\tfeature_vector += str(vector)+\" \"\r\n\t\t\tfeature_vector += str(is_english(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(is_hindi(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(is_abbr(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(med_in_english(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(med_in_hindi(context[0]))+\" \"\r\n\t\t\tbefore = [0,0,0]\r\n\t\t\tafter = [0,0,0]\r\n\t\t\tfor i in range(3):\r\n\t\t\t\tif (index-i) < 0 or (index-i+1) > len(wordlist)-1:\r\n\t\t\t\t\tcontinue\r\n\t\t\t\tbefore[2-i] = get_word_context(wordlist[index-i+1].split(\"\\t\")[0])\r\n\t\t\tfor i in range(3):\r\n\t\t\t\tif (index+i+1) > len(wordlist)-1:\r\n\t\t\t\t\tcontinue\r\n\t\t\t\tafter[2-i] = get_word_context(wordlist[index+i+1].split(\"\\t\")[0])\r\n\t\t\tfor i in before:\r\n\t\t\t\tfeature_vector += str(i)+\" \"\r\n\t\t\tfor i in after:\r\n\t\t\t\tfeature_vector += 
str(i)+\" \"\r\n\t\t\tfeature_vector += \"\\n\"\r\n\t\t\tfw.write(feature_vector)\r\n\t\t\tprint(\"Proceeding...\"+str(index+1)+\" of 16683\")\r\n\r\n\tfw.close()", "def preprocess_training_text(text, accented_chars=True, \n convert_num=False, extra_whitespace=True, \n lemmatization=True, lowercase=True, punctuations=True,\n remove_html=True, remove_num=True, special_chars=True, \n stop_words=True):\n \n\n \"\"\"preprocess text with default option set to true for all steps\"\"\"\n if remove_html == True: #remove html tags\n text = strip_html_tags(text)\n if extra_whitespace == True: #remove extra whitespaces\n text = remove_whitespace(text)\n if accented_chars == True: #remove accented characters\n text = remove_accented_chars(text)\n if lowercase == True: #convert all characters to lowercase\n text = text.lower()\n \n \n doc = nlp(text) #tokenise text\n\n\n clean_text = []\n for token in doc:\n flag = True\n edit = token.text\n # print(\"Word: \", edit, \" Type: \", token.pos_)\n # remove stop words\n if stop_words == True and token.is_stop and token.pos_ != 'NUM': \n flag = False\n # remove punctuations\n if punctuations == True and (token.pos_ == 'PUNCT') and flag == True: \n flag = False\n \n # remove 'X' characters:\n if token.pos_ == 'X':\n flag = False\n # remove special characters\n if special_chars == True and token.pos_ == 'SYM' and flag == True: \n flag = False\n # remove numbers\n if remove_num == True and (token.pos_ == 'NUM' or token.text.isnumeric()) \\\n and flag == True:\n flag = False\n # convert number words to numeric numbers\n if convert_num == True and token.pos_ == 'NUM' and flag == True:\n edit = w2n.word_to_num(token.text)\n # convert tokens to base form\n elif lemmatization == True and token.lemma_ != \"-PRON-\" and flag == True:\n edit = token.lemma_\n # append tokens edited and not removed to list \n if edit != \"\" and flag == True:\n clean_text.append(edit)\n \n # Convert back to string:\n new_text = ' '.join(clean_text)\n regex = re.compile('[^a-zA-Z]')\n new_text = regex.sub(' ', new_text)\n words = re.findall(r'\\w+.', new_text)\n return ' '.join(words)", "def topic_extraction(df, col_name):\n tfidf_vectorizer = TfidfVectorizer(max_df=0.95, min_df=2,\n max_features=200,\n stop_words='english')\n tfidf = tfidf_vectorizer.fit_transform(df[col_name])\n\n tf_vectorizer = CountVectorizer(max_df=0.95, min_df=2,\n max_features=200,\n stop_words='english')\n tf = tf_vectorizer.fit_transform(df[col_name])\n nmf = NMF(n_components=20, random_state=1,\n alpha=.1, l1_ratio=.5)\n tfidf_feature_names = tfidf_vectorizer.get_feature_names()\n nmf_w = nmf.fit_transform(tfidf)\n nmf_h = nmf.components_\n df['labels'] = nmf_w.argmax(axis=1) # this was the right code to get labels/clusters\n\n\n print(\"\\nTopics in NMF model:\")\n print_top_words(nmf, tfidf_feature_names)\n\n\n lda = LatentDirichletAllocation(n_topics=20, max_iter=5,\n learning_method='online',\n learning_offset=50.,\n random_state=0,\n n_jobs=-1)\n lda.fit(tf)\n doc_topic_distrib = lda.transform(tf)\n lda_labels = doc_topic_distrib.argmax(axis=1)\n print lda_labels[:100]\n df['lda_labels'] = lda_labels\n print(\"\\nTopics in LDA model:\")\n tf_feature_names = tf_vectorizer.get_feature_names()\n print_top_words(lda, tf_feature_names)\n return df", "def create_tdm(cls):\n X = cls.vectorizer.fit_transform(cls.processed_documents) # Convert the X as transposed matrix\n X = X.T.toarray() # Create a DataFrame and set the vocabulary as the index\n cls.df_tdm = pd.DataFrame(X, 
index=cls.vectorizer.get_feature_names())", "def _finalize_features(self) -> DataFrameLike:\n all_features_dict = dict(ChainMap(*self._final_features.values()))\n return pd.DataFrame(all_features_dict)", "def featurize(movies):\n ###TODO \n movies['features'] = \"\" \n get_h = set() \n vocab_dict = {}\n df_dict_return = {}\n tup_list = []\n index_dict = {}\n index_dict_1 = {}\n movie_len = len(movies) \n #print(\"MovieLength::\",movie_len)\n #print(\"MOVIES:::\",movies)\n \n get_h = cal_unique_features(movies) # num_features\n\n vocab_dict = cal_unique_vocab(get_h) # vocab complete\n\n len_vocab = len(get_h)\n \n df_dict_return = cal_unique_docs(get_h,movies) # df(i)\n\n for token in get_h :\n #tup_list.clear()\n #print(\"token_GOTTTTT:::\",token)\n for index,row in movies.iterrows(): \n #print(\"row_got::\",row)\n gen_list = row['tokens']\n #print(\"gen_list::\",gen_list)\n #mov_id = row['movieId'] \n #print(\"mov_id::\",mov_id)\n token_count_1 = Counter(gen_list).most_common()[:1]\n tok = token_count_1[0]\n index_dict_1[index] = tok[1]\n token_count = gen_list.count(token)\n #print(\"token_count::\",token_count)\n tup = (index,token_count)\n #print(\"tuple::\",tup)\n tup_list.append(tup)\n #print(\"LIST_PRINT:::::::::::::\",tup_list)\n index_dict[token] = tup_list\n tup_list = []\n \n \n #print(\"INDEX_DICT:::\",index_dict) # tf(i,d)\n #print(\"INDEX_DICT_1:::\",index_dict_1) # max_k dict per docx\n \n \n for ind, row in movies.iterrows():\n data_list = []\n rows_list = []\n columns_list = []\n gen_list = row['tokens']\n #print(\"TOKENS GOTTT::\",gen_list) \n for gen in gen_list:\n tf = get_tf_value(index_dict,gen,ind)\n #print(\"TF GOTTT::\",tf) \n tf_weight = float( tf / index_dict_1[ind])\n #print(\"tf_weight::\",tf_weight)\n df_weight = float( math.log10( movie_len / df_dict_return[gen] ) )\n #print(\"df_weight::\",df_weight)\n final_tfidf = tf_weight * df_weight\n #print(\"final_tfidf::\",final_tfidf)\n data_list.append(final_tfidf)\n columns_list.append(vocab_dict[gen])\n rows_list.append(0) \n csr = csr_matrix((data_list, (rows_list,columns_list)), shape=(1,len_vocab))\n #print(\"TYPE of CSR GOTT::\",type(csr))\n #print(\"CSR GOTT:::\",csr) \n movies.set_value(ind, 'features', csr)\n \n #print(\"UPDATE movies::\",movies) \n\n return(movies,vocab_dict)\n \n\n pass", "def preprocess(old_df, label_name, category_features, non_category_features):\n old_df['fraud'] = old_df[label_name].apply(lambda x: x[0] == 'f')\n\n # Creating a new dataframe with a subset of features.\n new_df = old_df[['fraud'] + non_category_features]\n\n # For categorical features, we make dummy variables,\n # and merge them into new_df.\n for feature in category_features:\n dummy_df = pd.get_dummies(old_df[feature], prefix=feature,\n dummy_na=True)\n # Since dummy_na=True, the last column will be for null values.\n dummy_df.drop(dummy_df.columns[-1], axis=1, inplace=True)\n new_df = pd.concat([new_df, dummy_df], axis=1)\n return new_df", "def textFeature(mode):\r\n \r\n classlist = ['negative', 'positive']\r\n data = pd.DataFrame()\r\n\r\n for label in classlist:\r\n path = 'C:\\\\Users\\\\Tom\\\\Documents\\\\Informatiekunde\\\\Thesis\\\\data\\\\' + mode + '\\\\' + label + '\\\\'\r\n allFiles = glob.glob(path + \"*.txt\")\r\n df1 = pd.DataFrame()\r\n for review in allFiles:\r\n title = review.strip('.txt').split('\\\\')[-1]\r\n text = open(review, 'r', encoding='utf8').read()\r\n df = pd.DataFrame({'File': [title], 'Text': [text], 'Label': [label]}).set_index('File')\r\n df1 = df1.append(df)\r\n data = 
data.append(df1)\r\n \r\n return data", "def extract_features(self, doc):\n\n features = dict()\n\n bow = self.vectorize_doc_simple(doc)\n\n charcount = self.char_count(doc)\n wordcount = self.word_count(doc)\n sentencecount = self.sentence_count(doc)\n paragraphcount = self.paragraph_count(doc)\n\n # extract characters features\n features['characters per word'] = charcount / wordcount\n features['characters per sentence'] = charcount / sentencecount\n features['characters per paragraph'] = charcount / paragraphcount\n features['characters per document'] = charcount\n\n features['word characters length variance'] = numpy.std(\n self.word_char_length_variance(doc))\n features['sentence characters length variance'] = numpy.std(\n self.sentence_char_length_variance(doc))\n\n # extract words features\n features['words per sentence'] = wordcount / sentencecount\n features['words per paragraph'] = wordcount / paragraphcount\n features['words per document'] = wordcount\n\n features['sentence words length variance'] = numpy.std(\n self.sentence_words_length_variance(doc))\n\n # extract sentences features\n features['sentences per paragraph'] = sentencecount / paragraphcount\n features['sentences per document'] = sentencecount\n\n # extract paragraphs features\n features['paragraphs per document'] = paragraphcount\n\n # extract syllables features\n syllablecount = 0\n for word, count in bow.iteritems():\n syllablecount += self.num_of_syllables(word) * count\n features['syllables per word'] = syllablecount / wordcount\n features['syllables per sentence'] = syllablecount / sentencecount\n features['syllables per paragraph'] = syllablecount / paragraphcount\n\n # extract part of speech features\n tokens = self.pos_tag_doc(doc)\n\n pos_counts = self.vectorize_pos_tags(tokens)\n poswordcount = sum(pos_counts.values())\n for i in xrange(82, 101):\n features['%d per word' % i] = pos_counts[i] / poswordcount\n\n sorted_pos_counts = sorted(pos_counts, key=pos_counts.get, reverse=True)\n features['1st top tag'] = str(sorted_pos_counts[0])\n features['2nd top tag'] = str(sorted_pos_counts[1])\n features['3rd top tag'] = str(sorted_pos_counts[2])\n features['4th top tag'] = str(sorted_pos_counts[3])\n features['5th top tag'] = str(sorted_pos_counts[4])\n\n # extract vocab features\n vocabsize = len(self.vectorize_doc_simple(doc))\n features['vocab size'] = vocabsize\n features['words per vocab size'] = wordcount / vocabsize\n\n return features" ]
[ "0.7509502", "0.63895476", "0.63655084", "0.6190819", "0.61304533", "0.60974437", "0.6039494", "0.6013443", "0.6001541", "0.59564126", "0.59309864", "0.584246", "0.58098847", "0.5809157", "0.5786593", "0.57630605", "0.57536405", "0.57465136", "0.57437116", "0.5743342", "0.5741203", "0.5738111", "0.567976", "0.567601", "0.5647194", "0.5631507", "0.5618429", "0.56146353", "0.5601197", "0.5601117" ]
0.6648716
1
Build the dataframe df_customers from transformed data. The transformed data are derived from the NLP, Time and RFM features; see data_transform(). These data are stored as dataframe attributes.
def df_customers_features_build(self): df_customers_rfm = self._df_customers_rfm.copy() df_customers_timeFeature = self._df_customers_timeFeature.copy() df_customers_nlp = self._df_customers_pca_nlp.copy() #------------------------------------------------------------------------- # Dataframe are aggregated; note that indexes are customerID. #------------------------------------------------------------------------- df_customers = pd.DataFrame() df_customers = pd.concat([df_customers,df_customers_rfm], axis=1) df_customers = pd.concat([df_customers,df_customers_timeFeature]\ , join='inner', axis=1) df_customers = pd.concat([df_customers,df_customers_nlp]\ , join='inner', axis=1) self.strprint("All features : "+str(df_customers.shape)) self._df_customers = df_customers.copy() return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def features_customers(df_customers):\n for i in PREMIER_VALS:\n k = 'premier_' + str(i)\n df_customers[k] = np.where(df_customers['premier'] == i, 1, 0)\n\n df_customers['age'] = datetime.now().date().year - df_customers['yearOfBirth']\n df_customers['male'] = np.where(df_customers['gender'] == 'M', 1, 0)\n df_customers['female'] = np.where(df_customers['gender'] == 'F', 1, 0)\n df_customers['days_in_asos'] = (datetime.now().date() - df_customers['dateCreated']).dt.days\n\n logger.info(\"Features from the customers table: {shape} {dtypes}\"\n .format(shape=df_customers.shape, dtypes=df_customers.dtypes))\n return df_customers", "def df_customers_fileRead(self):\n \n #-------------------------------------------------------------------------\n # RFM features are restored\n #-------------------------------------------------------------------------\n df_customers_rfm \\\n = p5_util.object_load(self.df_customers_rfm_fileName)\n self.strprint(\"RFM features : \"+str(df_customers_rfm.shape))\n \n #-------------------------------------------------------------------------\n # Time features are restored\n #-------------------------------------------------------------------------\n df_customers_timeFeature \\\n = p5_util.object_load(self._df_customers_timeFeature_fileName)\n self.strprint(\"Time features : \"+str(df_customers_timeFeature.shape))\n \n #-------------------------------------------------------------------------\n # NLP features are restored\n #-------------------------------------------------------------------------\n df_customers_nlp = p5_util.object_load(self._df_customers_nlp_fileName)\n self.strprint(\"NLP features : \"+str(df_customers_nlp.shape))\n\n if False:\n df_customers_rfm = self._df_customers_rfm.copy()\n df_customers_timeFeature = self._df_customers_timeFeature.copy()\n df_customers_nlp = self._df_customers_pca_nlp.copy()\n\n #-------------------------------------------------------------------------\n # Dataframe are aggregated; note that indexes are customerID.\n #-------------------------------------------------------------------------\n df_customers = pd.DataFrame()\n\n df_customers = pd.concat([df_customers,df_customers_rfm], axis=1)\n\n df_customers = pd.concat([df_customers,df_customers_timeFeature]\\\n , join='inner', axis=1)\n\n df_customers = pd.concat([df_customers,df_customers_nlp]\\\n , join='inner', axis=1)\n\n self.strprint(\"All features : \"+str(df_customers.shape))\n\n #----------------------------------------------------------------------\n # Dataframe is dumped into a file\n #----------------------------------------------------------------------\n p5_util.object_dump(df_customers, self._df_customers_fileName)\n if False:\n #----------------------------------------------------------------------\n # Dataframe is copied as an attribute\n #----------------------------------------------------------------------\n self._df_customers = df_customers.copy()\n \n return", "def create_features_customer(profile, transcript_training):\n\n # create avg/min/max amount features. 
Need to calculate amount features from transcript\n # because transcript_training only contains transactions for offer received and viewed.\n # such transactions do not have amount associated\n\n query = \"\"\"\n SELECT a.person, min(amount) as min_amount, max(amount) as max_amount, avg(amount) as avg_amount\n FROM transcript a\n JOIN transcript_quantile b\n ON a.person = b.person \n WHERE a.time <= b.received_time\n GROUP BY a.person\n \"\"\"\n\n profile_amount = u.read_dataframe_from_sql(query).set_index('person')\n\n # create avg/min/max amount duration_view\n profile_duration = create_features_using_groupby(transcript_training\\\n \t, 'profile', 'duration_view')\n\n # create view rate (average of label)\n profile_view_rate = create_features_using_groupby(transcript_training, 'profile', 'label'\\\n \t, minimum=False, maximum=False)\n profile_view_rate.columns=['view_rate_profile']\n\n # create trx rate (count of transactions per person/(max received time - min received time))\n profile_trx_rate = (transcript_training.groupby('person').size()*100\\\n /(transcript_training.groupby('person')['received_time'].max() \\\n - transcript_training.groupby('person')['received_time'].min())).reset_index()\n profile_trx_rate.columns = ['person', 'avg_trx_cnt']\n # set trx rate = 1 if max received time == min received time\n profile_trx_rate.loc[profile_trx_rate['avg_trx_cnt']==np.inf, 'avg_trx_cnt'] = 1\n profile_trx_rate = profile_trx_rate.set_index('person')\n\n profile_feat = profile_amount.join(profile_duration)\\\n .join(profile_view_rate).join(profile_trx_rate)\n\n assert pd.merge(profile, profile_feat, how='left', left_index=True, right_index=True).shape[0] == profile.shape[0]\\\n , \"rows do not match with original data (profile)\"\n\n profile = pd.merge(profile, profile_feat, how='left', left_index=True, right_index=True)\n\n return profile", "def data_transform(self, df) :\n\n #-------------------------------------------------------------------------\n # Copy of given dataframe to be transformed\n #-------------------------------------------------------------------------\n self.df_invoice_line = df\n \n #-------------------------------------------------------------------------\n # Features issued from InvoiceDate are created\n #-------------------------------------------------------------------------\n if self.is_transform_timeFeature is True:\n self.strprint(\"\\n*** Time features transformation ***\")\n self.data_transform_timeFeature()\n\n #-------------------------------------------------------------------------\n # RFM is computed and encoded\n #-------------------------------------------------------------------------\n if self.is_transform_rfm is True:\n self.strprint(\"\\n*** RFM transformation ***\")\n self.data_transform_rfm()\n\n #-------------------------------------------------------------------------\n # NLP features issued from Description are created\n #-------------------------------------------------------------------------\n if self.is_transform_nlp is True:\n self.strprint(\"\\n*** NLP transformation ***\")\n self.data_transform_nlp()\n \n return self.df_invoice_line", "def data_transform_timeFeature(self):\n #-------------------------------------------------------------------------\n # All new features are built into separate dataframes \n # and each of them are dumped into a separate file.\n #-------------------------------------------------------------------------\n self.strprint(\"self.df_invoice_line : \"+str(self.df_invoice_line.shape))\n \n 
self._dict_timeFeature_encoder, df_customers_timeFeature \\\n = p5_util.time_list_feature_build(self.df_invoice_line\\\n , self._list_new_feature, dict_encoder = self._dict_timeFeature_encoder\\\n ,is_verbose=self.is_verbose)\n \n #-------------------------------------------------------------------------\n # New time features are aggregated into a single dataframe.\n # Values are scaled.\n #-------------------------------------------------------------------------\n df_customers_timeFeature, self._std_scaler_timeFeature \\\n = p5_util.time_list_feature_restore(self._list_new_feature \\\n , std_scale = self._std_scaler_timeFeature\\\n , df_timeFeature = df_customers_timeFeature, is_verbose = self.is_verbose)\n\n self.strprint(\"df_customers_timeFeature : \"+str(df_customers_timeFeature.shape))\n \n #-------------------------------------------------------------------------\n # Dimension reduction thanks to PCA\n #-------------------------------------------------------------------------\n n_dim=30\n root_name = 'time_pca_'\n # Column CustomerID is used into df_pca_reduce\n df_customers_timeFeature['CustomerID'] = df_customers_timeFeature.index\n \n df_customers_timeFeature, pca_timeFeature \\\n = p5_util.df_pca_reduce(df_customers_timeFeature, n_dim, root_name\\\n , p_is_scale=False, pca = self._pca_timeFeature)\n\n self.strprint(df_customers_timeFeature.shape)\n \n if self._pca_timeFeature is None:\n #----------------------------------------------------------------------\n # Data-model is in built process with part of data-set.\n #----------------------------------------------------------------------\n self._pca_timeFeature = pca_timeFeature\n p5_util.object_dump(df_customers_timeFeature\\\n , self._df_customers_timeFeature_fileName)\n else:\n #----------------------------------------------------------------------\n # Data-model is already built and this method is called \n # for a customer classification.\n #----------------------------------------------------------------------\n self._df_customers_timeFeature = df_customers_timeFeature.copy()\n return", "def get_features_and_target(self, trades_features: pd.DataFrame, trades_target: pd.DataFrame) -> pd.DataFrame:\n \n sf_groups = trades_features.drop_duplicates(subset=['sf_account_id', 'trade_date', 'sku']).groupby('sf_account_id')\n\n # calculate features\n feature_dfs = []\n if 'product_name' in self.feature_categories:\n feature_dfs += [sf_groups.product_name.value_counts().unstack().notnull()]\n if 'product_category' in self.feature_categories:\n feature_dfs += [sf_groups.product_category.value_counts().unstack().notnull()]\n if 'reporting_channel' in self.feature_categories:\n feature_dfs += [sf_groups.sub_reporting_channel.value_counts().unstack().notnull()]\n if 'recency' in self.feature_categories:\n feature_dfs += [(trades_features.trade_date_dt.max()-sf_groups.trade_date_dt.max()).dt.days.to_frame().rename(columns={'trade_date_dt':'recency'})]\n if 'frequency' in self.feature_categories:\n feature_dfs += [sf_groups.product_name.count().to_frame().rename(columns={'product_name':'frequency'})]\n if 'total_spend' in self.feature_categories:\n feature_dfs += [sf_groups.cost_float.sum().to_frame().rename(columns={'cost_float':'total_spend'})]\n\n # concat features\n customer_df = pd.concat(feature_dfs, axis=1, sort=False) # outer join on index\n\n # add target variable\n for target_variable in self.target_variables:\n if (trades_target.product_name == target_variable).any():\n customer_df['target_'+target_variable] = 
trades_target.groupby(['sf_account_id', 'product_name']).trade_date.any().unstack()[target_variable]\n else:\n customer_df['target_'+target_variable] = False\n\n # remove customers with no purchases before cut off\n customer_df = customer_df[customer_df[customer_df.columns[customer_df.columns != 'target']].any(axis=1)]\n\n # replace nans with False\n customer_df.fillna(False, inplace=True)\n\n return customer_df", "def _get_cus_info(self):\n label_enc = LabelEncoder()\n customer_info = self._inv.drop_duplicates(['customer_code'], keep='last')\n customer_info = customer_info[['customer_code', 'customer_name', 'sales_cen_code',\n 'sales_cen_name', 'sales_region_name', 'province',\n 'city', 'district', 'customer_type', 'is_usable', 'channel_level']]\n customer_info['customer_id'] = label_enc.fit_transform(customer_info['customer_code'])\n customer_info['sales_cen_id'] = label_enc.fit_transform(customer_info['sales_cen_code'])\n customer_info['sales_region_id'] = label_enc.fit_transform(customer_info['sales_region_name'])\n customer_info['province_id'] = label_enc.fit_transform(customer_info['province'])\n customer_info['city_id'] = label_enc.fit_transform(customer_info['city'])\n customer_info['district_id'] = label_enc.fit_transform(customer_info['district'])\n customer_info['customer_type'] = label_enc.fit_transform(customer_info['customer_type'])\n customer_info['is_usable'] = label_enc.fit_transform(customer_info['is_usable'])\n customer_info['channel_level'] = label_enc.fit_transform(customer_info['channel_level'])\n customer_info_encoded = customer_info.drop(\n columns=['customer_name', 'sales_cen_code', 'sales_cen_name',\n 'sales_region_name', 'province', 'city', 'district']\n ).set_index('customer_code')\n customer_info.set_index('customer_code', inplace=True)\n customer_info_encoded = customer_info_encoded.reindex(self._index.get_level_values(0))\n return customer_info, customer_info_encoded", "def data_process_deprecated(self, CustomerID, InvoiceDate, InvoiceNo, Description, Quantity\\\n , UnitPrice ):\n dict_invoice = {'InvoiceDate':InvoiceDate, 'Description':Description\\\n , 'Quantity':Quantity, 'UnitPrice':UnitPrice}\n dict_invoice['CustomerID'] = CustomerID\n dict_invoice['InvoiceNo'] = InvoiceNo\n df_invoice_line \\\n = pd.DataFrame(dict_invoice, columns=dict_invoice.keys(), index=[0])\n \n self.data_transform(df_invoice_line)\n\n #self.feature_rfm_encode()\n\n self.feature_scale()\n\n self.list_feature_drop()\n\n self.feature_description_nlp()\n return", "def data_transform_rfm(self) :\n \n is_built_step = False\n if self._encoder_rfm is None:\n is_built_step = True \n \n #-------------------------------------------------------------------------\n # RFM feature is built\n #-------------------------------------------------------------------------\n ser_invoice_date = self._df_invoice_line.InvoiceDate\n \n self.df_invoice_line, df_RFM, self.df_RFM_quantiles, self._day_now \\\n = p5_util.p5_df_rfm_build(self.df_invoice_line, day_now = self._day_now\\\n , df_RFM_threshold=self.df_RFM_quantiles)\n \n self._df_invoice_line.InvoiceDate = ser_invoice_date\n \n #-------------------------------------------------------------------------\n # RFM score is added to dataframe\n #-------------------------------------------------------------------------\n df_merged = pd.merge(self.df_invoice_line\\\n , df_RFM[['CustomerID','RFM']], how='left', on=['CustomerID'])\n\n self._df_invoice_line \\\n = pd.DataFrame(df_merged.values, index = self._df_invoice_line.index\\\n , 
columns=df_merged.columns)\n \n\n #self._df_invoice_line \\\n #= pd.concat([ self.df_invoice_line,df_RFM[['CustomerID','RFM']] ], axis=1\\\n #,join='inner')\n \n \n #-------------------------------------------------------------------------\n # RFM encoding\n #-------------------------------------------------------------------------\n self._encoder_rfm, df_RFM_encoded \\\n = p5_util.df_rfm_one_hot_encode(df_RFM,'RFM', encoder=self._encoder_rfm)\n\n #-------------------------------------------------------------------------\n # Encoded RFM features are renamed\n #-------------------------------------------------------------------------\n df_customers_rfm, list_col_unchanged \\\n = p5_util.df_rename_columns(df_RFM_encoded, df_RFM_encoded.columns\\\n , 'w_rfm_')\n \n self.strprint(\"df_customers_rfm =\" +str(df_customers_rfm.shape))\n\n #-------------------------------------------------------------------------\n # dataframe with RFM encoded values per customer is dumped\n #-------------------------------------------------------------------------\n if is_built_step is True:\n p5_util.object_dump(df_customers_rfm, self.df_customers_rfm_fileName)\n else :\n self._df_customers_rfm = df_customers_rfm.copy()\n return", "def get_customer_stats(self):\n try:\n names, waitings, totals, statuses, destinations, passwords, types, positions = zip(*[(p.name, p.get_waiting_time(),\n p.total_time(), status_to_str(p.status), p.get_position(), p.password, p.fleet_type, p.init_position)\n for p in self.customer_agents.values()])\n except ValueError:\n names, waitings, totals, statuses, destinations, passwords, types, positions = [], [], [], [], [], [], [], []\n\n df = pd.DataFrame.from_dict({\"name\": names, \"waiting_time\": waitings, \"total_time\": totals, \"status\": statuses, \"destination\": destinations, \"password\": passwords, \"fleet_type\": types, \"position\": positions})\n return df", "def create_data():\n data_set = pd.DataFrame()\n customer_id = list()\n for i in range(1, 10001):\n customer_id.append(i)\n data_set = pd.DataFrame()\n data_set.loc[:, 'customer_id'] = np.array(customer_id)\n product_name = ('dining chair', 'dining table', 'bed', 'dining set',\n 'stool', 'couch', 'occasional table',\n 'recliner')\n product_name_random = random.choices(product_name, k=10000)\n data_set.loc[:, 'product_name'] = np.array(product_name_random)\n quantity_rented = (1, 2, 3, 4)\n quantity_rented_random = random.choices(quantity_rented, k=10000)\n data_set.loc[:, 'quantity_rented'] = np.array(quantity_rented_random)\n unit_rental_price_monthly = list()\n for i in range(0, 10000):\n unit_rental_price_monthly.append(random.uniform(1.5, 25))\n data_set.loc[:, 'unit_rental_price'] = np.array(unit_rental_price_monthly)\n rental_period_months = list()\n for i in range(0, 10000):\n rental_period_months.append(randint(6, 60))\n data_set.loc[:, 'rental_period_months'] = np.array(rental_period_months)\n return data_set", "def _transform_df(self, data):\n # specify if has FIPS or not\n if self.has_location:\n loc_col_type = \"location\"\n elif not self.has_location:\n loc_col_type = \"location_name\"\n\n out = data.melt(\n id_vars=[\"dt\", loc_col_type], value_vars=self.crename.keys()\n ).dropna()\n out.loc[:, \"value\"] = pd.to_numeric(out[\"value\"])\n out = self.extract_CMU(out, self.crename)\n out[\"vintage\"] = self._retrieve_vintage()\n\n cols_to_keep = [\n \"vintage\",\n \"dt\",\n loc_col_type,\n \"category\",\n \"measurement\",\n \"unit\",\n \"age\",\n \"race\",\n \"ethnicity\",\n \"sex\",\n \"value\",\n ]\n 
return out.loc[:, cols_to_keep]", "def load_customers(dir):\n customSchema = StructType([ \\\n StructField(\"customerId2\", IntegerType(), True), \\\n StructField(\"churnlabel\", IntegerType(), True), \\\n StructField(\"gender\", StringType(), True), \\\n StructField(\"shippingCountry\", StringType(), True), \\\n StructField(\"dateCreated\", StringType(), True), \\\n StructField(\"yearOfBirth\", IntegerType(), True), \\\n StructField(\"premier\", IntegerType(), True)])\n\n df = sqlContext.read.format('com.databricks.spark.csv') \\\n .options(header='false', delimiter='\\t', nullValue='\\\\N') \\\n .load(get_dir_customers(dir) + '/*', schema=customSchema)\n\n return df", "def get_transformed_data(self, df):\n temp_df = pd.DataFrame(self.fa.transform(df))\n return temp_df", "def create_dataframe_for_training(data):\n feature_column_name = 'X'\n #data_cp = data[['label']].copy()\n for i, row in tqdm(data.iterrows(), total=len(data)):\n all_features = f'{row.claimant} {row.claim} {row.article_content}'\n data.loc[i, feature_column_name] = all_features\n\n return data[feature_column_name]", "def get_customers(filters):\n\treturn frappe.db.sql(\"\"\"\n\t\tSELECT\n\n\t\t\tpar.debtor_creditor_number as 'Konto',\n\t\t\tCASE cus.customer_type\n\t\t\t\tWHEN 'Company' THEN cus.customer_name\n\t\t\t\tELSE null\n\t\t\t\tEND as 'Name (Adressatentyp Unternehmen)',\n\t\t\tCASE cus.customer_type\n\t\t\t\tWHEN 'Individual' THEN TRIM(SUBSTR(cus.customer_name, LOCATE(' ', cus.customer_name)))\n\t\t\t\tELSE null\n\t\t\t\tEND as 'Name (Adressatentyp natürl. Person)',\n\t\t\tCASE cus.customer_type\n\t\t\t\tWHEN 'Individual' THEN SUBSTRING_INDEX(SUBSTRING_INDEX(cus.customer_name, ' ', 1), ' ', -1)\n\t\t\t\tELSE null\n\t\t\t\tEND as 'Vorname (Adressatentyp natürl. Person)',\n\t\t\tCASE cus.customer_type\n\t\t\t\tWHEN 'Individual' THEN '1'\n\t\t\t\tWHEN 'Company' THEN '2'\n\t\t\t\tELSE '0'\n\t\t\t\tEND as 'Adressatentyp',\n\t\t\tadr.address_line1 as 'Straße',\n\t\t\tadr.pincode as 'Postleitzahl',\n\t\t\tadr.city as 'Ort',\n\t\t\tUPPER(country.code) as 'Land',\n\t\t\tadr.address_line2 as 'Adresszusatz',\n\t\t\tadr.email_id as 'E-Mail',\n\t\t\tadr.phone as 'Telefon',\n\t\t\tadr.fax as 'Fax',\n\t\t\tcus.website as 'Internet',\n\t\t\tcus.tax_id as 'Steuernummer'\n\n\t\tFROM `tabCustomer` cus\n\n\t\t\tleft join `tabParty Account` par\n\t\t\ton par.parent = cus.name\n\t\t\tand par.parenttype = 'Customer'\n\t\t\tand par.company = %(company)s\n\n\t\t\tleft join `tabDynamic Link` dyn_adr\n\t\t\ton dyn_adr.link_name = cus.name\n\t\t\tand dyn_adr.link_doctype = 'Customer'\n\t\t\tand dyn_adr.parenttype = 'Address'\n\n\t\t\tleft join `tabAddress` adr\n\t\t\ton adr.name = dyn_adr.parent\n\t\t\tand adr.is_primary_address = '1'\n\n\t\t\tleft join `tabCountry` country\n\t\t\ton country.name = adr.country\n\n\t\tWHERE adr.is_primary_address = '1'\n\t\t\"\"\", filters, as_dict=1)", "def create_master_table(df_cust: pd.DataFrame,\n df_trans: pd.DataFrame,\n parameters: Dict) -> pd.DataFrame:\n\n df_cust = _process_customers(df_cust, parameters)\n df_trans = _process_transactions(df_trans, parameters)\n\n # join data\n master_table = df_cust.merge(df_trans, on=['customerID'],\n how='left')\n\n # create geo risk ranking\n # temporary solution, if used in final solution, need to prepare in fit/transform maner\n bins = [-np.inf, 0.049, 0.071, 0.088, 0.107, 0.137, np.inf]\n geo_risk_rank = master_table.groupby('residentialAddress_clean')[['hist_default_sum', 'hist_trans_count']]. \\\n sum().reset_index(). 
\\\n assign(geo_risk_rank=lambda x: pd.cut(x['hist_default_sum']/x['hist_trans_count'], bins).cat.codes)\n\n master_table = master_table.merge(geo_risk_rank[['residentialAddress_clean', 'geo_risk_rank']], on='residentialAddress_clean', how='left')\n\n # drop clients without transactions\n master_table = master_table.dropna(subset=['default'])\n\n return master_table", "def features_orders(df_customers, df_receipts):\n df_customers.sort_values(by=['customerId2'], ascending=[True], inplace=True)\n # total amount of all the orders of a cusrtomer\n df_customers = _add_column(df_customers, df_receipts.groupby('customerId2')['price'].sum(), 'sum_orders')\n # the min amount paid in one receipt by a customer\n df_customers = _add_column(df_customers,\n df_receipts.groupby(['customerId2', 'receiptId'], as_index=False)[\n 'price'].sum().groupby('customerId2').min()['price'], 'min_order')\n # the mean amount paid per receipt by a customer\n df_customers = _add_column(df_customers,\n df_receipts.groupby(['customerId2', 'receiptId'], as_index=False)['price'].sum()\n .groupby('customerId2').mean()['price'], 'mean_order')\n # the max amount paid per receipt by a customer\n df_customers = _add_column(df_customers,\n df_receipts.groupby(['customerId2', 'receiptId'], as_index=False)['price'].sum()\n .groupby('customerId2').max()['price'], 'max_order')\n # the number of orders\n df_customers = _add_column(df_customers,\n df_receipts.groupby(['customerId2', 'receiptId'], as_index=False)['price'].sum()\n .groupby('customerId2').count()['price'], 'count_orders')\n\n # the total amount of items bought by a user\n df_customers = _add_column(df_customers,\n df_receipts.groupby('customerId2')['itemQty'].sum(), 'sum_itemQty')\n # the min amount of items bought by a user in a receipt\n df_customers = _add_column(df_customers,\n df_receipts.groupby(['customerId2', 'receiptId'], as_index=False)['itemQty'].sum()\n .groupby('customerId2').min()['itemQty'], 'min_itemQty')\n # the mean amount of items bought by a user in a receipt\n df_customers = _add_column(df_customers,\n df_receipts.groupby(['customerId2', 'receiptId'], as_index=False)['itemQty'].sum()\n .groupby('customerId2').mean()['itemQty'], 'mean_itemQty')\n # the max amount of items bought by a user in a receipt\n df_customers = _add_column(df_customers,\n df_receipts.groupby(['customerId2', 'receiptId'], as_index=False)['itemQty'].sum()\n .groupby('customerId2').max()['itemQty'], 'max_itemQty')\n # from which dividion type a user buys\n for i in DIVISIONID_VALS:\n k = 'divisionId_' + str(i)\n df_receipts[k] = np.where(df_receipts['divisionId'] == i, 1, 0)\n df_customers = _add_column(df_customers, df_receipts.groupby('customerId2')[k].sum(), k)\n # which source type a user uses to pay\n for i in SOURCEID_VALS:\n k = 'sourceId_' + str(i)\n df_receipts[k] = np.where(df_receipts['sourceId'] == i, 1, 0)\n df_customers = _add_column(df_customers, df_receipts.groupby('customerId2')[k].sum(), k)\n\n logger.info(\"Features from the returns table: {shape} {dtypes}\"\n .format(shape=df_customers.shape, dtypes=df_customers.dtypes))\n return df_customers", "def prepare_data():\n df = pd.read_csv('Wholesale customers data.csv')\n df_numeric = df[['Fresh', 'Milk', 'Grocery', 'Frozen', 'Detergents_Paper', 'Delicassen']]\n return df, df_numeric", "def clean_customer_df(customer_df: pd.DataFrame) -> pd.DataFrame:\n \n # remove customers with more than 20 purchases\n if 'frequency' in customer_df.columns:\n customer_df = customer_df[customer_df.frequency < 20]\n \n return 
customer_df", "def transform(self, data: pd.DataFrame, columns: list, verbose: int=1) -> pd.DataFrame:", "def generate_customer_info(self):\n consecutive = check_consecutive(self.customerIds)\n print(\"consecutive\" + str(consecutive))\n if consecutive:\n for customer_id in self.customerIds:\n # next we need to 1) decide if the customer has insurance, and if yes, generate the EOB that gets sent to the customer\n insured = np.random.choice(\n self.distributions[\"insurance_status\"],\n 1,\n p=self.distributions[\"insurance_distribution\"],\n )[0]\n dob = self.dobs[customer_id - 1]\n customer = pd.DataFrame(\n {\n \"customer_id\": [customer_id],\n \"dob\": str(dob),\n \"insurance\": insured,\n \"experiment_id\": random.randint(1, 2),\n }\n )\n self.Customers = self.Customers.append(customer)\n else:\n print(\"Error generating customer info: customerIds aren't consecutive\")\n return True", "def createDataFrames(self):\n self._atmDF = pd.DataFrame.from_dict(self._atmDict, orient='index')\n \n self._clientDF = pd.DataFrame.from_dict(self._clientDict, orient='index')\n self._clientDF['longAccount'] = self._clientDF.client.map(str) +\\\n '_' + self._clientDF.account.map(str)\n \n self._transactionDF = pd.DataFrame.from_dict(self._transactionDict, orient='index')", "def transform_train_data(df):\n return df.rdd.map(\n lambda x: (\n Vectors.dense([x.amount, x.split, x.maintain4, x.maintain12]),\n x.intime\n )\n ).toDF([\"features\", \"label\"])", "def new_df(companies_filtered):\n name = []\n city = []\n latitude = []\n longitude = []\n zip_code = []\n for i in companies_filtered:\n name.append(i['name'])\n try: \n if i['offices'][0]['city'] == '':\n city.append(np.nan)\n else:\n city.append(i['offices'][0]['city'])\n latitude.append(i['offices'][0]['latitude'])\n longitude.append(i['offices'][0]['longitude'])\n except:\n city.append(np.nan)\n latitude.append(np.nan)\n longitude.append(np.nan)\n zip_code.append(np.nan)\n dict_ = {'company' : name, 'city' : city, 'latitude' : latitude, 'longitude': longitude}\n companies_df = pd.DataFrame.from_dict(dict_, orient='columns')\n \n return companies_df", "def make_data(dataFname, enc, features=None):\n\n origData = pandas.read_csv(dataFname)\n ids = origData['id']\n\n # remove unused columns\n if 'Unnamed: 0' in origData.columns: del origData['Unnamed: 0']\n del origData['id']\n\n # remove \"data leakage\" columns\n for f in prohobitedFeatures:\n del origData[f]\n\n # separate into X & y values\n xData = origData[[col for col in origData.columns if not col=='loss']]\n set_vars_as_type(xData, discreteVars, object)\n yVec = origData.loss if 'loss' in origData.columns else None\n\n # try f528 - f274\n xData['f528f274'] = xData['f528'] - xData['f274']\n\n # encode the categorical features f776 and f777\n if enc is None:\n enc = OneHotEncoder(n_values=[2, 2])\n enc.fit(xData[['f776', 'f777']])\n\n xData[['f776_isZero', 'f776_isOne', 'f777_isZero', 'f777_isOne']] = pandas.DataFrame(enc.transform(xData[['f776', 'f777']]).toarray())\n del xData['f776']\n del xData['f777']\n\n print_missing_values_info(origData)\n\n # feature selection\n if features:\n filteredXData = xData[features]\n else: # use ALL features\n filteredXData = xData\n\n return filteredXData, yVec, ids, enc", "def get_dataframe() -> pandas.DataFrame:\n database_connection = processing.establish_connection(database_path)\n dataframe = database_connection.to_dataframe(['CustomerId', 'InvoiceDate', 'Total'], table_name)\n database_connection.close()\n dataframe = 
processing.get_invoice_date_fixed(dataframe)\n analyze_dataframe = dataframe.copy()\n total_sum_dataframe = processing.get_column_sum(analyze_dataframe)\n\n customer_count_dataframe = processing.drop_duplicates(analyze_dataframe)\n customer_count_dataframe = processing.get_column_count(customer_count_dataframe)\n return customer_count_dataframe.merge(total_sum_dataframe, how='inner', on='InvoiceDate')", "def _prep_data(self, data: bytes) -> pd.DataFrame:\n # Convert the bytes into a file-like object\n buffer = io.BytesIO(data)\n\n # Unzip the file and pull out the csv file\n with zipfile.ZipFile(buffer, \"r\") as zip_file:\n csv = zip_file.read(\"QSAR_BCF_Kow.csv\")\n\n # Convert the string into a file-like object\n csv_file = io.BytesIO(csv)\n\n # Read the file-like object into a dataframe\n cols = [\"cas\", \"name\", \"smiles\", \"logkow\", \"kow_exp\", \"logbcf\"]\n df = pd.read_csv(\n csv_file,\n names=cols,\n header=0,\n usecols=[col for col in cols if col not in [\"cas\", \"name\"]],\n )\n\n # Drop NaNs\n df = df.dropna().reset_index(drop=True)\n\n # Encode KOW types\n kow_types = [\"pred\", \"exp\"]\n df[\"kow_exp\"] = df.kow_exp.map(lambda txt: kow_types.index(txt))\n\n # Get maximum SMILE string length\n max_smile = max(len(smile_string) for smile_string in df.smiles)\n\n # Pad SMILE strings\n df[\"smiles\"] = [\n smile_string + \"x\" * (max_smile - len(smile_string))\n for smile_string in df.smiles\n ]\n\n # Split up the SMILE strings into a matrix\n smile_df = pd.DataFrame(df.smiles.map(list).values.tolist())\n\n # Set the column values of the SMILE dataframe\n smile_df.columns = pd.Index(\n [f\"smiles_{idx}\" for idx in range(smile_df.shape[1])]\n )\n\n # Add the smile dataframe to the original dataframe\n df = pd.concat([df, smile_df], axis=1)\n\n # Drop original SMILE feature\n df = df.drop(columns=\"smiles\")\n\n # Put the target variable at the end\n cols = [\"logkow\", \"kow_exp\"]\n cols += [f\"smiles_{idx}\" for idx in range(max_smile)]\n cols += [\"logbcf\"]\n df = df[cols]\n\n # Ensure that the `logkow` column is numeric\n df[\"logkow\"] = pd.to_numeric(df.logkow)\n\n return df", "def log_transform_features_customer(profile):\n\n view_amount_features = ['max_duration_view_profile', 'view_rate_profile', 'max_amount', \\\n 'min_duration_view_profile', 'min_amount',\\\n 'avg_amount', 'avg_trx_cnt', 'avg_duration_view_profile']\n\n profile_transformed = np.log(profile[view_amount_features]+1)\n\n profile = pd.concat([profile[['gender', 'age', 'became_member_on', 'income']]\\\n \t,profile_transformed], axis=1)\n\n profile.drop(columns=['income', 'min_amount', 'avg_amount', 'avg_duration_view_profile']\\\n \t, inplace=True)\n\n u.save_dataframe_to_sql(profile, 'profile')\n\n return profile", "def join_customer_features(traj_result, username, season, country):\n user_features=get_k_means_data(username,season, country).set_index(\"customer_nr\")\n features_with_trajectory=user_features.join(traj_result.set_index('customer_nr')[[\"cluster\"]])\n return features_with_trajectory" ]
[ "0.70902693", "0.6727287", "0.6064118", "0.60265195", "0.6024846", "0.5994544", "0.596741", "0.58626324", "0.584423", "0.5838772", "0.5787832", "0.5785175", "0.5773647", "0.5766124", "0.5743439", "0.57312465", "0.57225657", "0.5721255", "0.5717279", "0.5693215", "0.5649832", "0.56496465", "0.55862856", "0.5574678", "0.5563245", "0.54700917", "0.54481316", "0.5442918", "0.5426506", "0.5424589" ]
0.80397373
0
Build the dataframe df_customers from transformed data. The transformed data are loaded from dumped files produced by the NLP, Time and RFM feature steps; see data_transform().
def df_customers_fileRead(self): #------------------------------------------------------------------------- # RFM features are restored #------------------------------------------------------------------------- df_customers_rfm \ = p5_util.object_load(self.df_customers_rfm_fileName) self.strprint("RFM features : "+str(df_customers_rfm.shape)) #------------------------------------------------------------------------- # Time features are restored #------------------------------------------------------------------------- df_customers_timeFeature \ = p5_util.object_load(self._df_customers_timeFeature_fileName) self.strprint("Time features : "+str(df_customers_timeFeature.shape)) #------------------------------------------------------------------------- # NLP features are restored #------------------------------------------------------------------------- df_customers_nlp = p5_util.object_load(self._df_customers_nlp_fileName) self.strprint("NLP features : "+str(df_customers_nlp.shape)) if False: df_customers_rfm = self._df_customers_rfm.copy() df_customers_timeFeature = self._df_customers_timeFeature.copy() df_customers_nlp = self._df_customers_pca_nlp.copy() #------------------------------------------------------------------------- # Dataframe are aggregated; note that indexes are customerID. #------------------------------------------------------------------------- df_customers = pd.DataFrame() df_customers = pd.concat([df_customers,df_customers_rfm], axis=1) df_customers = pd.concat([df_customers,df_customers_timeFeature]\ , join='inner', axis=1) df_customers = pd.concat([df_customers,df_customers_nlp]\ , join='inner', axis=1) self.strprint("All features : "+str(df_customers.shape)) #---------------------------------------------------------------------- # Dataframe is dumped into a file #---------------------------------------------------------------------- p5_util.object_dump(df_customers, self._df_customers_fileName) if False: #---------------------------------------------------------------------- # Dataframe is copied as an attribute #---------------------------------------------------------------------- self._df_customers = df_customers.copy() return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def df_customers_features_build(self):\n\n df_customers_rfm = self._df_customers_rfm.copy()\n df_customers_timeFeature = self._df_customers_timeFeature.copy()\n df_customers_nlp = self._df_customers_pca_nlp.copy()\n\n #-------------------------------------------------------------------------\n # Dataframe are aggregated; note that indexes are customerID.\n #-------------------------------------------------------------------------\n df_customers = pd.DataFrame()\n\n df_customers = pd.concat([df_customers,df_customers_rfm], axis=1)\n\n df_customers = pd.concat([df_customers,df_customers_timeFeature]\\\n , join='inner', axis=1)\n\n df_customers = pd.concat([df_customers,df_customers_nlp]\\\n , join='inner', axis=1)\n \n self.strprint(\"All features : \"+str(df_customers.shape))\n self._df_customers = df_customers.copy()\n return", "def features_customers(df_customers):\n for i in PREMIER_VALS:\n k = 'premier_' + str(i)\n df_customers[k] = np.where(df_customers['premier'] == i, 1, 0)\n\n df_customers['age'] = datetime.now().date().year - df_customers['yearOfBirth']\n df_customers['male'] = np.where(df_customers['gender'] == 'M', 1, 0)\n df_customers['female'] = np.where(df_customers['gender'] == 'F', 1, 0)\n df_customers['days_in_asos'] = (datetime.now().date() - df_customers['dateCreated']).dt.days\n\n logger.info(\"Features from the customers table: {shape} {dtypes}\"\n .format(shape=df_customers.shape, dtypes=df_customers.dtypes))\n return df_customers", "def load_customers(dir):\n customSchema = StructType([ \\\n StructField(\"customerId2\", IntegerType(), True), \\\n StructField(\"churnlabel\", IntegerType(), True), \\\n StructField(\"gender\", StringType(), True), \\\n StructField(\"shippingCountry\", StringType(), True), \\\n StructField(\"dateCreated\", StringType(), True), \\\n StructField(\"yearOfBirth\", IntegerType(), True), \\\n StructField(\"premier\", IntegerType(), True)])\n\n df = sqlContext.read.format('com.databricks.spark.csv') \\\n .options(header='false', delimiter='\\t', nullValue='\\\\N') \\\n .load(get_dir_customers(dir) + '/*', schema=customSchema)\n\n return df", "def data_transform_timeFeature(self):\n #-------------------------------------------------------------------------\n # All new features are built into separate dataframes \n # and each of them are dumped into a separate file.\n #-------------------------------------------------------------------------\n self.strprint(\"self.df_invoice_line : \"+str(self.df_invoice_line.shape))\n \n self._dict_timeFeature_encoder, df_customers_timeFeature \\\n = p5_util.time_list_feature_build(self.df_invoice_line\\\n , self._list_new_feature, dict_encoder = self._dict_timeFeature_encoder\\\n ,is_verbose=self.is_verbose)\n \n #-------------------------------------------------------------------------\n # New time features are aggregated into a single dataframe.\n # Values are scaled.\n #-------------------------------------------------------------------------\n df_customers_timeFeature, self._std_scaler_timeFeature \\\n = p5_util.time_list_feature_restore(self._list_new_feature \\\n , std_scale = self._std_scaler_timeFeature\\\n , df_timeFeature = df_customers_timeFeature, is_verbose = self.is_verbose)\n\n self.strprint(\"df_customers_timeFeature : \"+str(df_customers_timeFeature.shape))\n \n #-------------------------------------------------------------------------\n # Dimension reduction thanks to PCA\n #-------------------------------------------------------------------------\n n_dim=30\n root_name = 'time_pca_'\n # 
Column CustomerID is used into df_pca_reduce\n df_customers_timeFeature['CustomerID'] = df_customers_timeFeature.index\n \n df_customers_timeFeature, pca_timeFeature \\\n = p5_util.df_pca_reduce(df_customers_timeFeature, n_dim, root_name\\\n , p_is_scale=False, pca = self._pca_timeFeature)\n\n self.strprint(df_customers_timeFeature.shape)\n \n if self._pca_timeFeature is None:\n #----------------------------------------------------------------------\n # Data-model is in built process with part of data-set.\n #----------------------------------------------------------------------\n self._pca_timeFeature = pca_timeFeature\n p5_util.object_dump(df_customers_timeFeature\\\n , self._df_customers_timeFeature_fileName)\n else:\n #----------------------------------------------------------------------\n # Data-model is already built and this method is called \n # for a customer classification.\n #----------------------------------------------------------------------\n self._df_customers_timeFeature = df_customers_timeFeature.copy()\n return", "def create_features_customer(profile, transcript_training):\n\n # create avg/min/max amount features. Need to calculate amount features from transcript\n # because transcript_training only contains transactions for offer received and viewed.\n # such transactions do not have amount associated\n\n query = \"\"\"\n SELECT a.person, min(amount) as min_amount, max(amount) as max_amount, avg(amount) as avg_amount\n FROM transcript a\n JOIN transcript_quantile b\n ON a.person = b.person \n WHERE a.time <= b.received_time\n GROUP BY a.person\n \"\"\"\n\n profile_amount = u.read_dataframe_from_sql(query).set_index('person')\n\n # create avg/min/max amount duration_view\n profile_duration = create_features_using_groupby(transcript_training\\\n \t, 'profile', 'duration_view')\n\n # create view rate (average of label)\n profile_view_rate = create_features_using_groupby(transcript_training, 'profile', 'label'\\\n \t, minimum=False, maximum=False)\n profile_view_rate.columns=['view_rate_profile']\n\n # create trx rate (count of transactions per person/(max received time - min received time))\n profile_trx_rate = (transcript_training.groupby('person').size()*100\\\n /(transcript_training.groupby('person')['received_time'].max() \\\n - transcript_training.groupby('person')['received_time'].min())).reset_index()\n profile_trx_rate.columns = ['person', 'avg_trx_cnt']\n # set trx rate = 1 if max received time == min received time\n profile_trx_rate.loc[profile_trx_rate['avg_trx_cnt']==np.inf, 'avg_trx_cnt'] = 1\n profile_trx_rate = profile_trx_rate.set_index('person')\n\n profile_feat = profile_amount.join(profile_duration)\\\n .join(profile_view_rate).join(profile_trx_rate)\n\n assert pd.merge(profile, profile_feat, how='left', left_index=True, right_index=True).shape[0] == profile.shape[0]\\\n , \"rows do not match with original data (profile)\"\n\n profile = pd.merge(profile, profile_feat, how='left', left_index=True, right_index=True)\n\n return profile", "def data_transform(self, df) :\n\n #-------------------------------------------------------------------------\n # Copy of given dataframe to be transformed\n #-------------------------------------------------------------------------\n self.df_invoice_line = df\n \n #-------------------------------------------------------------------------\n # Features issued from InvoiceDate are created\n #-------------------------------------------------------------------------\n if self.is_transform_timeFeature is True:\n 
self.strprint(\"\\n*** Time features transformation ***\")\n self.data_transform_timeFeature()\n\n #-------------------------------------------------------------------------\n # RFM is computed and encoded\n #-------------------------------------------------------------------------\n if self.is_transform_rfm is True:\n self.strprint(\"\\n*** RFM transformation ***\")\n self.data_transform_rfm()\n\n #-------------------------------------------------------------------------\n # NLP features issued from Description are created\n #-------------------------------------------------------------------------\n if self.is_transform_nlp is True:\n self.strprint(\"\\n*** NLP transformation ***\")\n self.data_transform_nlp()\n \n return self.df_invoice_line", "def data_transform_rfm(self) :\n \n is_built_step = False\n if self._encoder_rfm is None:\n is_built_step = True \n \n #-------------------------------------------------------------------------\n # RFM feature is built\n #-------------------------------------------------------------------------\n ser_invoice_date = self._df_invoice_line.InvoiceDate\n \n self.df_invoice_line, df_RFM, self.df_RFM_quantiles, self._day_now \\\n = p5_util.p5_df_rfm_build(self.df_invoice_line, day_now = self._day_now\\\n , df_RFM_threshold=self.df_RFM_quantiles)\n \n self._df_invoice_line.InvoiceDate = ser_invoice_date\n \n #-------------------------------------------------------------------------\n # RFM score is added to dataframe\n #-------------------------------------------------------------------------\n df_merged = pd.merge(self.df_invoice_line\\\n , df_RFM[['CustomerID','RFM']], how='left', on=['CustomerID'])\n\n self._df_invoice_line \\\n = pd.DataFrame(df_merged.values, index = self._df_invoice_line.index\\\n , columns=df_merged.columns)\n \n\n #self._df_invoice_line \\\n #= pd.concat([ self.df_invoice_line,df_RFM[['CustomerID','RFM']] ], axis=1\\\n #,join='inner')\n \n \n #-------------------------------------------------------------------------\n # RFM encoding\n #-------------------------------------------------------------------------\n self._encoder_rfm, df_RFM_encoded \\\n = p5_util.df_rfm_one_hot_encode(df_RFM,'RFM', encoder=self._encoder_rfm)\n\n #-------------------------------------------------------------------------\n # Encoded RFM features are renamed\n #-------------------------------------------------------------------------\n df_customers_rfm, list_col_unchanged \\\n = p5_util.df_rename_columns(df_RFM_encoded, df_RFM_encoded.columns\\\n , 'w_rfm_')\n \n self.strprint(\"df_customers_rfm =\" +str(df_customers_rfm.shape))\n\n #-------------------------------------------------------------------------\n # dataframe with RFM encoded values per customer is dumped\n #-------------------------------------------------------------------------\n if is_built_step is True:\n p5_util.object_dump(df_customers_rfm, self.df_customers_rfm_fileName)\n else :\n self._df_customers_rfm = df_customers_rfm.copy()\n return", "def prepare_data():\n df = pd.read_csv('Wholesale customers data.csv')\n df_numeric = df[['Fresh', 'Milk', 'Grocery', 'Frozen', 'Detergents_Paper', 'Delicassen']]\n return df, df_numeric", "def data_process_deprecated(self, CustomerID, InvoiceDate, InvoiceNo, Description, Quantity\\\n , UnitPrice ):\n dict_invoice = {'InvoiceDate':InvoiceDate, 'Description':Description\\\n , 'Quantity':Quantity, 'UnitPrice':UnitPrice}\n dict_invoice['CustomerID'] = CustomerID\n dict_invoice['InvoiceNo'] = InvoiceNo\n df_invoice_line \\\n = 
pd.DataFrame(dict_invoice, columns=dict_invoice.keys(), index=[0])\n \n self.data_transform(df_invoice_line)\n\n #self.feature_rfm_encode()\n\n self.feature_scale()\n\n self.list_feature_drop()\n\n self.feature_description_nlp()\n return", "def make_data(dataFname, enc, features=None):\n\n origData = pandas.read_csv(dataFname)\n ids = origData['id']\n\n # remove unused columns\n if 'Unnamed: 0' in origData.columns: del origData['Unnamed: 0']\n del origData['id']\n\n # remove \"data leakage\" columns\n for f in prohobitedFeatures:\n del origData[f]\n\n # separate into X & y values\n xData = origData[[col for col in origData.columns if not col=='loss']]\n set_vars_as_type(xData, discreteVars, object)\n yVec = origData.loss if 'loss' in origData.columns else None\n\n # try f528 - f274\n xData['f528f274'] = xData['f528'] - xData['f274']\n\n # encode the categorical features f776 and f777\n if enc is None:\n enc = OneHotEncoder(n_values=[2, 2])\n enc.fit(xData[['f776', 'f777']])\n\n xData[['f776_isZero', 'f776_isOne', 'f777_isZero', 'f777_isOne']] = pandas.DataFrame(enc.transform(xData[['f776', 'f777']]).toarray())\n del xData['f776']\n del xData['f777']\n\n print_missing_values_info(origData)\n\n # feature selection\n if features:\n filteredXData = xData[features]\n else: # use ALL features\n filteredXData = xData\n\n return filteredXData, yVec, ids, enc", "def process_customers(self, customers_file):\n\t\tmin = max = None\n\t\tcustomers = {}\n\t\ttry:\n\t\t\tfor user_id, date_str in self.read_csv_file(customers_file):\n\t\t\t\tdate = self.convert_date(date_str)\n\t\t\t\tmin, max = self.min_max_date(min, max, date)\n\t\t\t\tcustomers[user_id] = date\n\t\texcept ValueError:\n\t\t\traise Exception('Customers file has unexpected format.')\n\n\t\tself.customers = customers\n\t\tself.min = min\n\t\tself.max = max", "def create_dataframe_for_training(data):\n feature_column_name = 'X'\n #data_cp = data[['label']].copy()\n for i, row in tqdm(data.iterrows(), total=len(data)):\n all_features = f'{row.claimant} {row.claim} {row.article_content}'\n data.loc[i, feature_column_name] = all_features\n\n return data[feature_column_name]", "def _transform_df(self, data):\n # specify if has FIPS or not\n if self.has_location:\n loc_col_type = \"location\"\n elif not self.has_location:\n loc_col_type = \"location_name\"\n\n out = data.melt(\n id_vars=[\"dt\", loc_col_type], value_vars=self.crename.keys()\n ).dropna()\n out.loc[:, \"value\"] = pd.to_numeric(out[\"value\"])\n out = self.extract_CMU(out, self.crename)\n out[\"vintage\"] = self._retrieve_vintage()\n\n cols_to_keep = [\n \"vintage\",\n \"dt\",\n loc_col_type,\n \"category\",\n \"measurement\",\n \"unit\",\n \"age\",\n \"race\",\n \"ethnicity\",\n \"sex\",\n \"value\",\n ]\n return out.loc[:, cols_to_keep]", "def get_features_and_target(self, trades_features: pd.DataFrame, trades_target: pd.DataFrame) -> pd.DataFrame:\n \n sf_groups = trades_features.drop_duplicates(subset=['sf_account_id', 'trade_date', 'sku']).groupby('sf_account_id')\n\n # calculate features\n feature_dfs = []\n if 'product_name' in self.feature_categories:\n feature_dfs += [sf_groups.product_name.value_counts().unstack().notnull()]\n if 'product_category' in self.feature_categories:\n feature_dfs += [sf_groups.product_category.value_counts().unstack().notnull()]\n if 'reporting_channel' in self.feature_categories:\n feature_dfs += [sf_groups.sub_reporting_channel.value_counts().unstack().notnull()]\n if 'recency' in self.feature_categories:\n feature_dfs += 
[(trades_features.trade_date_dt.max()-sf_groups.trade_date_dt.max()).dt.days.to_frame().rename(columns={'trade_date_dt':'recency'})]\n if 'frequency' in self.feature_categories:\n feature_dfs += [sf_groups.product_name.count().to_frame().rename(columns={'product_name':'frequency'})]\n if 'total_spend' in self.feature_categories:\n feature_dfs += [sf_groups.cost_float.sum().to_frame().rename(columns={'cost_float':'total_spend'})]\n\n # concat features\n customer_df = pd.concat(feature_dfs, axis=1, sort=False) # outer join on index\n\n # add target variable\n for target_variable in self.target_variables:\n if (trades_target.product_name == target_variable).any():\n customer_df['target_'+target_variable] = trades_target.groupby(['sf_account_id', 'product_name']).trade_date.any().unstack()[target_variable]\n else:\n customer_df['target_'+target_variable] = False\n\n # remove customers with no purchases before cut off\n customer_df = customer_df[customer_df[customer_df.columns[customer_df.columns != 'target']].any(axis=1)]\n\n # replace nans with False\n customer_df.fillna(False, inplace=True)\n\n return customer_df", "def transform(self, data: pd.DataFrame, columns: list, verbose: int=1) -> pd.DataFrame:", "def create_data():\n data_set = pd.DataFrame()\n customer_id = list()\n for i in range(1, 10001):\n customer_id.append(i)\n data_set = pd.DataFrame()\n data_set.loc[:, 'customer_id'] = np.array(customer_id)\n product_name = ('dining chair', 'dining table', 'bed', 'dining set',\n 'stool', 'couch', 'occasional table',\n 'recliner')\n product_name_random = random.choices(product_name, k=10000)\n data_set.loc[:, 'product_name'] = np.array(product_name_random)\n quantity_rented = (1, 2, 3, 4)\n quantity_rented_random = random.choices(quantity_rented, k=10000)\n data_set.loc[:, 'quantity_rented'] = np.array(quantity_rented_random)\n unit_rental_price_monthly = list()\n for i in range(0, 10000):\n unit_rental_price_monthly.append(random.uniform(1.5, 25))\n data_set.loc[:, 'unit_rental_price'] = np.array(unit_rental_price_monthly)\n rental_period_months = list()\n for i in range(0, 10000):\n rental_period_months.append(randint(6, 60))\n data_set.loc[:, 'rental_period_months'] = np.array(rental_period_months)\n return data_set", "def get_mall_data(): \n filename = 'mall_customers.csv'\n \n if os.path.isfile(filename):\n return pd.read_csv(filename, index_col=0)\n else: \n df = pd.read_sql(\"\"\"select * from customers\"\"\", get_connection('mall_customers'))\n df.to_csv(filename)\n return df", "def clean_customer_df(customer_df: pd.DataFrame) -> pd.DataFrame:\n \n # remove customers with more than 20 purchases\n if 'frequency' in customer_df.columns:\n customer_df = customer_df[customer_df.frequency < 20]\n \n return customer_df", "def transform_train_data(df):\n return df.rdd.map(\n lambda x: (\n Vectors.dense([x.amount, x.split, x.maintain4, x.maintain12]),\n x.intime\n )\n ).toDF([\"features\", \"label\"])", "def to_learn(trxfile, cardfile, custfile, trainfile, testfile):\n feature_df = to_feature(trxfile, cardfile, custfile)\n feature_df.loc[:] = preprocessing.scale(feature_df)\n #feature_df.loc[:] = preprocessing.normalize(feature_df, norm='l2')\n \n # card_no, label\n train_df = pandas.read_csv(trainfile, header=None)\n # card_no\n test_df = pandas.read_csv(testfile, header=None)\n\n train_data = feature_df.loc[train_df.loc[:, 0]]\n train_label = train_df.loc[:, 1]\n test_data = feature_df.loc[test_df.loc[:, 0]]\n\n return (train_data.values, train_label.values, test_data.values)", "def 
_get_cus_info(self):\n label_enc = LabelEncoder()\n customer_info = self._inv.drop_duplicates(['customer_code'], keep='last')\n customer_info = customer_info[['customer_code', 'customer_name', 'sales_cen_code',\n 'sales_cen_name', 'sales_region_name', 'province',\n 'city', 'district', 'customer_type', 'is_usable', 'channel_level']]\n customer_info['customer_id'] = label_enc.fit_transform(customer_info['customer_code'])\n customer_info['sales_cen_id'] = label_enc.fit_transform(customer_info['sales_cen_code'])\n customer_info['sales_region_id'] = label_enc.fit_transform(customer_info['sales_region_name'])\n customer_info['province_id'] = label_enc.fit_transform(customer_info['province'])\n customer_info['city_id'] = label_enc.fit_transform(customer_info['city'])\n customer_info['district_id'] = label_enc.fit_transform(customer_info['district'])\n customer_info['customer_type'] = label_enc.fit_transform(customer_info['customer_type'])\n customer_info['is_usable'] = label_enc.fit_transform(customer_info['is_usable'])\n customer_info['channel_level'] = label_enc.fit_transform(customer_info['channel_level'])\n customer_info_encoded = customer_info.drop(\n columns=['customer_name', 'sales_cen_code', 'sales_cen_name',\n 'sales_region_name', 'province', 'city', 'district']\n ).set_index('customer_code')\n customer_info.set_index('customer_code', inplace=True)\n customer_info_encoded = customer_info_encoded.reindex(self._index.get_level_values(0))\n return customer_info, customer_info_encoded", "def _prep_data(self, data: bytes) -> pd.DataFrame:\n # Convert the bytes into a file-like object\n buffer = io.BytesIO(data)\n\n # Unzip the file and pull out the csv file\n with zipfile.ZipFile(buffer, \"r\") as zip_file:\n csv = zip_file.read(\"QSAR_BCF_Kow.csv\")\n\n # Convert the string into a file-like object\n csv_file = io.BytesIO(csv)\n\n # Read the file-like object into a dataframe\n cols = [\"cas\", \"name\", \"smiles\", \"logkow\", \"kow_exp\", \"logbcf\"]\n df = pd.read_csv(\n csv_file,\n names=cols,\n header=0,\n usecols=[col for col in cols if col not in [\"cas\", \"name\"]],\n )\n\n # Drop NaNs\n df = df.dropna().reset_index(drop=True)\n\n # Encode KOW types\n kow_types = [\"pred\", \"exp\"]\n df[\"kow_exp\"] = df.kow_exp.map(lambda txt: kow_types.index(txt))\n\n # Get maximum SMILE string length\n max_smile = max(len(smile_string) for smile_string in df.smiles)\n\n # Pad SMILE strings\n df[\"smiles\"] = [\n smile_string + \"x\" * (max_smile - len(smile_string))\n for smile_string in df.smiles\n ]\n\n # Split up the SMILE strings into a matrix\n smile_df = pd.DataFrame(df.smiles.map(list).values.tolist())\n\n # Set the column values of the SMILE dataframe\n smile_df.columns = pd.Index(\n [f\"smiles_{idx}\" for idx in range(smile_df.shape[1])]\n )\n\n # Add the smile dataframe to the original dataframe\n df = pd.concat([df, smile_df], axis=1)\n\n # Drop original SMILE feature\n df = df.drop(columns=\"smiles\")\n\n # Put the target variable at the end\n cols = [\"logkow\", \"kow_exp\"]\n cols += [f\"smiles_{idx}\" for idx in range(max_smile)]\n cols += [\"logbcf\"]\n df = df[cols]\n\n # Ensure that the `logkow` column is numeric\n df[\"logkow\"] = pd.to_numeric(df.logkow)\n\n return df", "def make_df(self):\n # read in file\n df = pd.read_csv(self.data_file)\n cols_to_drop = [f'view{x}' for x in range(1,4)]+['response']\n # subtract loc3 viewing from location of interest\n df[self.label_key] = df[self.predictor] - df['view3']\n df.drop(cols_to_drop, axis=1, inplace=True)\n 
df.reset_index(drop=True, inplace=True)\n\n return df", "def get_transformed_data(self, df):\n temp_df = pd.DataFrame(self.fa.transform(df))\n return temp_df", "def create_master_table(df_cust: pd.DataFrame,\n df_trans: pd.DataFrame,\n parameters: Dict) -> pd.DataFrame:\n\n df_cust = _process_customers(df_cust, parameters)\n df_trans = _process_transactions(df_trans, parameters)\n\n # join data\n master_table = df_cust.merge(df_trans, on=['customerID'],\n how='left')\n\n # create geo risk ranking\n # temporary solution, if used in final solution, need to prepare in fit/transform maner\n bins = [-np.inf, 0.049, 0.071, 0.088, 0.107, 0.137, np.inf]\n geo_risk_rank = master_table.groupby('residentialAddress_clean')[['hist_default_sum', 'hist_trans_count']]. \\\n sum().reset_index(). \\\n assign(geo_risk_rank=lambda x: pd.cut(x['hist_default_sum']/x['hist_trans_count'], bins).cat.codes)\n\n master_table = master_table.merge(geo_risk_rank[['residentialAddress_clean', 'geo_risk_rank']], on='residentialAddress_clean', how='left')\n\n # drop clients without transactions\n master_table = master_table.dropna(subset=['default'])\n\n return master_table", "def dataset(self, file, latent_dim = 4, pivot = 0.2):\n data_df = pd.read_csv(file, sep=\"::\", engine='python',\n names=['UserId', 'MovieId', 'Rating', 'Timestamp'])\n print(len(data_df))\n data_df['avg_score'] = data_df.groupby(by='UserId')['Rating'].transform('mean')\n # feature columns\n user_num, item_num = data_df['UserId'].max() + 1, data_df['MovieId'].max() + 1\n feature_columns = [[self.denseFeature('avg_score')],\n [self.sparseFeature('user_id', user_num, latent_dim),\n self.sparseFeature('item_id', item_num, latent_dim)]]\n # split train dataset and test dataset\n watch_count = data_df.groupby(by='UserId')['MovieId'].agg('count')\n print(\"分割后\"+str(pivot*100)+\"%作为数据集\\n\")\n test_df = pd.concat([data_df[data_df.UserId == i].iloc[int((1 - pivot) * watch_count[i]):] for i in (watch_count.index)], axis=0)\n print(test_df.head())\n test_df = test_df.reset_index()\n train_df = data_df.drop(labels=test_df['index'])\n # 删除非需求列\n train_df = train_df.drop(['Timestamp'], axis=1).sample(frac=1.).reset_index(drop=True)\n test_df = test_df.drop(['index', 'Timestamp'], axis=1).sample(frac=1.).reset_index(drop=True)\n train_X = [train_df['avg_score'].values, train_df[['UserId', 'MovieId']].values]\n train_y = train_df['Rating'].values.astype('int32')\n test_X = [test_df['avg_score'].values, test_df[['UserId', 'MovieId']].values]\n test_y = test_df['Rating'].values.astype('int32')\n return feature_columns, (train_X, train_y), (test_X, test_y)", "def get_dataframe() -> pandas.DataFrame:\n database_connection = processing.establish_connection(database_path)\n dataframe = database_connection.to_dataframe(['CustomerId', 'InvoiceDate', 'Total'], table_name)\n database_connection.close()\n dataframe = processing.get_invoice_date_fixed(dataframe)\n analyze_dataframe = dataframe.copy()\n total_sum_dataframe = processing.get_column_sum(analyze_dataframe)\n\n customer_count_dataframe = processing.drop_duplicates(analyze_dataframe)\n customer_count_dataframe = processing.get_column_count(customer_count_dataframe)\n return customer_count_dataframe.merge(total_sum_dataframe, how='inner', on='InvoiceDate')", "def log_transform_features_customer(profile):\n\n view_amount_features = ['max_duration_view_profile', 'view_rate_profile', 'max_amount', \\\n 'min_duration_view_profile', 'min_amount',\\\n 'avg_amount', 'avg_trx_cnt', 'avg_duration_view_profile']\n\n 
profile_transformed = np.log(profile[view_amount_features]+1)\n\n profile = pd.concat([profile[['gender', 'age', 'became_member_on', 'income']]\\\n \t,profile_transformed], axis=1)\n\n profile.drop(columns=['income', 'min_amount', 'avg_amount', 'avg_duration_view_profile']\\\n \t, inplace=True)\n\n u.save_dataframe_to_sql(profile, 'profile')\n\n return profile", "def pre_process_data():\n data_list, header_list = Parser.__parse_csv_data(Parser.training_data_file)\n table = pandas.DataFrame(data_list, columns=header_list)\n table.drop(['date', 'employee id'], axis=1, inplace=True)\n unique_categories = table['category'].unique()\n unique_expense_desc = table['expense description'].unique()\n unique_tax_name = table['tax name'].unique()\n\n column_index = {\n 'input': {},\n 'output': {}\n }\n\n column_index['input']['pre-tax amount'] = {\n 'column_index': 0,\n 'type': 'int'\n }\n\n column_index['input']['tax amount'] = {\n 'column_index': 1,\n 'type': 'int'\n }\n\n index = 2\n\n for i in range(len(unique_expense_desc)):\n column_index['input'][unique_expense_desc[i]] = {\n 'column_index': i + index,\n 'type': 'str'\n }\n\n index += len(unique_expense_desc)\n\n for i in range(len(unique_tax_name)):\n column_index['input'][unique_tax_name[i]] = {\n 'column_index': i + index,\n 'type': 'str'\n }\n\n for i in range(len(unique_categories)):\n column_index['output'][unique_categories[i]] = {'value': i}\n\n Parser.__save_column_index(column_index)", "def get_customers(filters):\n\treturn frappe.db.sql(\"\"\"\n\t\tSELECT\n\n\t\t\tpar.debtor_creditor_number as 'Konto',\n\t\t\tCASE cus.customer_type\n\t\t\t\tWHEN 'Company' THEN cus.customer_name\n\t\t\t\tELSE null\n\t\t\t\tEND as 'Name (Adressatentyp Unternehmen)',\n\t\t\tCASE cus.customer_type\n\t\t\t\tWHEN 'Individual' THEN TRIM(SUBSTR(cus.customer_name, LOCATE(' ', cus.customer_name)))\n\t\t\t\tELSE null\n\t\t\t\tEND as 'Name (Adressatentyp natürl. Person)',\n\t\t\tCASE cus.customer_type\n\t\t\t\tWHEN 'Individual' THEN SUBSTRING_INDEX(SUBSTRING_INDEX(cus.customer_name, ' ', 1), ' ', -1)\n\t\t\t\tELSE null\n\t\t\t\tEND as 'Vorname (Adressatentyp natürl. Person)',\n\t\t\tCASE cus.customer_type\n\t\t\t\tWHEN 'Individual' THEN '1'\n\t\t\t\tWHEN 'Company' THEN '2'\n\t\t\t\tELSE '0'\n\t\t\t\tEND as 'Adressatentyp',\n\t\t\tadr.address_line1 as 'Straße',\n\t\t\tadr.pincode as 'Postleitzahl',\n\t\t\tadr.city as 'Ort',\n\t\t\tUPPER(country.code) as 'Land',\n\t\t\tadr.address_line2 as 'Adresszusatz',\n\t\t\tadr.email_id as 'E-Mail',\n\t\t\tadr.phone as 'Telefon',\n\t\t\tadr.fax as 'Fax',\n\t\t\tcus.website as 'Internet',\n\t\t\tcus.tax_id as 'Steuernummer'\n\n\t\tFROM `tabCustomer` cus\n\n\t\t\tleft join `tabParty Account` par\n\t\t\ton par.parent = cus.name\n\t\t\tand par.parenttype = 'Customer'\n\t\t\tand par.company = %(company)s\n\n\t\t\tleft join `tabDynamic Link` dyn_adr\n\t\t\ton dyn_adr.link_name = cus.name\n\t\t\tand dyn_adr.link_doctype = 'Customer'\n\t\t\tand dyn_adr.parenttype = 'Address'\n\n\t\t\tleft join `tabAddress` adr\n\t\t\ton adr.name = dyn_adr.parent\n\t\t\tand adr.is_primary_address = '1'\n\n\t\t\tleft join `tabCountry` country\n\t\t\ton country.name = adr.country\n\n\t\tWHERE adr.is_primary_address = '1'\n\t\t\"\"\", filters, as_dict=1)" ]
[ "0.78012073", "0.70509386", "0.6482284", "0.6199711", "0.5975251", "0.5896229", "0.58942175", "0.5884299", "0.5793099", "0.57307404", "0.5701118", "0.568415", "0.56560767", "0.5655341", "0.56509733", "0.5625573", "0.5603968", "0.55976146", "0.55959636", "0.5586056", "0.5570692", "0.5557544", "0.5510036", "0.549403", "0.54827154", "0.54787415", "0.54602635", "0.545783", "0.54553646", "0.5432904" ]
0.7298348
1
Creates a new customer identifier from the existing dataset.
def createCustomerID(self):
    customerID = self._df_invoice_original.CustomerID.max()
    customerID += 1
    return int(customerID)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def customer_id(uncapped_flatprice, uncapped_flatprice_finalizer, team_multisig) -> int:\n customer_id = int(uuid.uuid4().hex, 16) # Customer ids are 128-bit UUID v4\n return customer_id", "def create_customer(cls, api, **data):\n return api.create_customer(**data)", "def create_customer(data):\n mandatory_params = ['customer_name', 'mobile_number']\n result = api_utils.check_required_params(mandatory_params, data)\n if result:\n return result\n mobile_number = db_helper.mobile_number_unique(data['mobile_number'])\n if not mobile_number:\n return api_utils.error(\"There already is a customer with \\\n mobile number {} found\".format(data['mobile_number']), 404)\n\n new_customer = db_helper.add_new_customer(data['customer_name'],\n mobile_number)\n return jsonify({'new_customer': new_customer})", "def add_customer(insert_dict):\n return cr.add_customer(insert_dict)", "def createCustomer(self, **params):\n return self.__req('create_customer', params)", "def generate_customer_info(self):\n consecutive = check_consecutive(self.customerIds)\n print(\"consecutive\" + str(consecutive))\n if consecutive:\n for customer_id in self.customerIds:\n # next we need to 1) decide if the customer has insurance, and if yes, generate the EOB that gets sent to the customer\n insured = np.random.choice(\n self.distributions[\"insurance_status\"],\n 1,\n p=self.distributions[\"insurance_distribution\"],\n )[0]\n dob = self.dobs[customer_id - 1]\n customer = pd.DataFrame(\n {\n \"customer_id\": [customer_id],\n \"dob\": str(dob),\n \"insurance\": insured,\n \"experiment_id\": random.randint(1, 2),\n }\n )\n self.Customers = self.Customers.append(customer)\n else:\n print(\"Error generating customer info: customerIds aren't consecutive\")\n return True", "def post(self):\n data = request.json\n return save_new_customer(data=data)", "def create_or_update_customer(entity):\n\ttry:\n\t\torganisation = entity.get('organisation').replace(\"'\",\"\")\n\t\torganisation = \"%s(C)\"%organisation if is_supplier_or_customer_group(organisation) else organisation\n\t\tname = frappe.db.get_value('Customer', organisation)\n\t\tif not name:\n\t\t\tcustomer = frappe.new_doc(\"Customer\")\n\t\t\tcustomer.customer_name = organisation\n\t\telse:\n\t\t\tcustomer = frappe.get_doc(\"Customer\", name)\n\n\t\tcustomer.entity_id = entity.get('entity_id')\n\t\tcustomer.customer_type = 'Company'\n\t\tif entity.get('group'):\n\t\t\tif entity.get('group').strip() == 'General':\n\t\t\t\tcustomer.customer_group = 'All Customer Groups'\n\t\t\telif frappe.db.get_value('Customer Group', entity.get('group').strip()):\n\t\t\t\tcustomer.customer_group = entity.get('group').strip() or 'All Customer Groups'\n\t\t\telif frappe.db.get_value('Customer', entity.get('group').strip()):\n\t\t\t\tcustomer.customer_group = 'All Customer Groups'\n\t\t\telse:\n\t\t\t\tcustomer.customer_group = create_customer_group(entity.get('group').strip())\n\t\tcustomer.territory = 'Australia'\n\t\tcustomer.customer_status = 'Existing'\n\t\tcustomer.modified_date = entity.get('updated_at')\n\t\tcustomer.save(ignore_permissions=True)\n\t\tif \"(C)\" in customer.customer_name:\n\t\t\tfrappe.db.set_value(\"Cusomer\", customer.name, \"customer_name\", organisation.replace(\"(C)\", \"\"))\n\n\t\tcreate_or_update_contact(customer, entity)\n\t\tget_addresses(entity.get('entity_id'))\n\n\t\t# return status\n\t\treturn {\n\t\t\tentity.get(\"entity_id\"): {\n\t\t\t\t\"operation\": \"Customer Created\" if not name else \"Customer Updated\",\n\t\t\t\t\"name\": 
customer.name,\n\t\t\t\t\"modified_date\": entity.get(\"updated_at\")\n\t\t\t}\n\t\t}\n\texcept Exception, e:\n\t\tdocname = entity.get('entity_id')\n\t\tresponse = entity\n\t\tlog_sync_error(\"Customer\", docname, response, e, \"create_new_customer\")", "def add_customers(current_customers, new_customer_list):\n for new in new_customer_list:\n new_id = _get_next_cust_id()\n current_customers[new_id] = new\n customer_cases[new_id] = {}", "def add_customer(customer_id, name, lastname, homeaddress, phone_number, email, status, credit_limit):\n try:\n with customer_db.transaction():\n new_customer_mi = Customer.create(\n customer_id=customer_id,\n name=name,\n lastname=lastname,\n homeaddress=homeaddress,\n phone_number=phone_number,\n email=email,\n status=status,\n credit_limit=credit_limit\n )\n logger.debug(\"Added customer %s to %s\", new_customer_mi, customer_db.database)\n return new_customer_mi\n except Exception as e:\n logger.error(\"Error creating customer_id %s: %s\", customer_id, e)", "def customer_id(self, customer_id: str):\n self._customer_id = customer_id", "def add_customer(customer_id, first_name, last_name, home_address,\n phone_number, email_address, status, credit_limit):\n print('Adding new customer, Customer ID {}...'.format(customer_id))\n try:\n Customer.get_by_id(customer_id)\n print('Customer ID {} is already in use'.format(customer_id))\n except Exception as ex:\n if \"instance matching query does not exist\" in str(ex):\n try:\n new_customer = Customer.create(customer_ID=customer_id,\n first_name=first_name,\n last_name=last_name,\n home_address=home_address,\n phone_number=phone_number,\n email_address=email_address,\n status=status,\n credit_limit=credit_limit)\n new_customer.save()\n LOGGER.info('Added new customer, Customer ID %s', customer_id)\n except IntegrityError:\n print('Incorrect format, customer {} not saved'\n .format(customer_id))", "def store_customer(self, name):\n pass", "def _create_customers(self, customer_name=\"Alex\"):\n test_customer = Customer(\n name=customer_name,\n address=\"Washington Square Park\",\n phone_number=\"555-555-1234\",\n email=\"[email protected]\",\n credit_card=\"VISA\",\n active = True\n )\n return test_customer", "def customer(self, id):\r\n return Customer(self, id)", "def add_customer(customer_id, name, lastname, home_address,\n phone_number, email_address, status, credit_limit):\n try:\n with database.transaction():\n customer = Customer.create(\n customer_id=customer_id,\n name=name,\n lastname=lastname,\n home_address=home_address,\n phone_number=phone_number,\n email_address=email_address,\n status=status,\n credit_limit=credit_limit,\n )\n customer.save()\n except Exception as unknown_error:\n print(unknown_error)", "def add_customer(customer_id, first, last, addr, phone, email, status, limit):\n try:\n LOGGER.info('Creating customer record')\n with database.transaction():\n new_customer = Customer.create(\n customer_id=customer_id,\n first_name=first,\n last_name=last,\n home_address=addr,\n phone_number=phone,\n email_address=email,\n status=status,\n credit_limit=limit\n )\n new_customer.save()\n LOGGER.info('Added customer: %s', new_customer.customer_id)\n except IntegrityError as err:\n LOGGER.warning('Error creating = ID: %s', customer_id)\n LOGGER.warning(err)\n\n return Customer", "def test_customer_create(self):\n self._create_model(\"customer\", self.customer_data, [\"name\", \"email\", \"phone\"])", "def create_customer_token(self, _id):\n return self._get(\"/customer/{}/token\".format(_id))", "def 
createCustomer(sender, instance, **kwargs):\n Customer.objects.get_or_create(user=instance)", "def save_object(self, data):\n return Customer(**data)", "def set_customer_id(self, case_obj: Family) -> None:\n self._set_customer_id(case_obj.customer.internal_id)", "def create_customer(email=None, name=None, user_type='customer'):\n if user_type == 'charity':\n stripe.api_key = Config.STRIPE_SECRET_KEY_FOR_PLAN\n else:\n stripe.api_key = Config.STRIPE_SECRET_KEY\n if email and name:\n customer = stripe.Customer.create(email=email, name=name)\n else:\n customer = stripe.Customer.create()\n return customer.id", "def add_customer(customer_id, first_name, last_name, home_address, phone_number,\n email_address, is_active, credit_limit):\n try:\n LOGGER.info('Successfully connected to the database')\n\n with DATABASE.transaction():\n new_customer = Customer.create(customer_id=customer_id,\n first_name=first_name,\n last_name=last_name,\n home_address=home_address,\n phone_number=phone_number,\n email_address=email_address,\n is_active=is_active,\n credit_limit=credit_limit)\n new_customer.save()\n LOGGER.info(\"Customer added successfully\")\n\n except IntegrityError as error:\n LOGGER.info(error)\n LOGGER.info('Error occurred')", "def get_customer_id_by_sale_id(sale_id):\n\n # your code", "def CreateCustomer(Person):\n\t\t\tif Person.AddrCitytownNrID:\n\t\t\t\tcitytown = model.AddressCityTown.get(Person.AddrCitytownNrID)\n\t\t\t\tAddressLabel = '%s\\n%s\\n%s, %s\\n%s\\n%s' % (Person.AddrStr, citytown.Name, citytown.Block, citytown.District, citytown.State, citytown.ZipCode) \n\t\t\telse:\n\t\t\t\tAddressLabel = Person.AddrStr\n\t\t\tPersonName = ('%s %s,%s,%s' % (Person.Title, Person.NameFirst, Person.NameMiddle, Person.NameLast)).replace(',,',',').replace(',', ' ').strip()\n\t\t\tcustomer = model.InvCustomer(Name=PersonName ,CityID=Person.AddrCitytownNrID , AddressLabel=AddressLabel, CreditAmount=0.0, \\\n\t\t\t\tInventoryLocation=self.GetDefaultCustomerLocationID(), ExternalID=Person.id)\n\t\t\treturn customer", "def customer_id(self) -> str:\n return self._customer_id", "def add_customer(db_url: str):\n db_url = \"{}/{}\".format(db_url, \"user_api\")\n engine = create_engine(db_url, echo=True)\n session = sessionmaker(engine)()\n customer = Customer()\n session.add(customer)\n session.commit()\n return customer.id", "def add_customer(self, info, dup):\n if not dup:\n self.cursor.execute(\"INSERT INTO customerpersonal VALUES (%s,%s)\", (int(info['phone']), info['address']))\n\n self.cursor.execute(\"INSERT INTO customercredentials VALUES (%s,%s,%s,%s,%s,%s)\",\n (info['loginID'], info['firstName'], info['lastName'], info['salt'],\n info['key'], int(info['phone'])))\n self.db.commit()", "def import_customers(input_data):\n error_count = 0\n insert_count = 0\n LOGGER.info('Starting Customer import')\n for onecust in input_data:\n try:\n Customer(onecust['user_id'], onecust['first_name'], onecust['last_name'],\n onecust['address'], onecust['phone_number'], onecust['email'])\\\n .save(full_clean=True, force_insert=True)\n insert_count += 1\n except ValidationError as valerror:\n LOGGER.exception(\"Error importing data from csv: %s \", valerror.message)\n error_count += 1\n except (OperationError, DuplicateKeyError) as operror:\n LOGGER.exception(\"Error importing data from csv: %s \", operror)\n error_count += 1\n\n return insert_count, error_count" ]
[ "0.6462454", "0.63013685", "0.6199043", "0.6019253", "0.60129994", "0.60011494", "0.59338015", "0.59171224", "0.5879934", "0.58377534", "0.5820006", "0.5756114", "0.5745354", "0.57360977", "0.57085544", "0.5678208", "0.5672292", "0.56669915", "0.56526893", "0.5649953", "0.5642908", "0.56062853", "0.5590031", "0.55736756", "0.5573076", "0.5529865", "0.5502028", "0.54781216", "0.5468273", "0.5462993" ]
0.6483069
0
Drop features given in the parameter list from the df_invoice_line dataframe. Every element of the list is checked against the dataframe columns.
def list_feature_drop(self):
    list_to_drop = list()
    list_not_in_df = list()

    #-------------------------------------------------------------------------
    # Columns are checked to be in the df_invoice_line dataframe
    #-------------------------------------------------------------------------
    for col in self._list_feature_to_drop:
        if col in self.df_invoice_line.columns:
            list_to_drop.append(col)
        else:
            list_not_in_df.append(col)

    if 0 == len(list_to_drop):
        self.strprint("\n*** ERROR : no element in list belonging to dataframe!")
    else:
        if len(self._list_feature_to_drop) != len(list_to_drop):
            self.strprint("\n*** WARNING : following features do not belong to \
dataframe : {}".format(list_not_in_df))
        else:
            pass
        list_col_keep = [col for col in self.df_invoice_line.columns
                         if col not in list_to_drop]
        self.df_invoice_line = self.df_invoice_line[list_col_keep]
    return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def drop_dfcol(self, drop_list):\n self.data = self.df\n for lbl in drop_list:\n self.data = self.data.drop(lbl, axis=1)\n self.n_features = np.shape(self.data)[1]", "def drop(self,df, column_list):\n df.drop(columns = column_list, inplace = True)\n return df", "def select_feats(df):\n cols = list(df)\n for col in cols:\n if col not in config[\"feats\"] and col != \"label\":\n df = df.drop(columns=col)\n return df", "def _drop_features(self, X, drop_features):\n self.drop_features = drop_features\n if len(self.drop_features) != 0:\n cfp = ComprehensiveFCParameters()\n df2 = []\n for df in self.drop_features:\n if df in X.columns:\n df2.append(df) # exact match\n else:\n if df in cfp.keys() or df in ['fft_coefficient_hann']:\n df = '*__{:s}__*'.format(df) # feature calculator\n # wildcard match\n df2 += [col for col in X.columns if fnmatch(col, df)] \n X = X.drop(columns=df2)\n return X", "def test_drop_1_variables_str_input(df_vartypes):\n\n transformer = DropFeatures(features_to_drop=\"Marks\")\n X = transformer.fit_transform(df_vartypes)\n\n # expected result\n df = pd.DataFrame(\n {\n \"Name\": [\"tom\", \"nick\", \"krish\", \"jack\"],\n \"City\": [\"London\", \"Manchester\", \"Liverpool\", \"Bristol\"],\n \"Age\": [20, 21, 19, 18],\n \"dob\": pd.date_range(\"2020-02-24\", periods=4, freq=\"T\"),\n }\n )\n\n # init params\n assert transformer.features_to_drop == \"Marks\"\n\n # transform params\n assert X.shape == (4, 4)\n assert type(X) == pd.DataFrame\n pd.testing.assert_frame_equal(X, df)", "def drop(self, columns: List[str]):\n self._check_columns(columns)\n return self._fromdata(\n {\n self.dtype.fields[i].name: ColumnFromVelox.from_velox(\n self.device,\n self.dtype.fields[i].dtype,\n self._data.child_at(i),\n True,\n )\n for i in range(self._data.children_size())\n if self.dtype.fields[i].name not in columns\n },\n self._mask,\n )", "def delete_columns(houses:pd.DataFrame)-> pd.DataFrame:\n drop_columns= ['NEXT OPEN HOUSE START TIME', 'NEXT OPEN HOUSE END TIME', \n 'URL (SEE http://www.redfin.com/buy-a-home/comparative-market-analysis FOR INFO ON PRICING)',\n 'MLS#', 'FAVORITE', 'INTERESTED', 'LATITUDE', 'LONGITUDE',\n SOURCE, SALE_TYPE, CITY, STATE]\n houses= houses[houses[STATUS].isin(['Sold'])]\n houses= houses[houses[CITY].isin(['Irvine'])]\n return houses.drop(drop_columns, axis= 1)", "def columns_to_drop(filepath, skiprows):\n candidates = ['unit', 'units', 'total', 'totals', 'id']\n df = pd.read_csv(filepath, skiprows=skiprows)\n drop = set()\n \n # find columns according to a list of names we should drop\n for item in df.columns:\n if item.upper() in [x.upper() for x in candidates]:\n drop.add(item)\n \n # find columns with only one unique value\n unique = df.nunique().to_dict()\n for column, n in unique.items():\n if n == 1:\n drop.add(column)\n \n # find columns with int values that are not a time period\n for column in df.columns:\n if df[column].dtype.name == 'int64':\n if not df[column].nunique() in [12, 24, 48, 96, 24*60/5, 24*60]:\n drop.add(column)\n \n return list(drop)", "def trimDf(df):\n cols = set(df.columns)\n\n cols.remove('exclamationCount') # bug in our feature extraction code\n cols.remove('price') # considered only free apps\n cols.remove('appName') # removing appNames\n\n # return df[list(cols)]\n\n\n\n return df[list(('revSent', 'appLabel'))]", "def _feature_country_process(self):\n if 'Country' not in self._df_invoice_line.columns:\n return\n\n list_countries_keep = ['United Kingdom']\n rows_before = self._df_invoice_line.shape[0]\n \n 
df_invoice_line_new = pd.DataFrame()\n for country in list_countries_keep : \n df_invoice_line_new = df_invoice_line_new.append(\\\n self._df_invoice_line[self._df_invoice_line['Country']==country]\\\n , ignore_index=True)\n\n self.df_invoice_line = df_invoice_line_new\n del(df_invoice_line_new)\n \n rows_after = self._df_invoice_line.shape[0] \n _print_stat_rows(\"Countries filtering : \",rows_before, rows_after)\n\n \n #-------------------------------------------------------------------------\n # Due to the fact only one country is used, then this feature is dropped\n #-------------------------------------------------------------------------\n list_col_to_keep = [col for col in self._df_invoice_line.columns \\\n if col not in 'Country']\n \n self._df_invoice_line = self._df_invoice_line[list_col_to_keep] \n\n return", "def remove_features(data, target, fn):\n selected_data = []\n if fn == 'variance':\n sel = VarianceThreshold(threshold=(.1 * (1 - .8)))\n selected_data = sel.fit_transform(data)\n elif fn == 'L1':\n lsvc = LinearSVC(C=0.01, penalty=\"l1\", dual=False).fit(data, target)\n model = SelectFromModel(lsvc, prefit=True)\n selected_data = model.transform(data)\n\n selected_t = np.transpose(selected_data)\n data_t = np.transpose(data)\n\n i = 0\n kept_cols = []\n removed_cols = []\n for i, col in enumerate(data_t):\n if col not in selected_t:\n removed_cols.append(i)\n else:\n kept_cols.append(i)\n return kept_cols, removed_cols", "def clean(df):", "def remove_intermediate_columns(dataframe):\n\n combined_dataframe_dropped_cols = dataframe.drop(columns = ['measureland_qualifier_flag_speed',\n 'measureland_qualifier_flag_distance',\n 'measureland_qualifier_flag_acceleration',\n 'measureland_qualifier_flag_visual'])\n\n print(\"Dimensions of combined dataframe after dropping columns:\", combined_dataframe_dropped_cols.shape)\n print(\"Combined dataframe after dropping columns: \", combined_dataframe_dropped_cols.sample(10))\n\n return combined_dataframe_dropped_cols", "def remove_urequired_columns(self, unrequired_columns):\n self.df = self.df.drop(columns=unrequired_columns)", "def clean_data(data):\n data.dropna(inplace=True)\n for feature in data:\n if ((feature != 'lat') and (feature != 'long') and (feature != 'date')):\n data.drop(data[(data[feature] < 0)].index, inplace=True)\n data.drop(data[(data['price'] == 0)].index, inplace=True)\n data.drop(data[(data['bedrooms'] == 0) & (data['bathrooms'] == 0.0)].index, inplace=True)\n return data", "def data_process_deprecated(self, CustomerID, InvoiceDate, InvoiceNo, Description, Quantity\\\n , UnitPrice ):\n dict_invoice = {'InvoiceDate':InvoiceDate, 'Description':Description\\\n , 'Quantity':Quantity, 'UnitPrice':UnitPrice}\n dict_invoice['CustomerID'] = CustomerID\n dict_invoice['InvoiceNo'] = InvoiceNo\n df_invoice_line \\\n = pd.DataFrame(dict_invoice, columns=dict_invoice.keys(), index=[0])\n \n self.data_transform(df_invoice_line)\n\n #self.feature_rfm_encode()\n\n self.feature_scale()\n\n self.list_feature_drop()\n\n self.feature_description_nlp()\n return", "def select_columns(data):\n\n #Channels to be excluded\n features_delete = np.arange(46, 50)\n features_delete = np.concatenate([features_delete, np.arange(59, 63)])\n features_delete = np.concatenate([features_delete, np.arange(72, 76)])\n features_delete = np.concatenate([features_delete, np.arange(85, 89)])\n features_delete = np.concatenate([features_delete, np.arange(98, 102)])\n features_delete = np.concatenate([features_delete, np.arange(134, 243)])\n 
features_delete = np.concatenate([features_delete, np.arange(244, 249)])\n return np.delete(data, features_delete, 1)", "def preprocess(df):\n drop_cols = ['duration_ms', 'key', 'mode', 'time_signature', 'popularity', 'tempo']\n drop_cols += ['track_id', 'track_name', 'artist_name']\n for col in drop_cols:\n if col in list(df.columns):\n df = df.drop(columns=col)\n return df", "def clean_data(input_file, output_file):\n # Create data frame\n data = pd.read_csv(input_file, sep = \";\")\n \n # Remove unnecessary features from data frame\n data = data.drop([\"Name\",\"Ticket\",\"Cabin\"], axis=1)\n \n # Remove NaN values from remaining features\n data = data.dropna()\n \n # Save ready-to-use file\n data.to_csv(output_file, index=False)", "def _drop_inferior_features_transaction(\n df: pd.DataFrame,\n nan_threshold: float,\n target: str = \"isFraud\"\n) -> pd.DataFrame:\n print(\"Executing inferior feature removal...\")\n df = df.copy()\n num_columns = df.shape[1]\n if nan_threshold > 1.0 or nan_threshold < 0.0:\n raise ValueError(\"nan_threshold should be in range [0, 1].\")\n\n for col in df.columns:\n if col == target: # Preserve the target column.\n continue\n nan_percentage = np.mean(df[col].isna())\n if nan_percentage >= nan_threshold:\n df.drop(columns=[col], inplace=True)\n print(\"{}/{} features left with nan threshold {}\".format(\n df.shape[1], num_columns, nan_threshold\n ))\n return df", "def clean_line_generator_v2(df_pkl=None, fn='untitled'):", "def remove_features(x_train, x_val, x_test, features, ordered_feature_names):\n indices = np.where(np.isin(ordered_feature_names,unwanted_features))\n #print(indices)\n if len(indices) is not 0:\n x_train = np.delete(x_train, indices, axis=1)\n x_test = np.delete(x_test, indices, axis=1)\n x_val = np.delete(x_val,indices,axis=1)\n ordered_feature_names = np.delete(ordered_feature_names, indices, axis=None)\n return x_train,x_val, x_test, ordered_feature_names", "def remove_columns(lst):\n cols_rem = ['yearID','Team','lgID','Name','X','playerID','pops']\n\n for item in cols_rem:\n if item in lst:\n lst.remove(item)\n\n return(lst)", "def deselect (a_data,a_column) :\n loc_data = a_data.drop(a_column,axis = 1) \n return loc_data", "def _filter_features(\n record_batch: pa.RecordBatch,\n feature_allowlist: List[types.FeatureName]) -> pa.RecordBatch:\n columns_to_select = []\n column_names_to_select = []\n for feature_name in feature_allowlist:\n col = arrow_util.get_column(record_batch, feature_name, missing_ok=True)\n if col is None:\n continue\n columns_to_select.append(col)\n column_names_to_select.append(feature_name)\n return pa.RecordBatch.from_arrays(columns_to_select, column_names_to_select)", "def create_df(filename=r'.\\data\\default of credit card clients.xls', remove_pay0=True, resample=False):\n\n filename = filename\n nanDict = {}\n\n df = pd.read_excel(filename, header=1, skiprows=0, index_col=0, na_values=nanDict)\n df.rename(index=str, columns={\"default payment next month\":\"defaultPaymentNextMonth\"}, inplace=True)\n\n # Remove instances with zeros only for past bill statements or paid amounts\n # and not or, remove only when true in all columns\n print('before removing instances where all bill statements or paid amount is zero:', df.shape)\n \n df = df.drop(df[(df.BILL_AMT1 == 0) &\n (df.BILL_AMT2 == 0) &\n (df.BILL_AMT3 == 0) &\n (df.BILL_AMT4 == 0) &\n (df.BILL_AMT5 == 0) &\n (df.BILL_AMT6 == 0)].index, axis=0)\n \n df = df.drop(df[(df.PAY_AMT1 == 0) &\n (df.PAY_AMT2 == 0) &\n (df.PAY_AMT3 == 0) &\n (df.PAY_AMT4 == 0) 
&\n (df.PAY_AMT5 == 0) &\n (df.PAY_AMT6 == 0)].index, axis=0)\n \n print('after removing instances where all bill statements or paid amount is zero:', df.shape)\n\n \n \n print('df shape before illegal values removed:',df.shape)\n print('df after removing illegals:')\n\n df = pay_remove_value(df,-2)\n print(' remove pay=-2', df.shape)\n\n df = bill_amt_remove_negative(df, 0)\n print(' remove Pay_amt, bill_amt <0:', df.shape)\n\n\n df = edu_marr_remove_value(df)\n print(' remove edy=0,5,6, marriage=0:', df.shape)\n\n if remove_pay0:# over 80 % of data lost\n\n df = pay_remove_value(df,0)\n print(' remove pay=0:',df.shape)\n\n\n\n # features and targets\n X = df.loc[:, df.columns !='defaultPaymentNextMonth'].values\n y = df.loc[:, df.columns =='defaultPaymentNextMonth'].values\n\n # categorical variables to one-hot's\n onehotencoder = OneHotEncoder(categories='auto')\n #print(df.iloc[0:, 3])\n \n # transform cat. var. columns into cat. variables.\n # new columns are added at the start, columns before col 1 put behind new columns\n \n X = ColumnTransformer(\n [(\"\",onehotencoder, [1,2,3, 5,6,7,8,9,10]),],\n remainder='passthrough'\n ).fit_transform(X)\n print(' shape of dataset without resampling', X.shape,y.shape)\n\n if resample:\n sm = SMOTE(random_state=seed)\n X, y = sm.fit_resample(X, y.ravel())\n y = y.reshape(-1,1)\n print(' shape of dataset after resampling', X.shape,y.shape)\n #sys.exit()\n return X, y", "def purgeHighSparsedFeatures(df,threshold,barplot=False,title=''):\n \n thr = math.floor(df.shape[1] * threshold)\n rowsToDrop = np.array([])\n logger.debug(Sc+'Patient Threshold is %d' % thr) \n logger.debug(Sc+'Matrix dimensions : Rows %d , Columns %d'% (df.shape[0],df.shape[1]))\n #axis_x = np.arange(0,df.shape[0]) \n axis_y = np.array([]) \n numRows = df.shape[0] \n for i in range(1,numRows):\n arr = pd.isnull(df.iloc[i])\n nnan = np.sum(arr) \n axis_y = np.append(axis_y,nnan)\n if (nnan > thr):\n rowsToDrop = np.append(rowsToDrop,i)\n logger.debug ('%d features to drop ' % len(rowsToDrop))\n np.savetxt('debug/sparseFeaturesaxis_y.txt',axis_y)\n #if(barplot):\n # ax.title.set_text(title)\n # ax.bar(axis_x,axis_y) \n #logger.debug('After purge there are %d columns '% df.shape[1])\n return rowsToDrop", "def drop_columns(self, columns):\n dframe = self.dframe(keep_parent_ids=True)\n self.replace_observations(dframe.drop(columns, axis=1))", "def drop_columns(*, df, columns_to_drop, verbose=True):\n \n assert type(df)==pd.DataFrame, \"please provide df in pandas dataframe format\"\n df = df.copy()\n \n # find unique values in a list, just in case I made the mistake, \n columns_to_drop = list(pd.Series(columns_to_drop).unique())\n\n # .. info, header, \n if verbose==True:\n print(f\"\"\"Removing {len(columns_to_drop)} columns from df\"\"\") \n else:\n pass\n\n \n # remove columns one by one, \n for i,j in enumerate(columns_to_drop):\n try:\n df.drop(columns=[j], axis=1, inplace=True)\n if verbose==True:\n print(f\"{i} removing: {j}, ==> new df.shape: {df.shape}\")\n else:\n pass\n \n except:\n if verbose==True:\n print(f\"{i} .... 
column: {j}, was not found in df, check if name is correct....\")\n else:\n pass\n \n return df", "def remove(dataframe, limit=250):\n logfile = open('logfile_removecolumns.txt', 'w') # Create a logfile\n logfile.write('=====> Time: %s <=====\\n' % time.asctime(time.localtime()))\n logfile.write('=====> Log from file %s.py <===== \\n\\n' % __name__)\n\n columns_overview = dataframe.columns.summary() # Create an overview of the dataframe\n cols_list = dataframe.columns.tolist()\n cols_to_be_deleted = list()\n logfile.write('Overview of the dataframe: \\n%s' % columns_overview)\n\n for stock in range(len(cols_list)): # Walk through all stocks\n if dataframe[cols_list[stock]].isnull().sum() > limit: # Check No. of null values in a column\n cols_to_be_deleted.append(cols_list[stock])\n \n logfile.write('\\nNo. of Columns with more that %d missing values: %s\\n'\n % (limit, len(cols_to_be_deleted)))\n logfile.write('Deleted columns:\\n')\n for col in cols_to_be_deleted:\n logfile.write('%s \\n' % str(col))\n logfile.close()\n \n # Return updated dataframe or list of columns. See test code below\n dataframe_updated = dataframe[dataframe.columns.drop(cols_to_be_deleted)]\n return dataframe_updated" ]
[ "0.6483683", "0.61852366", "0.61701894", "0.61167115", "0.5998328", "0.5879889", "0.5828969", "0.57773757", "0.57595223", "0.56772375", "0.55653924", "0.55105036", "0.5499361", "0.5488699", "0.5475562", "0.54356617", "0.5433665", "0.5431893", "0.54176056", "0.5401929", "0.53749037", "0.5363736", "0.53616303", "0.53577864", "0.53537077", "0.53521913", "0.5339517", "0.5323909", "0.53152", "0.52870375" ]
0.7785573
0
Process df_invoice_line.Description with the NLTK package.
def feature_description_nlp(self):
    #-------------------------------------------------------------------------
    # Returned dataframe is aggregated with weights from self.vectorizer
    #-------------------------------------------------------------------------
    list_no_words = ['SET', 'PACK']

    self.df_invoice_line, vectorizer, matrix_weights \
        = p5_util.nlp_process(self.df_invoice_line, 'Description',
                              vectorizer=self.vectorizer,
                              list_no_words=list_no_words)

    #-------------------------------------------------------------------------
    # Each vectorized column 'x' is renamed w_nlp_i
    #-------------------------------------------------------------------------
    dict_matching_name = dict()
    for col in self.df_invoice_line.columns:
        if str(col).isdigit() is True:
            new_col_name = "w_nlp_" + str(col)
            dict_matching_name[col] = new_col_name

    self.df_invoice_line.rename(columns=dict_matching_name, inplace=True)

    #-------------------------------------------------------------------------
    # Description is dropped from columns
    #-------------------------------------------------------------------------
    del(self.df_invoice_line['Description'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def data_process_deprecated(self, CustomerID, InvoiceDate, InvoiceNo, Description, Quantity\\\n , UnitPrice ):\n dict_invoice = {'InvoiceDate':InvoiceDate, 'Description':Description\\\n , 'Quantity':Quantity, 'UnitPrice':UnitPrice}\n dict_invoice['CustomerID'] = CustomerID\n dict_invoice['InvoiceNo'] = InvoiceNo\n df_invoice_line \\\n = pd.DataFrame(dict_invoice, columns=dict_invoice.keys(), index=[0])\n \n self.data_transform(df_invoice_line)\n\n #self.feature_rfm_encode()\n\n self.feature_scale()\n\n self.list_feature_drop()\n\n self.feature_description_nlp()\n return", "def treat_new_line(self,text):\n text=text.replace('.\\n','. ')\n text=re.sub(r'(\\n\\s*)+\\n+', '\\n\\n',text )\n \n lw=text.split('\\n\\n')\n lw=[c for c in lw if c.replace(' ','')!='']\n \n for i in range(1,len(lw)):\n try:\n\n el=lw[i]\n if len(el)>=1:\n try:\n first_w=el.split()[0]\n except:\n first_w=el\n first_l=first_w[0]\n if first_l.isupper() :\n if len(lw[i-1])>0 and lw[i-1].replace(' ','') !='':\n if lw[i-1].replace(' ','')[-1] not in [\":\",'.',\"-\",'/',\"'\",\";\"]:\n prec=lw[i-1].split(\".\")[-1]\n merge=(prec+' '+lw[i]).split()\n dic=dict(nltk.tag.pos_tag(merge))\n proper_noun=dic[first_w]=='NNP'\n if not proper_noun:\n if not \".\" in lw[i-1]:\n lw[i-1]=lw[i-1]+\".\\n\\n \"\n else:\n lw[i-1]=lw[i-1][:-1]+\".\\n\\n \"\n else:\n lw[i-1]+=' '\n\n\n elif first_l.islower():\n if len(lw[i-1])>0 and lw[i-1][-1].replace(' ','')!='':\n\n if lw[i-1][-1].replace(' ','')[-1]!='-':\n lw[i-1]+=\"\"\n else:\n\n ltemp_prev=lw[i-1].split(' ')\n ltemp_next=lw[i].split(' ')\n motprev=ltemp_prev[-1][:-1]\n motnext=lw[i].split(' ')[0]\n if len((motprev+' '+motnext).split())==2:\n\n if self.english_voc.check(motprev) and self.english_voc.check(motnext) and not self.english_voc.check(\"\".join([motprev,motnext])) :\n newmot=\" \".join([motprev,motnext])\n else:\n newmot=\"\".join([motprev,motnext])\n ltemp_prev[-1]=newmot\n ltemp_next[0]=\"\"\n lw[i-1]=\" \".join(ltemp_prev)\n lw[i]=\" \".join(ltemp_next)\n else:\n lw[i-1]+=\"\\n\\n\"\n \n except:\n print('Error occurs, the reader may not be suitable for your pdf files')\n \n \n text=\"\".join(lw)\n \n lw=text.split('\\n')\n lw=[c for c in lw if c.replace(' ','')!='']\n for i in range(1,len(lw)):\n try:\n el=lw[i]\n if len(el)>=1:\n try:\n first_w=el.split()[0]\n except:\n first_w=el\n first_l=first_w[0]\n if first_l.isupper() :\n if len(lw[i-1])>0 and lw[i-1].replace(' ','')!='':\n if lw[i-1].replace(' ','')[-1] not in [\":\",'.',\"-\",'/',\"'\",\";\"]:\n prec=lw[i-1].split(\".\")[-1]\n merge=(prec+' '+lw[i]).split()\n dic=dict(nltk.tag.pos_tag(merge))\n proper_noun=dic[first_w]=='NNP'\n if not proper_noun:\n if not \".\" in lw[i-1]:\n lw[i-1]=lw[i-1]+\".\\n\\n \"\n else:\n lw[i-1]=lw[i-1][:-1]+\".\\n\\n \"\n else:\n lw[i-1]+=' '\n elif first_l.islower():\n if len(lw[i-1])>0 and lw[i-1].replace(' ','')!='':\n if lw[i-1].replace(' ','')[-1]==\"-\":\n ltemp_prev=lw[i-1].split(' ')\n ltemp_next=lw[i].split(' ')\n motprev=ltemp_prev[-1][:-1]\n motnext=lw[i].split(' ')[0]\n if len((motprev+' '+motnext).split())==2:\n if self.english_voc.check(motprev) and self.english_voc.check(motnext) and not self.english_voc.check(\"\".join([motprev,motnext])) :\n newmot=\" \".join([motprev,motnext])\n else:\n newmot=\"\".join([motprev,motnext])\n ltemp_prev[-1]=newmot\n ltemp_next[0]=\"\"\n lw[i-1]=\" \".join(ltemp_prev)\n lw[i]=\" \".join(ltemp_next)\n\n\n\n else:\n lw[i-1]+=\" \"\n else:\n lw[i-1]+=\" \"\n \n except:\n print('Error occurs, the reader may not be suitable for your pdf 
files')\n \n text=\"\".join(lw)\n return text", "def process_text(self, text, language):", "def data_transform_nlp(self):\n df_invoice_line = None\n \n is_build_step = False\n\n if self._vectorizer_nlp is None:\n is_build_step = True\n \n list_no_words=['SET','PACK']\n\n df_invoice_line, csr_matrix_weights, self._vectorizer_nlp \\\n = p5_util.nlp_process(self.df_invoice_line\\\n , 'Description' , vectorizer= self._vectorizer_nlp\\\n , list_no_words=list_no_words, is_verbose= self.is_verbose)\n \n if df_invoice_line is None:\n self.strprint(\"***ERROR : NLP process interrupted!\")\n return\n \n \n #-------------------------------------------------------------------------\n # NLP weights are cumulated (sumerized) per customer\n #-------------------------------------------------------------------------\n if csr_matrix_weights is None:\n csr_matrix_weights \\\n = p5_util.object_load('./data/matrix_weights_NLP.dump')\n else:\n pass\n \n self.strprint(\"df_invoice_line : \"+str(df_invoice_line.shape))\n \n self.dbg_df = df_invoice_line.copy()\n \n root_name = 'w_nlp_'\n self._df_w_nlp = p5_util.df_nlp_sum_per_customer(df_invoice_line\\\n , csr_matrix_weights, root_name)\n\n del(csr_matrix_weights)\n \n #-------------------------------------------------------------------------\n # Dimension reduction thanks to PCA\n #------------------------------------------------------------------------- \n self.strprint(\"self._df_w_nlp : \"+str(self._df_w_nlp.shape))\n\n root_name_pca = 'nlp_pca_'\n n_dim = self._nlp_pca_ndim\n \n df_customers_pca_nlp, self._pca_nlp \\\n = p5_util.df_pca_reduce(self._df_w_nlp, n_dim, root_name_pca\\\n , p_is_scale=False, pca=self._pca_nlp)\n \n self.strprint(\"df_customers_pca_nlp : \" +str(df_customers_pca_nlp.shape))\n\n #-------------------------------------------------------------------------\n # Backup of NLP features per customer\n #-------------------------------------------------------------------------\n if is_build_step is True:\n p5_util.object_dump(df_customers_pca_nlp\\\n , self._df_customers_nlp_fileName)\n else:\n self._df_customers_pca_nlp = df_customers_pca_nlp.copy()\n \n return", "def create_NER(self, dataframe):\n\n dataframe['entities'] = dataframe['line']\n entity_dict = {}\n entity_type = {}\n\n for i, val in enumerate(dataframe['entities']):\n e1 = re.findall('<e1>(.*?)</e1>', val)\n e2 = re.findall('<e2>(.*?)</e2>', val)\n entity_dict[i+1] = (str(e1[0]), str(e2[0]))\n doc = nlp(e1[0])\n for ent in doc.ents:\n if ent.label_:\n entity_type[i] = ent.label_\n else:\n entity_type[i] = ('NOT RECOGNIZED')\n \n doc = nlp(e2[0])\n for ent in doc.ents:\n if ent.label_:\n entity_type[i] = entity_type[i] + ent.label_\n else:\n entity_type[i] = entity_type[i] + ('NOT RECOGNIZED')\n\n entity_dataframe = self.create_dataframe(entity_dict, ['e1', 'e2'])\n entity_type_df = self.create_dataframe(entity_type, ['e1', 'e2'])\n\n dataframe = dataframe.drop(columns=['entities'])\n dataframe['e1'] = entity_dataframe['e1']\n dataframe['e2'] = entity_dataframe['e2']\n dataframe['e1_type'] = entity_type_df['e1']\n dataframe['e2_type'] = entity_type_df['e2']\n\n return dataframe", "def remove_info(text, journal_id, label, doc_type='inkomst'):\r\n sections = text.split('NEWPAR')\r\n cleaned_text = ''\r\n diagnose_detected = False\r\n for section in sections:\r\n if section:\r\n section_header =list(filter(None, section.split(' ')))[0]\r\n #print(section_header)\r\n if 'diagnose' in section_header.lower() or 'DIAGNOSE' in section or 'Diagnose :' in section or 'Problemstilling :' 
in section:\r\n diagnose_detected = True\r\n else:\r\n cleaned_text += section + ' '\r\n if not diagnose_detected :\r\n print('No DIAGNOSE in: ', journal_id)\r\n return cleaned_text", "def action_generate_lines_txt(self):\n rp_obj = self.env['res.partner']\n voucher_obj = self.env['account.wh.iva']\n txt_iva_obj = self.env['txt.iva.line']\n vouchers = []\n txt_brw = self.browse(self._ids)[0]\n txt_ids = txt_iva_obj.search([('txt_id', '=', txt_brw.id)])\n if txt_ids:\n txt_ids.unlink()\n\n if txt_brw.type:\n vouchers = voucher_obj.search([\n ('date_ret', '>=', txt_brw.date_start),\n ('date_ret', '<=', txt_brw.date_end),\n ('period_id', '=', txt_brw.period_id.id),\n ('state', '=', 'done'),\n ('type', 'in', ['in_invoice', 'in_refund'])])\n else:\n vouchers = voucher_obj.search([\n ('date_ret', '>=', txt_brw.date_start),\n ('date_ret', '<=', txt_brw.date_end),\n ('period_id', '=', txt_brw.period_id.id),\n ('state', '=', 'done'),\n ('type', 'in', ['out_invoice', 'out_refund'])])\n\n for voucher in vouchers:\n acc_part_id = rp_obj._find_accounting_partner(voucher.partner_id)\n for voucher_lines in voucher.wh_lines:\n if voucher_lines.invoice_id.state not in ['open', 'paid']:\n continue\n for voucher_tax_line in voucher_lines.tax_line:\n txt_iva_obj.create(\n {'partner_id': acc_part_id.id,\n 'voucher_id': voucher.id,\n 'invoice_id': voucher_lines.invoice_id.id,\n 'txt_id': txt_brw.id,\n 'untaxed': voucher_tax_line.base,\n 'amount_withheld': voucher_tax_line.amount_ret,\n 'tax_wh_iva_id': voucher_tax_line.id,\n })\n return True", "def prepare_text_data(descriptions):\n text_data = []\n for line in descriptions:\n tokens = prepare_text_for_lda(line)\n text_data.append(tokens)\n return text_data", "def data_transform(self, df) :\n\n #-------------------------------------------------------------------------\n # Copy of given dataframe to be transformed\n #-------------------------------------------------------------------------\n self.df_invoice_line = df\n \n #-------------------------------------------------------------------------\n # Features issued from InvoiceDate are created\n #-------------------------------------------------------------------------\n if self.is_transform_timeFeature is True:\n self.strprint(\"\\n*** Time features transformation ***\")\n self.data_transform_timeFeature()\n\n #-------------------------------------------------------------------------\n # RFM is computed and encoded\n #-------------------------------------------------------------------------\n if self.is_transform_rfm is True:\n self.strprint(\"\\n*** RFM transformation ***\")\n self.data_transform_rfm()\n\n #-------------------------------------------------------------------------\n # NLP features issued from Description are created\n #-------------------------------------------------------------------------\n if self.is_transform_nlp is True:\n self.strprint(\"\\n*** NLP transformation ***\")\n self.data_transform_nlp()\n \n return self.df_invoice_line", "def _prepare_invoice(self, cr, uid, order, lines, context=None):\n if context is None:\n context = {}\n journal_ids = self.pool.get('account.journal').search(cr, uid,\n [('type', '=', 'sale'), ('company_id', '=', order.company_id.id)],\n limit=1)\n if not journal_ids:\n raise osv.except_osv(_('Error !'),\n _('There is no sales journal defined for this company: \"%s\" (id:%d)') % (order.company_id.name, order.company_id.id))\n\n invoice_vals = {\n 'name': order.client_order_ref or '',\n 'origin': order.name,\n 'type': 'out_invoice',\n 'reference': 
order.client_order_ref or order.name,\n 'account_id': order.partner_id.property_account_receivable.id,\n 'journal_id': order.partner_id.property_default_sale_invoice_journal.id,\n 'partner_id': order.partner_id.id,\n 'address_invoice_id': order.partner_invoice_id.id,\n #'address_contact_id': order.partner_order_id.id,\n 'invoice_line': [(6, 0, lines)],\n 'currency_id': order.pricelist_id.currency_id.id,\n 'comment': order.note,\n 'payment_term': order.payment_term and order.payment_term.id or False,\n 'fiscal_position': order.fiscal_position.id or order.partner_id.property_account_position.id,\n 'date_invoice': context.get('date_invoice', False),\n 'company_id': order.company_id.id,\n 'user_id': order.user_id and order.user_id.id or False\n }\n\n # Care for deprecated _inv_get() hook - FIXME: to be removed after 6.1\n invoice_vals.update(self._inv_get(cr, uid, order, context=context))\n\n return invoice_vals", "def summary_line_and_description():", "def _prepare_invoice(self, cr, uid, order, lines, context=None):\n if context is None:\n context = {}\n journal_id = self.pool['account.invoice'].default_get(cr, uid, ['journal_id'], context=context)['journal_id']\n if not journal_id:\n raise osv.except_osv(_('Error!'),\n _('Please define sales journal for this company: \"%s\" (id:%d).') % (order.company_id.name, order.company_id.id))\n invoice_vals = {\n 'name': order.client_order_ref or '',\n 'origin': order.name,\n 'type': 'out_invoice',\n 'reference': order.client_order_ref or order.name,\n 'account_id': order.partner_invoice_id.property_account_receivable.id,\n 'partner_id': order.partner_invoice_id.id,\n 'journal_id': journal_id,\n 'invoice_line': [(6, 0, lines)],\n 'currency_id': order.pricelist_id.currency_id.id,\n 'comment': order.note,\n 'payment_term': order.payment_term and order.payment_term.id or False,\n 'fiscal_position': order.fiscal_position.id or order.partner_invoice_id.property_account_position.id,\n 'date_invoice': context.get('date_invoice', False),\n 'company_id': order.company_id.id,\n 'user_id': order.user_id and order.user_id.id or False,\n 'section_id' : order.section_id.id,\n 'test_1' :order.test\n }\n\n # Care for deprecated _inv_get() hook - FIXME: to be removed after 6.1\n invoice_vals.update(self._inv_get(cr, uid, order, context=context))\n return invoice_vals", "def process_line(line):\n [label, text] = line.split('\\t')\n return text.split()", "def label_notes(all_notes_lines):\n# nf = nemo.core.NeuralModuleFactory(backend=nemo.core.Backend.PyTorch, log_dir=None)\n #note_line_queries = notes.split('\\n')\n #note_line_queries = ['pt arrived obtunded not answering questions responding to voice and sternal rub speaking in garbled voice pupils unequal left 3mm and right 2mm brisk bilaterally trauma sicu MD aware currently recieving keppra IV finished dilantin gtt due for level at 08a EEG today LSCTA on 3LNC sats 100 % SBP 90 s to 100 s HR NSR no ectopy 60 s NS @ 75cc continuous +BS no stools rec d lactulose at OSH to recieve PR q4h abd soft non-tender non-distended foley in place draining adequate amt clear yellow urine skin intact left 20G x2 WNL wife Name NI']\n\n# labels_dict = get_vocab(LABELS_DICT)\n# pretrained_bert_model = nemo_nlp.nm.trainables.get_huggingface_model(\n# bert_config=BERT_CONFIG, pretrained_model_name=PRETRAINED_MODEL_NAME\n# )\n\n# tokenizer = nemo.collections.nlp.data.tokenizers.get_tokenizer(\n# tokenizer_name=TOKENIZER,\n# pretrained_model_name=PRETRAINED_MODEL_NAME,\n# tokenizer_model=TOKENIZER_MODEL,\n# )\n# hidden_size = 
pretrained_bert_model.hidden_size\n\n load_datalayer_begin_time = time.time()\n data_layer = nemo_nlp.nm.data_layers.BertTokenClassificationInferDataLayer(\n queries=all_notes_lines, tokenizer=tokenizer, max_seq_length=MAX_SEQ_LENGTH, batch_size=2000\n )\n load_datalayer_end_time = time.time()\n\n classifier = TokenClassifier(hidden_size=hidden_size, num_classes=len(labels_dict))\n\n input_ids, input_type_ids, input_mask, _, subtokens_mask = data_layer()\n \n load_hidden_states_begin_time = time.time()\n hidden_states = pretrained_bert_model(input_ids=input_ids, token_type_ids=input_type_ids, attention_mask=input_mask)\n load_hidden_states_end_time = time.time()\n load_logits_begin_time = time.time()\n logits = classifier(hidden_states=hidden_states)\n load_logits_end_time = time.time()\n\n ###########################################################################\n\n # Instantiate an optimizer to perform `infer` action\n infer_begin_time = time.time()\n evaluated_tensors = nf.infer(tensors=[logits, subtokens_mask], checkpoint_dir=CHECKPOINT_DIR)\n infer_end_time = time.time()\n\n logits, subtokens_mask = [concatenate(tensors) for tensors in evaluated_tensors]\n\n preds = np.argmax(logits, axis=2) \n all_notes_labeled_lines = []\n\n for i, query in enumerate(all_notes_lines):\n logging.info(f'Query: {query}')\n\n pred = preds[i][subtokens_mask[i] > 0.5]\n words = query.strip().split()\n\n #replaced with logic below instead of raising an error:\n '''\n if len(pred) != len(words):\n logging.info('Preds length: ' + str(len(preds[i])))\n logging.info('subtokens_mask length: ' + str(len(subtokens_mask[i])))\n logging.info('Pred length: ' + str(len(pred)))\n logging.info('words length: ' + str(len(words)))\n logging.info('Preds: ' + str(preds.tolist()))\n logging.info('subtokens_mask: ' + str(subtokens_mask[i]))\n logging.info('Pred:' + str(pred.tolist()))\n logging.info('words:' + str(words))\n\n labeled_note = '__Prediction/Word Mismatch__ pred length: ' + str(len(pred)) + ', words length: ' + str(len(words))\n break\n #raise ValueError('Pred and words must be of the same length')\n \n output = ''\n for j, w in enumerate(words):\n output += w\n label = labels_dict[pred[j]]\n if label != NONE_LABEL:\n label = add_brackets(label)\n output += label\n output += ' '\n labeled_note += '\\n' + output.strip()\n logging.info(f'Combined: {output.strip()}')\n\n '''\n\n if len(pred) == len(words):\n output = ''\n for j, w in enumerate(words):\n output += w\n label = labels_dict[pred[j]]\n if label != NONE_LABEL:\n label = add_brackets(label)\n output += label\n output += ' '\n all_notes_labeled_lines.append(output.strip())\n logging.info(f'Combined: {output.strip()}')\n else:\n all_notes_labeled_lines.append(query)\n pred_length = str(len(pred))\n word_length = str(len(words))\n logging.info(f'__Prediction/Word Length Mismatch__ pred length: {pred_length}, words length: {word_length}')\n logging.info(f'{query}')\n \n\n print(str(load_datalayer_end_time-load_datalayer_begin_time)+' seconds to load the datalayer')\n print(str(load_hidden_states_end_time-load_hidden_states_begin_time)+' seconds to load hidden states')\n print(str(load_logits_end_time-load_logits_begin_time)+' seconds to load logits')\n print(str(infer_end_time-infer_begin_time)+' seconds to run inference')\n\n return all_notes_labeled_lines", "def nlp(self, text):\n # Runs the NLP model on the input.\n doc = self.nlp_model(text)\n\n to = []\n when = []\n body = []\n\n # Group the labels into variables.\n for token in doc:\n if token.dep_ == 
\"TO\":\n to.append(token.text)\n elif token.dep_ == \"WHEN\":\n when.append(token.text)\n elif token.dep_ == \"BODY\":\n body.append(token.text)\n log.debug(\"%s %s\", token.text, token.dep_)\n\n # Get the time entity from the NLP model.\n time = datetime.now()\n if len(when) == 0:\n time = time + timedelta(seconds=5)\n else:\n time = tc.parse_time(when)\n\n _body = \" \".join(body)\n\n return (to, time, _body)", "def ie_preprocess(document):\n sentences = nltk.sent_tokenize(document) #NLTK default sentence segmenter\n #print sentences # sentences are segmented\n sentences = [nltk.word_tokenize(sent) for sent in sentences] # NLTK word tokenizer \n #print sentences # sentences are tokenized\n sentences = [nltk.pos_tag(sent) for sent in sentences] # NLTK POS tagger \n #print sentences # sentences are POS tagged\n return sentences", "def convert_chn_text(detail=True):\n p = {\n \"data_path\": \"../data/data_literature\",\n \"output_dir\": \"../data/converted_data\"\n }\n if detail:\n gen_params_info(p)\n\n os.system(\"rm -rf %s\" % p[\"output_dir\"])\n os.system(\"mkdir -p %s\" % p[\"output_dir\"])\n files = os.listdir(p[\"data_path\"])\n for file_name in files:\n if detail:\n print(\"to process %s\" % file_name)\n file_path = \"%s/%s\" % (p[\"data_path\"], file_name)\n out_file_path = \"%s/%s\" % (p[\"output_dir\"], file_name)\n fh_in = codecs.open(filename=file_path, mode=\"r\", encoding='utf8')\n fh_out = codecs.open(filename=out_file_path, mode=\"w\", encoding='utf8')\n line_idx = 1\n verb = \"\"\n for line in fh_in:\n line = line.lstrip()\n if line.find(\"\\t\") < 0:\n print(\"Please check in file %s, line: %s\\nsentence :%s\\n\"\\\n \"The above sentence has NO TAB and has been skiped!\" \\\n % (file_name, line_idx, line))\n continue\n items = line.split(\"\\t\")\n if len(items) != 4:\n print(\"Please check in file %s, line: %s\\nsentence :%s\\n\"\\\n \"The above sentence has NO 4 TAB and has been skiped!\" \\\n % (file_name, line_idx, line))\n continue\n frame_id = items[0]\n if frame_id.find(\".\") >= 0:\n frame_id = frame_id.split(\".\")[0]\n verb = items[2].strip()\n left_sent = items[1].strip()\n right_sent = items[3].strip()\n out_line = \"%s\\t%s\\t%s\\t%s\"\\\n % (frame_id, left_sent, verb, right_sent)\n print(out_line, file=fh_out)\n\n line_idx += 1\n\n fh_in.close()\n fh_out.close()", "def extract_features_temporal(self, text, expDateStr = None, onsetDateStr = None, refExpDateStr = None, textType='vaers'):\n \n featurelist = []\n \n sentences = util.sentence_tokenize(text)\n taggedSentences = [] \n# id = 0\n for sentnumber, sentence0 in enumerate(sentences):\n \n sentence = self.clean_text(sentence0)\n \n # tokenize each sentence to have a list of words to be processed\n tokens = nltk.word_tokenize(sentence)\n #run the above procedure\n sentence_to_parse = self.get_untagged(tokens)\n \n # Save tagged sentences for later computing of expose date\n taggedSentences.append(sentence_to_parse)\n \n #only if the cleaned sentence is NOT empty we parse it\n if sentence_to_parse!=[]:\n tree = self.cp.parse(sentence_to_parse)\n tree1 = self.cp1.parse(sentence_to_parse)\n \n# new_sentence_to_parse = ','.join([' '.join(nltk.tag.untag(subtree.leaves())) + ' ' for subtree in tree.subtrees() if subtree.node in self.st_filter])\n new_sentence_to_parse = ','.join([' '.join(nltk.tag.untag(subtree.leaves())) + ' ' for subtree in tree.subtrees() if subtree.label() in self.st_filter])\n\n #here we delete the dash and replace it with whitespace to convert post-vac to post vac\n new_sentence_to_parse 
= new_sentence_to_parse.replace(', ,', ',')\n #here we delete the dash and replace it with whitespace to convert post-vac to post vac\n new_sentence_to_parse = new_sentence_to_parse.replace(',', ', ')\n\n new_sentence_to_parse = nltk.word_tokenize(new_sentence_to_parse)\n\n #run the above procedure\n new_sentence_to_parse = self.get_untagged(new_sentence_to_parse)\n \n if new_sentence_to_parse!=[]:\n tree2 = self.cp.parse(new_sentence_to_parse)\n for subtree in tree2.subtrees():\n if subtree.label() in self.st_filter: \n featString = self.massage_features(subtree)\n featurelist.append((subtree.label(), featString, sentnumber, subtree.leaves()))\n \n for subtree in tree1.subtrees():\n if subtree.label() in self.labels_gram1:\n featString = self.massage_features(subtree)\n featurelist.append((subtree.label(), featString, sentnumber, subtree.leaves()))\n\n self.initialization_text_data(text, sentences, taggedSentences, textType)\n \n featObjList = self.initialize_feature_obj_list(featurelist)\n \n docFeature = self.extract_temporal_info(featObjList, expDateStr, onsetDateStr, refExpDateStr)\n \n return docFeature", "def preprocess_document(raw,sentence_level):\r\n\r\n\t# raw = raw.decode(\"utf-8\")\r\n\t# raw = raw.encode(\"ascii\",\"ignore\")\r\n\t\r\n\tfrom nltk.tokenize.punkt import PunktSentenceTokenizer, PunktParameters\r\n\tparam = PunktParameters()\r\n\ttokenizer = PunktSentenceTokenizer(param)\r\n\tif sentence_level:\r\n\t\tsentences = tokenizer.tokenize(raw)\r\n\t\tsentences_words = list()\r\n\t\tfor s in sentences:\r\n\t\t\tsentences_words.append((s.strip(),preprocess_sentence(s)))\r\n\t\treturn sentences_words\r\n\telse:\r\n\t\treturn [(raw.strip(),preprocess_sentence(raw))]", "def generate_txt(self):\n txt_string = ''\n rp_obj = self.env['res.partner']\n for txt in self:\n vat = rp_obj._find_accounting_partner(\n txt.company_id.partner_id).vat[2:]\n vat = vat\n for txt_line in txt.txt_ids:\n vendor, buyer = self.get_buyer_vendor(txt, txt_line)\n period = txt.period_id.name.split('/')\n period2 = period[0] + period[1]\n # TODO: use the start date of the period to get the period2\n # with the 'YYYYmm'\n operation_type = ('V' if txt_line.invoice_id.type in\n ['out_invoice', 'out_refund'] else 'C')\n document_type = self.get_type_document(txt_line)\n document_number = self.get_document_number(\n txt_line, 'inv_number')\n control_number = self.get_number(\n txt_line.invoice_id.nro_ctrl, 'inv_ctrl', 20)\n document_affected = self.get_document_affected(txt_line)\n voucher_number = self.get_number(\n txt_line.voucher_id.number, 'vou_number', 14)\n amount_exempt, amount_untaxed = \\\n self.get_amount_exempt_document(txt_line)\n amount_untaxed = amount_untaxed\n alicuota = self.get_alicuota(txt_line)\n amount_total, amount_exempt = self.get_amount_line(\n txt_line, amount_exempt)\n\n txt_string = (\n txt_string + buyer + '\\t' + period2.strip() + '\\t' +\n txt_line.invoice_id.date_invoice + '\\t' + operation_type +\n '\\t' + document_type + '\\t' + vendor + '\\t' +\n document_number + '\\t' + control_number + '\\t' +\n str(round(amount_total, 2)) + '\\t' +\n str(round(txt_line.untaxed, 2)) + '\\t' +\n str(round(txt_line.amount_withheld, 2)) + '\\t' +\n document_affected + '\\t' + voucher_number + '\\t' +\n str(round(amount_exempt, 2)) + '\\t' + str(alicuota) +\n '\\t' + '0' + '\\n')\n return txt_string", "def extract_data(file_ner,file_pos,separator=\" \"):\n\n # read NER and POS from the two files\n words_tags=read_conll_file(file_ner)\n words_pos=read_conll_file(file_pos)\n \n ## some 
checks, e.g., that both files have same length, same tokens\n assert(len(words_tags)==len(words_pos))\n \n for (words,tags),(_,pos) in zip(words_tags,words_pos):\n for word,pos,tag in zip(words,pos,tags):\n # first letter is capitalized\n cap=\"+\" if word[0].isupper() else \"-\"\n hyphen = '+' if '-' in word else '-'\n l = str(len(word))\n #vowels = \"\".join(sorted([w for w in word.lower() if w in ['a','e','i','o','u','y']]))\n #################################\n ###### YOUR FEATURES HERE ####### \n #################################\n # 0=separator\n \n ## todo: output the cap feature and more \n ## make sure the format you output here is what the nerfeats.py script expects as fields!\n print separator.join([word.lower(),pos,cap, l, hyphen, tag])\n # sentence separator\n print \"\"", "def post_process_text(self, text):\n\t\treturn text", "def process_input(fname,onlynugget,onlyarg):\n content=utils.readFileEncode(fname,'utf8')\n lines = content.split('\\n')[:-1]\n sentences=[]\n labels=[]\n sent=[]\n label=[]\n for i in range(len(lines)):\n if len(lines[i])>3:\n words=lines[i].split('\\t')\n word={'originalText':words[0],'offset':int(words[1])}\n sent.append(word)\n if onlynugget:\n if words[2] in NuggetList10:\n label.append(words[2]) \n else:\n label.append('O')\n elif onlyarg:\n if words[2] in ArgumentList:\n\n if 'Software' in words[2]:\n label.append(words[2][0:2]+'System')\n else:\n label.append(words[2])\n else:\n label.append('O')\n else:\n if len(sent)>0 and len(label)>0: \n sentences.append(sent)\n labels.append(label) \n sent=[]\n label=[]\n elif len(sent)==0 and i < len(lines)-1:\n sentences.append([])\n labels.append([])\n \n return sentences,labels", "def label_paragraphs(root_el, fastcase_data):\n # case metadata\n citations = [alphanum_lower(\" \".join((c[\"Volume\"], c[\"Reporter\"], c[\"Page\"]) + ((c[\"Suffix\"],) if \"Suffix\" in c else ()))) for c in fastcase_data['Citations']]\n name_clean = alphanum_lower(fastcase_data['PartyHeader']) if fastcase_data['PartyHeader'] else None\n court_clean = alphanum_lower(fastcase_data['CourtName'] or fastcase_data['CourtAbbreviation'])\n docket_numbers_clean = [alphanum_lower(d) for d in fastcase_data['DocketNumbers']]\n\n # via https://github.com/harvard-lil/CaselawAccessProjectSchemas/blob/master/casebodyxml/v1/casebodyxml.xsd\n states = {k:i for i, k in enumerate([None, \"citation\", \"parties\", \"docketnumber\", \"court\", \"otherdate\", \"decisiondate\", \"history\", \"syllabus\", \"attorneys\", \"judges\", \"disposition\", \"_opinionstart\", \"_preauthor\", \"author\", \"opinion\"])}\n reverse_states = {v:k for k, v in states.items()}\n\n state = 0\n header_els = []\n opinions = [[]]\n header_complete = False\n extra_els = []\n blank_els = []\n authors = []\n opinion_starts = []\n paragraph_id = 1\n\n def shift_to_opinion(i):\n \"\"\"Move i elements from the end of header to the start of opinion.\"\"\"\n if not i:\n return\n nonlocal header_els\n opinions[0][0:0] = header_els[-i:]\n header_els = header_els[:-i]\n\n def add_el(el, state, target_list=header_els):\n nonlocal blank_els, paragraph_id\n if state:\n if not reverse_states[state].startswith('_'):\n el.attrib['class'] = reverse_states[state]\n if state == states['_opinionstart']:\n opinion_starts.append((len(target_list), el))\n elif state == states['author']:\n authors.append((len(target_list), el))\n blank_els = []\n else:\n blank_els.append(el)\n el.attrib['id'] = f'p-{paragraph_id}'\n paragraph_id += 1\n target_list.append(el)\n\n def append_to_previous(line):\n 
PyQuery(header_els[-1]).append(PyQuery(line))\n\n for el_pq in PyQuery(root_el)('root').children().items():\n\n if extra_els:\n extra_els.append(el_pq)\n el_pq = extra_els.pop(0)\n\n el = el_pq[0]\n\n # mark the end of the labeled front matter (which may or may not align with actual end)\n if el.tag == 'header-end':\n header_complete = True\n if state == states[\"author\"]:\n state = states[\"opinion\"]\n continue\n\n # skip\n if el.text == \"COPYRIGHT MATERIAL OMITTED\":\n continue\n\n # add linebreak after element for indentation\n if not (el.tail and el.tail.startswith('\\n')):\n el.tail = '\\n' + (el.tail or '')\n\n line = inner_html(el)\n line_text = strip_tags(line)\n line_text_lower = line_text.lower()\n line_alphanum_chars = alphanum_lower(line_text)\n\n # if we've had 5 regular paragraphs in a row, assume we missed the start of the opinion\n if state < states[\"opinion\"] and len(blank_els) >= 5:\n shift_to_opinion(len(blank_els))\n state = states[\"opinion\"]\n\n # we have now reached the opinion and no longer have to process header lines\n if state >= states[\"opinion\"]:\n # check short lines for the start of a concurrence or dissent\n m = new_opinion_re.match(line_text)\n if m:\n el.attrib['class'] = 'author'\n el.attrib['opinion-type'] = opinion_type_lookup[m[1].lower()]\n opinions.append([])\n\n add_el(el, 0, opinions[-1])\n continue\n\n # citation\n if state <= states[\"citation\"]:\n if any(c in line_alphanum_chars for c in citations) or all(citation_like_re.match(s) for s in line.split('<br>')):\n state = states[\"citation\"]\n continue # don't include citation lines in output\n\n # parties\n if state < states[\"parties\"]:\n # special case -- if the case doesn't have a name, like NE2d/939/939ne2d586.xml,\n # assume that whatever comes after the last citation is the name\n if name_clean is None or line_alphanum_chars == name_clean:\n state = states[\"parties\"]\n add_el(el, state)\n elif header_els and name_clean == alphanum_lower(inner_html(header_els[-1]) + line):\n # handle edge case where name is split across two paragraphs\n append_to_previous(line)\n elif line_alphanum_chars.startswith(name_clean) or similar_strings(line_text, fastcase_data['PartyHeader']):\n # special cases -- NW2d/881/881 N.W.2d 813-4_Replace.xml, NW2d/792/792NW2d203.xml\n state = states[\"parties\"]\n add_el(el, state)\n else:\n # if we haven't found a valid name yet, paragraphs are just regular paragraphs\n add_el(el, 0)\n continue\n\n # docket numbers or court\n if state < states[\"court\"]:\n # detect 'Supreme Judicial Court of Massachusetts.' and 'United States Bankruptcy Appellate Panel of the Ninth Circuit.' as a court, but not\n # 'Court of Appeals Case No. 04A03-1707-IF-1724' or 'Consol. Court No. 16-00054'\n # line may be 'Court of Appeals of Virginia, Chesapeake.' if court is 'Court of Appeals of Virginia'\n # line may be 'North Carolina Court of Appeals.' if court is 'Court of Appeals of North Carolina'\n # if 'court' in line.lower() or 'panel' in line.lower()) and ('No.' not in line or 'Division No.' 
in line):\n if any(line_alphanum_chars.startswith(s) for s in docket_numbers_clean):\n state = states[\"docketnumber\"]\n elif line_alphanum_chars.startswith(court_clean) or (\n (line_text.endswith('Court of Appeals.') or any(line_text_lower.startswith(s) for s in ('court of appeal', 'supreme court')))\n ):\n state = states[\"court\"]\n else:\n state = states[\"docketnumber\"]\n add_el(el, state)\n continue\n\n # accidental start of opinion included in head matter\n # NW2d/737/737NW2d768_3New.xml -- \"On order of the Court ...\"\n if state >= states[\"decisiondate\"]:\n if line_text.startswith(\"On order of the Court\"):\n state = states[\"opinion\"]\n add_el(el, 0, opinions[-1])\n continue\n\n # dates\n # 'DATED at Olympia, Washington, this 31st day of October, 2018.'\n # '01-04-2017'\n if state <= states[\"decisiondate\"]:\n # long line isn't decision date -- SCt/134/134sct985_2.xml\n if len(line_text) < 80 and (date_re.search(line_text) or line_text_lower.startswith('dated at') or re.match(r'\\d{1,2}-\\d{2}-\\d{4}$', line_text)):\n if any(line_text.startswith(s) for s in ('Released', 'Submitted', 'Dissenting')) and 'Decided' not in line_text:\n # handle case like\n # 'Submitted June 5, 2007, at Lansing.'\n # 'Decided June 12, 2007, at 9:05 a.m.'\n # 'Released for Publication October 11, 2007\n # 'Dissenting Opinion of Chief Justice Maynard June 27, 2008.'\n # avoid\n # 'Submitted March 2, 2010.<br>Decided April 2, 2010.'\n state = states[\"otherdate\"]\n else:\n state = states[\"decisiondate\"]\n add_el(el, state)\n continue\n\n if state < states[\"judges\"]:\n # strip off judges lines appended to current line, and add as an extra_el\n # \"for Respondent.<strong>Justice BEATTY.</strong></p>\" SE2d/708/708se2d750.xml\n # \"... West Virginia Insurance Federation.<strong>DAVIS, Justice:</strong></p>\" SE2d/719/719se2d830.xml\n # \"for appellees.<strong>Present: HUMPHREYS, McCLANAHAN and BEALES, JJ.</strong><strong>BEALES, Judge.</strong>\" SE2d/708/708se2d429.xml\n while True:\n m = re.search('(.+)(<strong>([^<]+)</strong>)$', line)\n if m and is_judges_or_author(m[3]):\n extra_els.insert(0, PyQuery('<p>'+m[2]+'</p>'))\n line = m[1]\n el_pq.html(line)\n line_text = strip_tags(line)\n line_alphanum_chars = alphanum_lower(line_text)\n continue\n break\n\n # history\n # 'Appeal by defendant from judgment entered 8 December 2004 by Judge Robert H. Hobgood in Alamance County Superior Court. Heard in the Court of Appeals 2 November 2005.'\n if line_text_lower.startswith('appeal') or any(s in line_text for s in ('Superior Court', 'District Court', 'Circuit Court')):\n state = states[\"history\"]\n add_el(el, state)\n continue\n\n # syllabus\n if 'Syllabus by the Court' in line_text or (state == states[\"syllabus\"] and re.match(r'\\d+\\.|[a-z\\[]', line_text)):\n if re.match(r'[a-z\\[]', line_text):\n # handle case where syllabus is split midsentence\n append_to_previous(line)\n else:\n state = states[\"syllabus\"]\n add_el(el, state)\n continue\n\n # attorneys\n # 'Garrett D. Blanchfield, Jr., Reinhardt Wendorf & Blanchfield, St. 
Paul, MN, for Appellants.'\n if any(line_text.startswith(s) for s in (\"An amicus\", \"For the\", \"On behalf of\")) or any(s in line_text for s in (' for ', 'amici curiae', 'pro se')):\n state = states[\"attorneys\"]\n add_el(el, state)\n continue\n\n # titles that mark the start of an opinion, like \"OPINION\"\n if line_alphanum_chars in opinion_start_lines or any(line_alphanum_chars.startswith(s) for s in opinion_start_line_prefixes):\n state = states[\"_opinionstart\"]\n if line_text != \"OPINION\":\n add_el(el, state)\n continue\n\n # Handle paragraph that is definitely followed by author, like \"The opinion of the court was delivered by\", A3d/148/148 A.3d 441_Replace.xml\n if line_text == \"The opinion of the court was delivered by\":\n state = states[\"_preauthor\"]\n add_el(el, 0)\n continue\n if state == states[\"_preauthor\"]:\n add_el(el, states[\"author\"])\n state = states[\"opinion\"]\n continue\n\n # author\n # note, in theory fastcase_data[\"Author\"] could be useful for identifying author paragraph, but it's often not set,\n # and when it is it can also appear in the judges line and other places ...\n judges_or_author = is_judges_or_author(line_text)\n if judges_or_author == \"judges\":\n state = states[\"judges\"]\n add_el(el, state)\n continue\n elif judges_or_author == \"author\":\n add_el(el, states[\"author\"])\n state = states[\"opinion\"] if header_complete else states[\"author\"]\n continue\n\n # weird special case where there's an order provided before the start of the opinion\n # E.g. NW2d/740/740NW2d659_1.xml, 'ORDER ENTERED JUNE 8, 2007' and subsequent unlabeled lines\n if line_text.startswith(\"ORDER ENTERED\") or state == states[\"disposition\"]:\n state = states[\"disposition\"]\n add_el(el, state)\n continue\n\n # regular paragraph\n add_el(el, 0)\n continue\n\n # fixups\n labels = [el.attrib.get('class') for el in header_els]\n # rewrite special case like NE2d/944/944ne2d1119.xml:\n # [['parties', '...'],\n # ['docketnumber', 'Feb. 15'],\n # ['docketnumber', '2011.'],\n # ['court', 'Court of Appeals of New York.']]\n # to\n # [['parties', '...'],\n # ['court', 'Court of Appeals of New York.'],\n # ['decisiondate', 'Feb. 15, 2011.']]\n if labels == [None, 'docketnumber', 'docketnumber', 'court']:\n docket_combined = header_els[1].text + \", \" + header_els[2].text\n if date_re.match(docket_combined):\n header_els[1].attrib['class'] = 'decisiondate'\n header_els[1].text = docket_combined\n header_els = [header_els[0], header_els[3], header_els[1]]\n\n # change all author labels but the last to judges; we likely misdetected one earlier\n for i, el in authors[:-1]:\n el.attrib['class'] = \"judges\"\n\n # if we didn't find an author and the last line is unlabeled, assume that's the author with a typo --\n # e.g. 
NW2d/753/753NW2d552_1.xml , missing comma\n if header_els and not authors and not opinion_starts and state >= states[\"judges\"] and header_els[-1].attrib.get('class') is None:\n header_els[-1].attrib['class'] = \"author\"\n authors = [(len(header_els)-1, header_els[-1])]\n\n # move author, and any paragraphs after it, to beginning of first opinion\n move_index = opinion_starts[0][0] + 1 if opinion_starts else authors[-1][0] if authors else None\n if move_index is not None:\n shift_to_opinion(len(header_els)-move_index)\n\n return header_els, opinions", "def softm_to_invoice(rechnungsnr):\n from pprint import pprint\n\n if str(rechnungsnr).startswith('RG'):\n rechnungsnr = str(rechnungsnr)[2:]\n rg, orderlines = get_rechnung('RG833645')\n hint = {}\n for attr in 'skontobetrag'.split():\n hint[attr] = rg[attr]\n out = {'hint': hint}\n for attr in '''kundenauftragsnr auftragsnr versandkosten rechnung_steuranteil rechnungsnr\n zu_zahlen'''.split():\n out[attr] = rg[attr]\n\n out['leistungsdatum'] = rg['versand_date']\n out['kundennr'] = rg['kundennr_rechnungsempfaenger']\n out['erfasst_von'] = rg['sachbearbeiternr']\n out['abschlag_prozent'] = rg['auftragsrabatt1p'] + rg['auftragsrabatt2p']\n out['auftragsrabatt'] = rg['auftragsrabatt']\n out['rechungsdatum'] = rg['druck_date']\n rabatttext = ' und '.join([x for x in [rg['rabatttext1'].strip(), rg['rabatttext2'].strip()] if x])\n rabatttext = \"\"\n if rabatttext:\n rabatttext = \"%s: %f\" % (rabatttext, out['abschlag_prozent'])\n elif out['abschlag_prozent']:\n rabatttext = u\"Ab/Zuschläge: %f\" % (out['abschlag_prozent'])\n\n out['infotext_kunde'] = '\\n'.join([rabatttext])\n\n out['orderlines'] = []\n for ol in get_connection().query(['AFU00'], condition=\"FURGNR=%s\" % sql_escape(rechnungsnr)):\n pprint(ol)\n outol = {}\n for attr in '''menge artnr abschlag rechungsbetrag warenwert'''.split(): # zu_zahlen\n outol[attr] = ol[attr]\n out['orderlines'].append(outol)\n\n #line = dict(\n # guid=p.guid,\n # menge=int(p.menge),\n # artnr=p.artnr,\n # #kundenartnr=f3.artnr_kunde,\n # #name=f3.artikelbezeichnung.strip(),\n # infotext_kunde=p.text\n # #einzelpreis=int(abs(f3.verkaufspreis)*100),\n # #warenwert=int(p.wert_netto*100),\n # #zu_zahlen=int(abs(f3.wert_brutto)*100),\n # #abschlag=int(f4.positionsrabatt_gesamt*100)\n # )\n\n #if f3.ean and int(f3.ean):\n # line['ean'] = f3.ean", "def text_preprocessing_pdf(self,p):\n #remover_end_paragraphs=np.vectorize(self.remove_end_paragraphs,otypes=[str])\n cleaner=np.vectorize(self.remove_non_alpha,otypes=[str])\n cut_text=np.vectorize(self.cut_text,otypes=[str])\n cut_text_raw=np.vectorize(self.cut_text_raw,otypes=[str])\n assert len(self.parser)==len(self.parser_raw), \"Length of the treated sentence treated list does not match length of raw text list: {} / {}\".format(len(self.parser),len(self.parser_raw))\n cut_text_raw(p)\n p=cleaner(p)\n cut_text(p)\n return p", "def debian_multiline_description(description):\n return \"\\n \".join(line for line in description.split(\"\\n\") if line.strip() != \"\")", "def _DocSim(self,df,a):\r\n #Obtain the descriptions of the two input courses.\r\n textA = df['description'][a]\r\n #Obtain the document embedding vector for each description.\r\n vectorA = self.DocVecModel.infer_vector([textA], alpha=0.1, min_alpha=0.0001, steps=300)\r\n return vectorA", "def _prepare_invoice_lines(self, exchange_line, order_line):\n invoice_type = {\n 'sale.order.line': {\n 'higher': 'out_invoice', 'lower': 'out_refund',\n 'type': 'sale', 'field': 'exchange_sale_line_id'\n 
},\n 'purchase.order.line': {\n 'higher': 'in_invoice', 'lower': 'in_refund',\n 'type': 'purchase', 'field': 'exchange_purchase_line_id'\n },\n }\n product = exchange_line.exchange_product_id or exchange_line.product_id\n data = {\n 'invoice_type': False,\n 'values': {\n 'product_id': product.id,\n 'quantity': exchange_line.quantity,\n 'name': 'Exchange for [%s]' % exchange_line.product_id.display_name,\n }\n }\n if exchange_line.exchange_product_id or \\\n exchange_line.price_subtotal > order_line.price_subtotal:\n data['invoice_type'] = invoice_type[order_line._name]['higher']\n elif exchange_line.price_subtotal < order_line.price_subtotal:\n data['invoice_type'] = invoice_type[order_line._name]['lower']\n else:\n return {}\n data[invoice_type[order_line._name]['type']] = order_line.order_id\n data['values'][invoice_type[order_line._name]['field']] = order_line.id\n data['values']['price_unit'] = exchange_line.price_unit\n # TODO i think we should take the different between prices NOT the all price\n # abs(exchange_line.price_unit - order_line.price_unit)\n return data", "def process_text(self):\n prp1 = preprocessor.Preprocess()\n processed_text = prp1.clean_data(self.text)\n self.vec1 = self.vec.transform(pd.Series(processed_text))" ]
[ "0.5984977", "0.5817351", "0.5476201", "0.5464979", "0.54381603", "0.5437341", "0.539142", "0.5348691", "0.53211063", "0.5294485", "0.5236899", "0.51855206", "0.51376784", "0.51221997", "0.50945246", "0.50891274", "0.5085302", "0.50443596", "0.5012782", "0.5011681", "0.49394357", "0.49224195", "0.49133125", "0.4907674", "0.4897296", "0.48888484", "0.48883978", "0.4881489", "0.48669836", "0.48628157" ]
0.7215489
0
Standardize quantitative features. The standardizer is stored as an object attribute. It will be copied into the P5_SegmentClassifier object.
def feature_scale(self): #------------------------------------------------------------------------- # List of quantitative features to be standardized #------------------------------------------------------------------------- list_quant_feature = ['Quantity','UnitPrice'] self._list_quant_feature = list_quant_feature.copy() #------------------------------------------------------------------------- # Standardization is applied over quantitative features in list. #------------------------------------------------------------------------- X_std = self.std_scale.transform(self.df_invoice_line[self.list_quant_feature]) df_quant_std = pd.DataFrame(X_std, index=self.df_invoice_line.index) #------------------------------------------------------------------------- # Columns from standardized dataframe are renamed #------------------------------------------------------------------------- df_quant_std.rename(columns={0:'STD_Quantity',1:'STD_UnitPrice'}\ ,inplace=True) #------------------------------------------------------------------------- # Standardized values dataframe is aggregated to df_invoice_line #------------------------------------------------------------------------- list_col_drop = ['Quantity','UnitPrice'] list_col_keep = \ [col for col in self.df_invoice_line.columns if col not in list_col_drop ] self.df_invoice_line = self.df_invoice_line[list_col_keep] self.df_invoice_line \ = pd.concat([self.df_invoice_line,df_quant_std], axis=1) return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def standardiser(self):\n # Select only numeric features first\n\n #self.X = self.data.loc[:, self.data.columns != self.target].values\n numeric_columns = []\n for col in self.X.columns:\n if self.X[col].dtype!='object':\n numeric_columns.append(col)\n scaler = preprocessing.StandardScaler().fit(self.X[numeric_columns]) \n # Now we can standardise\n self.X[numeric_columns] = scaler.transform(self.X[numeric_columns])", "def test_scale_features_standardize(self):\n data = array([[0.564, 20.661], [-18.512, 41.168], [-0.009, 20.440]])\n cdata = CData(data)\n\n # correct answer computed in Mathematica\n # TODO: can we compute the right answer in Python?\n answer = array([[0.60355, -0.568043], [-1.1543, 1.15465], [0.550748, -0.586608]])\n\n # perform standardization feature scaling and check answer\n cdata.scale_features('standardize')\n self.assertTrue(allclose(cdata.data, answer))", "def _standardize(self):\n deviation = np.std(self.series)\n self.series = (self.series - np.mean(self.series)) / (deviation if deviation != 0 else 1)", "def standardize(X):\n\n scaler = StandardScaler()\n X_scaled = scaler.fit_transform(X)\n return X_scaled", "def standardize(self, x):\n if self.preprocessing_function:\n x = self.preprocessing_function(x)\n if self.rescale:\n x *= self.rescale\n if self.samplewise_center:\n x -= np.mean(x, keepdims=True)\n if self.samplewise_std_normalization:\n x /= (np.std(x, keepdims=True) + K.epsilon())\n\n if self.featurewise_center:\n if self.mean is not None:\n x -= self.mean\n else:\n warnings.warn('This AudioDataGenerator specifies '\n '`featurewise_center`, but it hasn\\'t '\n 'been fit on any training data. Fit it '\n 'first by calling `.fit(numpy_data)`.')\n if self.featurewise_std_normalization:\n if self.std is not None:\n x /= (self.std + K.epsilon())\n else:\n warnings.warn('This AudioDataGenerator specifies '\n '`featurewise_std_normalization`, but it hasn\\'t '\n 'been fit on any training data. Fit it '\n 'first by calling `.fit(numpy_data)`.')\n if self.zca_whitening:\n if self.principal_components is not None:\n flatx = np.reshape(x, (-1, np.prod(x.shape[-2:])))\n whitex = np.dot(flatx, self.principal_components)\n x = np.reshape(whitex, x.shape)\n else:\n warnings.warn('This AudioDataGenerator specifies '\n '`zca_whitening`, but it hasn\\'t '\n 'been fit on any training data. 
Fit it '\n 'first by calling `.fit(numpy_data)`.')\n return x", "def pre_processing(self, whole_dataset, type=None):\n # for svm\n X = whole_dataset\n if self._scaler == None:\n self._scaler = preprocessing.StandardScaler().fit(X)\n else:\n basic.outputlogMessage('warning, StandardScaler object already exist, this operation will overwrite it')\n self._scaler = preprocessing.StandardScaler().fit(X)\n # save\n joblib.dump(self._scaler, scaler_saved_path)", "def standardize(sets_x):\n\n # initiate empty list for return variable\n standardized_x = []\n\n # iterate through subsets\n for x in sets_x:\n # call preprocess function, normalize and generate features for each subset\n # and store the result into list\n standardized_x.append(generate_features(x, 2, True, with_log=True, with_sqrt=True, cross_terms=True))\n\n return standardized_x", "def standardize(X_train_input, X_test_input):\r\n from sklearn.preprocessing import StandardScaler\r\n sc = StandardScaler()\r\n sc.fit(X_train_input)\r\n\r\n X_train_std = sc.transform(X_train_input)\r\n X_test_std = sc.transform(X_test_input)\r\n \r\n return X_train_std, X_test_std", "def standardize(self, x):\n if not self.image_resample:\n x = to_shape(x, self.image_shape, constant_values=-1024)\n elif self.image_resample:\n x = resample(x, self.image_shape)\n\n if self.preprocessing_function:\n x = self.preprocessing_function(x)\n if self.voxelwise_normalization:\n if self.voxel_bounds is not None:\n x = voxelwise_normalize(x, self.voxel_bounds)\n if self.voxelwise_center:\n if self.voxel_mean is not None:\n x -= self.voxel_mean\n if self.voxelwise_std_normalization:\n x /= (self.voxelwise_std + 1e-7)\n if self.samplewise_center:\n x -= np.mean(x, axis=self.channel_axis, keepdims=True)\n if self.samplewise_std_normalization:\n x /= (np.std(x, axis=self.channel_axis, keepdims=True) + 1e-7)\n return x", "def standardize_data(f, train_mask):\n # standardize data\n f = f.todense()\n mu = f[train_mask == True, :].mean(axis=0)\n sigma = f[train_mask == True, :].std(axis=0)\n f = f[:, np.squeeze(np.array(sigma > 0))]\n mu = f[train_mask == True, :].mean(axis=0)\n sigma = f[train_mask == True, :].std(axis=0)\n f = (f - mu) / sigma\n return f", "def calculateStandardisation(vector):\r\n global standardisation\r\n # from http://sebastianraschka.com/Articles/2014_about_feature_scaling.htm\r\n std_scale = preprocessing.StandardScaler().fit(vector)\r\n standardisation = std_scale.transform(vector)", "def standardize(tX):\n features = tX.T\n features_len = len(features)\n means = np.reshape(np.mean(features, axis=1), [features_len, 1])\n stds = np.reshape(np.std(features, axis=1), [features_len, 1])\n features_std = (features - means) / stds\n new_tX = features_std.T\n return new_tX", "def standardize(self, snpreader):\n\n for dtype in [np.float64, np.float32]:\n\n snps = snpreader.read(order=\"F\", force_python_only=True, dtype=dtype).val\n self.assertEqual(dtype, snps.dtype)\n\n snp_s1 = Unit().standardize(snps.copy(), force_python_only=True)\n snp_s2 = Unit().standardize(\n snps.copy(), block_size=100, force_python_only=True\n )\n snps_F = np.array(snps, dtype=dtype, order=\"F\")\n snp_s3 = Unit().standardize(snps_F)\n snps_C = np.array(snps, dtype=dtype, order=\"C\")\n snp_s4 = Unit().standardize(snps_C)\n\n self.assertEqual(snp_s1.shape[0], snp_s2.shape[0])\n self.assertEqual(snp_s1.shape[1], snp_s2.shape[1])\n\n self.assertEqual(snp_s1.shape[0], snp_s3.shape[0])\n self.assertEqual(snp_s1.shape[1], snp_s3.shape[1])\n\n self.assertEqual(snp_s1.shape[0], 
snp_s4.shape[0])\n self.assertEqual(snp_s1.shape[1], snp_s4.shape[1])\n\n self.assertTrue(np.allclose(snp_s1, snp_s2, rtol=1e-05, atol=1e-05))\n self.assertTrue(np.allclose(snp_s1, snp_s3, rtol=1e-05, atol=1e-05))\n self.assertTrue(np.allclose(snp_s1, snp_s4, rtol=1e-05, atol=1e-05))\n\n snp_beta1 = Beta(1, 25).standardize(snps.copy(), force_python_only=True)\n snps_F = np.array(snps, dtype=dtype, order=\"F\")\n snp_beta2 = Beta(1, 25).standardize(snps_F)\n snps_C = np.array(snps, dtype=dtype, order=\"C\")\n snp_beta3 = Beta(1, 25).standardize(snps_C)\n\n self.assertEqual(snp_beta1.shape[0], snp_beta2.shape[0])\n self.assertEqual(snp_beta1.shape[1], snp_beta2.shape[1])\n self.assertEqual(snp_beta1.shape[0], snp_beta3.shape[0])\n self.assertEqual(snp_beta1.shape[1], snp_beta3.shape[1])\n\n self.assertTrue(np.allclose(snp_beta1, snp_beta2, rtol=1e-05, atol=1e-05))\n self.assertTrue(np.allclose(snp_beta1, snp_beta3, rtol=1e-05, atol=1e-05))", "def do_preprocess_on_segment_raw(seg_raw_df):\n sigma = 2\n median_kernel_size = 5\n print \"=======================start preprocessing segment raw dataframe=================\"\n print \"parameters: \" + \"gaussian filter sigma: %.2f, median kernel size: %.2f\" % (sigma, median_kernel_size)\n pp_df = seg_raw_df.copy(deep=True)\n df_mean = pp_df[s_info.raw_value_names].mean()\n df_std = pp_df[s_info.raw_value_names].std()\n pp_df[s_info.raw_value_names] = pp_df.groupby(s_info.segment_col)[s_info.raw_value_names].transform(sp_signal.medfilt, median_kernel_size)\n pp_df[s_info.raw_value_names] = (pp_df[s_info.raw_value_names] - df_mean)/df_std\n pp_df[s_info.raw_value_names] = pp_df.groupby(s_info.segment_col)[s_info.raw_value_names].transform(gaussian_filter1d, sigma=sigma, axis=0, order=0, mode='reflect')\n return pp_df", "def dataset_handling_with_standardisation(init_data):\n #\n ##Maximum number of points = 72 , keep around 80 values for even number\n max_len = 80\n ##Fluxes, Standardisation is done over 1 type of feature\n data = init_data.loc[:, [u'fluxes_0', u'fluxes_1', u'fluxes_2', u'fluxes_3', u'fluxes_4', u'fluxes_5']].values\n zp_array_flux = []\n for dat in data:\n n_data = []\n for ii in range(len(dat)):\n n_data = np.append(n_data, np.pad(dat[ii], (0, max_len * 5 - len(dat[ii])), 'constant', constant_values=0))\n n_data = QuantileTransformer(output_distribution='uniform').fit_transform(n_data.reshape(-1, 1)).flatten()\n zp_array_flux.append(n_data)\n zp_array_flux = np.array(zp_array_flux)\n print(zp_array_flux.shape)\n\n ##Fluxerrors, Standardisation is done over 1 type of feature\n data = init_data.loc[:,\n [u'fluxerrs_0', u'fluxerrs_1', u'fluxerrs_2', u'fluxerrs_3', u'fluxerrs_4', u'fluxerrs_5']].values\n zp_array_flux_error = []\n for dat in data:\n n_data = []\n for ii in range(len(dat)):\n n_data = np.append(n_data, np.pad(dat[ii], (0, max_len * 5 - len(dat[ii])), 'constant', constant_values=0))\n n_data = QuantileTransformer(output_distribution='uniform').fit_transform(n_data.reshape(-1, 1)).flatten()\n zp_array_flux_error.append(n_data)\n zp_array_flux_error = np.array(zp_array_flux_error)\n print(zp_array_flux_error.shape)\n\n ##Time, Standardisation is done over 1 type of feature\n data = init_data.loc[:, [u'mjds_0', u'mjds_1', u'mjds_2', u'mjds_3', u'mjds_4', u'mjds_5']].values\n zp_array_mjds = []\n for dat in data:\n n_data = []\n for ii in range(len(dat)):\n n_data = np.append(n_data, np.pad(dat[ii], (0, max_len * 5 - len(dat[ii])), 'constant', constant_values=0))\n n_data = 
QuantileTransformer(output_distribution='uniform').fit_transform(n_data.reshape(-1, 1)).flatten()\n zp_array_mjds.append(n_data)\n zp_array_mjds = np.array(zp_array_mjds)\n print(zp_array_mjds.shape)\n\n ##Concatenating everything\n zp_data = np.c_[zp_array_flux, zp_array_flux_error, zp_array_mjds]\n\n ##Adding redshift info// Gal pos info might be necessary to remove\n zp_data = np.c_[\n zp_data, init_data.loc[:, [u'gal_b', u'gal_l', u'hostgal_photoz', u'hostgal_photoz_err', u'hostgal_specz', u'mwebv']].values]\n print(zp_data.shape)\n\n ##Load labels and convert to integer\n labels = init_data.loc[:, [u'target']].values\n labels = labels.flatten()\n labels_name = np.array([6, 15, 16, 42, 52, 53, 62, 64, 65, 67, 88, 90, 92, 95, 99])\n [np.place(labels, labels == labels_name[i], [i]) for i in range(len(labels_name))]\n\n return [zp_data, labels]", "def standardize(X):\n mu = X.mean(axis=0, keepdims=True)\n s = X.std(axis=0, keepdims=True)\n return (X-mu)/s", "def quantize(self, df):\n if len(self.dict_scalers) == 0:\n raise Exception(\"[ERROR] quantize method called prior to\"\n \"normalization transform method \")\n\n quant_df = pd.DataFrame()\n if 'OneForAll' in self.dict_scalers:\n # quantization is applied on all features\n min_fp = float(np.amin(df))\n max_fp = float(np.amax(df))\n scale = (max_fp - min_fp) / (127 - (-127))\n zero_point = 127 - (max_fp / scale)\n quant_df = df / scale + zero_point\n else:\n # quantization is applied independently for each feature/column\n lbl_list = df.columns.values\n for lbl in lbl_list:\n min_fp = float(np.amin(df[lbl]))\n max_fp = float(np.amax(df[lbl]))\n scale = (max_fp - min_fp) / (127 - (-127))\n zero_point = 127 - (max_fp / scale)\n quant_df[lbl] = df[lbl] / scale + zero_point\n return quant_df.astype(np.int8)", "def standard_scale(X_train, X_test):\n preprossor = StandardScaler().fit(X_train)\n X_train = preprossor.transform(X_train)\n X_test = preprossor.transform(X_test)\n return X_train, X_test", "def standardise(self):\n if self.vector.shape is ():\n return\n if self.dimensionality() != 1:\n # TODO: implement\n raise NotImplementedError\n max_value = 1.0 * max(self.vector)\n if max_value == 0.0:\n # Nothing to do\n return\n self.vector = self.vector.astype('float64') / max_value", "def _normalize_feature(self, feature):\n\n for ic in range(self.data_shape[0]):\n feature[ic] = (feature[ic] - self.feature_mean[ic]\n ) / self.feature_std[ic]\n return feature", "def _standardize(self, x):\r\n\t\tkurts = kurtosis(x) # calculate Fisher kurtosis\r\n\t\tk_x = np.abs(kurts)**(1./4) # the quantity for standardization (k_x in [1])\r\n\t\tx_hat = x / k_x # the standardized data\r\n\t\treturn x_hat", "def standardize(self, inputData):\n\n return (inputData - self.mean) / self.std", "def scalarNormalizer(df):\r\n arr=dict()\r\n for col in CONT_FEATURES_COL_TO_USE:\r\n mean, std =df[col].mean(), df[col].std()\r\n df[col]=df[col].apply(lambda x: (x-mean)/std)\r\n arr[col] = [mean, std]\r\n json.dump(arr, open('normalize.json', 'w'))\r\n return df", "def normalize_standard_deviation(dataset):\n return dataset*(1/np.std(dataset))", "def normalize_features(self, scaler: StandardScaler = None, replace_nan_token: int = 0) -> StandardScaler:\n if len(self.data) == 0 or self.data[0].features is None:\n return None\n\n if scaler is not None:\n self.scaler = scaler\n\n elif self.scaler is None:\n features = np.vstack([d.features for d in self.data])\n self.scaler = StandardScaler(replace_nan_token=replace_nan_token)\n self.scaler.fit(features)\n\n for d in 
self.data:\n d.set_features(self.scaler.transform(d.features.reshape(1, -1))[0])\n\n return self.scaler", "def get_normalizer(data):\n scaler = StandardScaler().fit(data)\n return scaler", "def standardize_data(X_train, X_test):\n scaler = StandardScaler()\n scaler.fit(X_train)\n X_train = scaler.transform(X_train)\n # apply same transformation to test data\n X_test = scaler.transform(X_test)\n return X_train, X_test", "def persist_standardizer(self, std_object):\n object_path = 'model_objects/'\n file_name = f'market_maker_standardizer_{self.target_coin}.pkl'\n self.s3_client.put_object(Bucket=self.s3_bucket,\n Key=object_path + file_name,\n Body=pickle.dumps(std_object, pickle.HIGHEST_PROTOCOL)\n )\n return", "def standardize(train_data_jets, test_data_jets):\n \n nbr_jets = train_data_jets.shape[0]\n \n for jet in range(nbr_jets):\n # extract features for standardization\n train_data_features = train_data_jets[jet][:,2:] \n test_data_features = test_data_jets[jet][:,2:] \n # store train mean and std without considering nan values\n train_mean = np.nanmean(train_data_features, axis=0)\n train_std = np.nanstd(train_data_features, axis=0)\n # standardize train and test data with train mean and std\n train_data_features = (train_data_features - train_mean) / train_std\n test_data_features = (test_data_features - train_mean) / train_std\n # insert standardized features into original dataset with predictions\n train_data_jets[jet][:,2:] = train_data_features\n test_data_jets[jet][:,2:] = test_data_features\n \n return train_data_jets, test_data_jets", "def standardizeRatios( self, ratios ):\n\t\tratios_standardized = ratios.copy()\n\t\tzscore = lambda x: ( x - x.mean() ) / x.std()\n\t\tfor row in ratios.iterrows():\n\t\t\tratios_standardized.loc[ row[0] ] = zscore( row[1] )\n\t\treturn ratios_standardized" ]
[ "0.66549325", "0.5813407", "0.57823", "0.57652164", "0.57482", "0.5628171", "0.56227326", "0.54852945", "0.54692763", "0.5413241", "0.5376666", "0.53717995", "0.5229775", "0.51795113", "0.5171377", "0.51348805", "0.51237977", "0.5088582", "0.5076588", "0.50762415", "0.50561696", "0.50408816", "0.5024084", "0.50040805", "0.4950862", "0.4945896", "0.49209926", "0.49128872", "0.49062297", "0.49034885" ]
0.64846426
1
Returns the market segment ID related to a customer, based on the customer invoice lines given as a parameter. Feature transformations are applied to the data included in the invoice lines. Once done, a machine learning algorithm is invoked to predict the customer's market segment.
def get_customer_marketSegment(self, df_invoice_line_customer): #------------------------------------------------------------------------- # Building data model #------------------------------------------------------------------------- self.data_transform(df_invoice_line_customer) #------------------------------------------------------------------------- # Customer features are built thanks to transformers. #------------------------------------------------------------------------- self.df_customers_features_build() #------------------------------------------------------------------------- # Customer market segment is predicted #------------------------------------------------------------------------- X_test = self._df_customers.values y_pred = self._classifier_model.predict(X_test) segmentID = y_pred[0] return segmentID
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def predict_segment(self, df_invoice_line=None):\n if df_invoice_line is not None:\n self.data_transform(df_invoice_line) \n self.df_customers_features_build() \n else:\n pass\n X_test = self._df_customers.values\n y_pred = self._classifier_model.predict(X_test)\n return y_pred[0]", "def order_process(self, customerID, list_stockCode, list_quantity\\\n , orderDate=None):\n\n segmentID = -1\n\n #-------------------------------------------------------------------------\n # A new customer is created and inserted into data-set.\n #-------------------------------------------------------------------------\n if customerID is None:\n customerID = int(self.createCustomerID())\n else:\n pass\n \n #-------------------------------------------------------------------------\n # A new dataframe with new invoice lines are created.\n #-------------------------------------------------------------------------\n df_invoice_line = self.create_customer_df_invoice_line(customerID\\\n , list_stockCode, list_quantity, orderDate)\n \n #-------------------------------------------------------------------------\n # Original dataframe is updated with customer invoices lines.\n #-------------------------------------------------------------------------\n print(\"order_process : shape before concat= \"+str(self._df_invoice_original.shape))\n self._df_invoice_original \\\n = pd.concat([self._df_invoice_original, df_invoice_line], axis=0)\n print(\"order_process : shape after concat= \"+str(self._df_invoice_original.shape))\n \n #-------------------------------------------------------------------------\n # All invoices lines (including new one) related to customer is retrieved \n # from original dataframe.\n #-------------------------------------------------------------------------\n df_invoice_line_customer \\\n = self.get_customer_history_df_invoice_line(customerID)\n\n #-------------------------------------------------------------------------\n # When calling get_customer_marketSegment(), df_invoice_line_customer is\n # concatened to the original dataframe.\n #------------------------------------------------------------------------- \n segmentID = self.get_customer_marketSegment(df_invoice_line_customer)\n \n return segmentID, customerID", "def invoice(customer_id):\n encoder = request.url_rule.endpoint\n template = \"{{ encoder }}#{{ customer_id|%s }}\" % encoder\n return render_template_string(template, **locals())", "def create_customer_df_invoice_line(self, customerID, list_stockCode\\\n , list_quantity, invoiceDate):\n \n dict_invoice = dict()\n\n dict_invoice['Quantity'] = list_quantity\n dict_invoice['StockCode'] = list_stockCode\n\n #------------------------------------------------------------------------\n # Build invoiceDate from local current time\n #------------------------------------------------------------------------\n if invoiceDate is None:\n time_struct = time.localtime()\n invoiceDate = str(time_struct.tm_year)+'-'+str(time_struct.tm_mon)\\\n +'-'+str(time_struct.tm_mday)\n invoiceDate +=' '\n invoiceDate +=str(time_struct.tm_hour)+':'+str(time_struct.tm_min)\\\n +':'+str(time_struct.tm_sec)\n invoiceDate = pd.Timestamp(invoiceDate)\n else:\n pass\n\n\n #------------------------------------------------------------------------\n # Lists initialization\n #------------------------------------------------------------------------\n list_customerID = list()\n list_invoiceNo = list()\n list_invoiceDate = list()\n list_invoice_line_index = list()\n \n 
#------------------------------------------------------------------------\n # Increase Invoice number\n #------------------------------------------------------------------------\n invoiceNo = max(self._df_invoice_original.InvoiceNo)\n invoiceNo += 1\n\n #------------------------------------------------------------------------\n # Get latest invoice line index value\n #------------------------------------------------------------------------\n invoice_line_index = max(self._df_invoice_original.index)\n\n #------------------------------------------------------------------------\n # Build lists for CustomerID, InvoiceNo, InvoiceDate\n # A list of incremented indexes is built for new rows.\n #------------------------------------------------------------------------\n for quantity in list_quantity:\n list_customerID.append(customerID)\n list_invoiceNo.append(invoiceNo)\n list_invoiceDate.append(invoiceDate)\n invoice_line_index += 1\n list_invoice_line_index.append(invoice_line_index) \n\n \n dict_invoice['CustomerID'] = list_customerID\n dict_invoice['InvoiceNo'] = list_invoiceNo\n dict_invoice['InvoiceDate'] = list_invoiceDate\n\n #------------------------------------------------------------------------\n # Get description list from list of stock codes.\n #------------------------------------------------------------------------\n list_description = self.getDescriptionList(list_stockCode)\n \n dict_invoice['Description'] = list_description\n\n #------------------------------------------------------------------------\n # Get unit price list from list of stock codes.\n #------------------------------------------------------------------------\n list_unitPrice = self.getUnitPriceList(list_stockCode)\n \n dict_invoice['UnitPrice'] = list_unitPrice\n\n #------------------------------------------------------------------------\n # Dataframe with new invoices lines is created.\n #------------------------------------------------------------------------\n df_invoice_line \\\n = pd.DataFrame(dict_invoice, columns=dict_invoice.keys()\\\n , index=list_invoice_line_index)\n \n return df_invoice_line", "def get_customer_history_df_invoice_line(self, customerID):\n df_invoice_line \\\n = self._df_invoice_original[self._df_invoice_original.CustomerID \\\n == customerID]\n return df_invoice_line", "def _create_invoice(self):\n self.ensure_one()\n partner = self.member_id.partner_id\n invoice = self.env['account.invoice'].create({\n 'partner_id': partner.id,\n 'account_id': partner.property_account_receivable_id.id,\n 'fiscal_position_id': partner.property_account_position_id.id\n })\n for line in self.line_ids:\n product = line.activity_id.product_id\n # Handling of invoice lines : needs cache record for onchange, then\n # real writing...\n invoice_line = self.env['account.invoice.line'].new({\n 'product_id': product.id,\n 'invoice_id': invoice.id\n })\n invoice_line._onchange_product_id()\n line_values = dict(invoice_line._cache)\n line_values['price_unit'] = line.price\n invoice_line = self.env['account.invoice.line'].create(line_values)\n invoice.compute_taxes()\n line.registration_id.invoice_line_id = invoice_line.id\n return invoice", "def _prepare_invoice(self, cr, uid, order, lines, context=None):\n if context is None:\n context = {}\n journal_ids = self.pool.get('account.journal').search(cr, uid,\n [('type', '=', 'sale'), ('company_id', '=', order.company_id.id)],\n limit=1)\n if not journal_ids:\n raise osv.except_osv(_('Error !'),\n _('There is no sales journal defined for this company: \"%s\" (id:%d)') 
% (order.company_id.name, order.company_id.id))\n\n invoice_vals = {\n 'name': order.client_order_ref or '',\n 'origin': order.name,\n 'type': 'out_invoice',\n 'reference': order.client_order_ref or order.name,\n 'account_id': order.partner_id.property_account_receivable.id,\n 'journal_id': order.partner_id.property_default_sale_invoice_journal.id,\n 'partner_id': order.partner_id.id,\n 'address_invoice_id': order.partner_invoice_id.id,\n #'address_contact_id': order.partner_order_id.id,\n 'invoice_line': [(6, 0, lines)],\n 'currency_id': order.pricelist_id.currency_id.id,\n 'comment': order.note,\n 'payment_term': order.payment_term and order.payment_term.id or False,\n 'fiscal_position': order.fiscal_position.id or order.partner_id.property_account_position.id,\n 'date_invoice': context.get('date_invoice', False),\n 'company_id': order.company_id.id,\n 'user_id': order.user_id and order.user_id.id or False\n }\n\n # Care for deprecated _inv_get() hook - FIXME: to be removed after 6.1\n invoice_vals.update(self._inv_get(cr, uid, order, context=context))\n\n return invoice_vals", "def _prepare_invoice(self, cr, uid, order, lines, context=None):\n if context is None:\n context = {}\n journal_id = self.pool['account.invoice'].default_get(cr, uid, ['journal_id'], context=context)['journal_id']\n if not journal_id:\n raise osv.except_osv(_('Error!'),\n _('Please define sales journal for this company: \"%s\" (id:%d).') % (order.company_id.name, order.company_id.id))\n invoice_vals = {\n 'name': order.client_order_ref or '',\n 'origin': order.name,\n 'type': 'out_invoice',\n 'reference': order.client_order_ref or order.name,\n 'account_id': order.partner_invoice_id.property_account_receivable.id,\n 'partner_id': order.partner_invoice_id.id,\n 'journal_id': journal_id,\n 'invoice_line': [(6, 0, lines)],\n 'currency_id': order.pricelist_id.currency_id.id,\n 'comment': order.note,\n 'payment_term': order.payment_term and order.payment_term.id or False,\n 'fiscal_position': order.fiscal_position.id or order.partner_invoice_id.property_account_position.id,\n 'date_invoice': context.get('date_invoice', False),\n 'company_id': order.company_id.id,\n 'user_id': order.user_id and order.user_id.id or False,\n 'section_id' : order.section_id.id,\n 'test_1' :order.test\n }\n\n # Care for deprecated _inv_get() hook - FIXME: to be removed after 6.1\n invoice_vals.update(self._inv_get(cr, uid, order, context=context))\n return invoice_vals", "def data_process_deprecated(self, CustomerID, InvoiceDate, InvoiceNo, Description, Quantity\\\n , UnitPrice ):\n dict_invoice = {'InvoiceDate':InvoiceDate, 'Description':Description\\\n , 'Quantity':Quantity, 'UnitPrice':UnitPrice}\n dict_invoice['CustomerID'] = CustomerID\n dict_invoice['InvoiceNo'] = InvoiceNo\n df_invoice_line \\\n = pd.DataFrame(dict_invoice, columns=dict_invoice.keys(), index=[0])\n \n self.data_transform(df_invoice_line)\n\n #self.feature_rfm_encode()\n\n self.feature_scale()\n\n self.list_feature_drop()\n\n self.feature_description_nlp()\n return", "def line_get_convert(self, line, part):\n ret = super(AccountInvoice, self).line_get_convert(line, part)\n\n if 'invl_id' in line:\n line_o = self.env['account.invoice.line'].browse(line['invl_id'])\n if line_o.segment_id and line_o.segment_id.id:\n ret.update({'segment_id': line_o.segment_id.id, 'segment_origin_id': line_o.segment_id.id})\n\n return ret", "def _prepare_invoice_lines(self, exchange_line, order_line):\n invoice_type = {\n 'sale.order.line': {\n 'higher': 'out_invoice', 'lower': 
'out_refund',\n 'type': 'sale', 'field': 'exchange_sale_line_id'\n },\n 'purchase.order.line': {\n 'higher': 'in_invoice', 'lower': 'in_refund',\n 'type': 'purchase', 'field': 'exchange_purchase_line_id'\n },\n }\n product = exchange_line.exchange_product_id or exchange_line.product_id\n data = {\n 'invoice_type': False,\n 'values': {\n 'product_id': product.id,\n 'quantity': exchange_line.quantity,\n 'name': 'Exchange for [%s]' % exchange_line.product_id.display_name,\n }\n }\n if exchange_line.exchange_product_id or \\\n exchange_line.price_subtotal > order_line.price_subtotal:\n data['invoice_type'] = invoice_type[order_line._name]['higher']\n elif exchange_line.price_subtotal < order_line.price_subtotal:\n data['invoice_type'] = invoice_type[order_line._name]['lower']\n else:\n return {}\n data[invoice_type[order_line._name]['type']] = order_line.order_id\n data['values'][invoice_type[order_line._name]['field']] = order_line.id\n data['values']['price_unit'] = exchange_line.price_unit\n # TODO i think we should take the different between prices NOT the all price\n # abs(exchange_line.price_unit - order_line.price_unit)\n return data", "def inv_line_new_characteristic_hashcode(self, invoice_line):\n return \"%s-%s-%s\"%(\n invoice_line['account_id'],\n invoice_line.get('analytic_account_id',\"False\"),\n invoice_line.get('date_maturity',\"False\"))", "def _get_account_analytic_invoice(self, cursor, user, picking, move_line):\n if move_line.purchase_line_id:\n return move_line.purchase_line_id.order_id.account_analytic_id.id\n return super(stock_picking, self)._get_account_analytic_invoice(cursor, user, picking, move_line)", "def get_customer_id_by_sale_id(sale_id):\n\n # your code", "def single_customer(customer_name, invoice_file):\n def customer_rental(rental_items):\n \"\"\"Closure to add single customer details\"\"\"\n with open(rental_items, 'r', newline='') as rentals:\n reader = csv.reader(rentals)\n add_invoice_items = partial(add_furniture, invoice_file, customer_name)\n for row in reader:\n add_invoice_items(item_code=row[0],\n item_description=row[1],\n item_monthly_price=row[2])\n return customer_rental", "def get_invoiced_lot_values(self):\n self.ensure_one()\n\n if self.state == 'draft':\n return []\n\n sale_orders = self.mapped('invoice_line_ids.sale_line_ids.order_id')\n stock_move_lines = sale_orders.mapped('picking_ids.move_lines.move_line_ids')\n\n # Get the other customer invoices and refunds.\n ordered_invoice_ids = sale_orders.mapped('invoice_ids') \\\n .filtered(lambda i: i.state not in ['draft', 'cancel']) \\\n .sorted(lambda i: (i.invoice_date, i.id))\n\n # Get the position of self in other customer invoices and refunds.\n self_index = None\n i = 0\n for invoice in ordered_invoice_ids:\n if invoice.id == self.id:\n self_index = i\n break\n i += 1\n\n # Get the previous invoice if any.\n previous_invoices = ordered_invoice_ids[:self_index]\n last_invoice = previous_invoices[-1] if len(previous_invoices) else None\n\n # Get the incoming and outgoing sml between self.invoice_date and the previous invoice (if any).\n write_dates = [wd for wd in self.invoice_line_ids.mapped('write_date') if wd]\n self_datetime = max(write_dates) if write_dates else None\n last_write_dates = last_invoice and [wd for wd in last_invoice.invoice_line_ids.mapped('write_date') if wd]\n last_invoice_datetime = max(last_write_dates) if last_write_dates else None\n\n def _filter_incoming_sml(ml):\n if ml.state == 'done' and ml.location_id.usage == 'customer' and ml.lot_id:\n if 
last_invoice_datetime:\n return last_invoice_datetime <= ml.date <= self_datetime\n else:\n return ml.date <= self_datetime\n return False\n\n def _filter_outgoing_sml(ml):\n if ml.state == 'done' and ml.location_dest_id.usage == 'customer' and ml.lot_id:\n if last_invoice_datetime:\n return last_invoice_datetime <= ml.date <= self_datetime\n else:\n return ml.date <= self_datetime\n return False\n\n incoming_sml = stock_move_lines.filtered(_filter_incoming_sml)\n outgoing_sml = stock_move_lines.filtered(_filter_outgoing_sml)\n\n # Prepare and return lot_values\n qties_per_lot = defaultdict(lambda: 0)\n if self.type == 'out_refund':\n for ml in outgoing_sml:\n qties_per_lot[ml.lot_id] -= ml.product_uom_id._compute_quantity(ml.qty_done, ml.product_id.uom_id)\n for ml in incoming_sml:\n qties_per_lot[ml.lot_id] += ml.product_uom_id._compute_quantity(ml.qty_done, ml.product_id.uom_id)\n else:\n for ml in outgoing_sml:\n qties_per_lot[ml.lot_id] += ml.product_uom_id._compute_quantity(ml.qty_done, ml.product_id.uom_id)\n for ml in incoming_sml:\n qties_per_lot[ml.lot_id] -= ml.product_uom_id._compute_quantity(ml.qty_done, ml.product_id.uom_id)\n lot_values = []\n for lot_id, qty in qties_per_lot.items():\n if float_is_zero(qty, precision_rounding=lot_id.product_id.uom_id.rounding):\n continue\n lot_values.append({\n 'product_name': lot_id.product_id.display_name,\n 'product_color': lot_id.x_studio_color.x_name,\n 'quantity': qty,\n 'uom_name': lot_id.product_uom_id.name,\n 'lot_name': lot_id.name\n })\n #AQUI ORDENO TODOS LOS LOTES QUE ME QUEDAN EN lot_values POR EL COLOR\n lot_values.sort(key=lambda r: r['product_color'], reverse=False)\n return lot_values", "def _prepare_order_line_invoice_line(self, cr, uid, line, account_id=False, context=None):\n res = super(sale_order_line, self)._prepare_order_line_invoice_line(cr, uid, line, account_id=account_id, context=context)\n \n res.update({'part_number': line.part_number, 'internal_part_number' : line.internal_part_number})\n return res", "def generate_customer_info(self):\n consecutive = check_consecutive(self.customerIds)\n print(\"consecutive\" + str(consecutive))\n if consecutive:\n for customer_id in self.customerIds:\n # next we need to 1) decide if the customer has insurance, and if yes, generate the EOB that gets sent to the customer\n insured = np.random.choice(\n self.distributions[\"insurance_status\"],\n 1,\n p=self.distributions[\"insurance_distribution\"],\n )[0]\n dob = self.dobs[customer_id - 1]\n customer = pd.DataFrame(\n {\n \"customer_id\": [customer_id],\n \"dob\": str(dob),\n \"insurance\": insured,\n \"experiment_id\": random.randint(1, 2),\n }\n )\n self.Customers = self.Customers.append(customer)\n else:\n print(\"Error generating customer info: customerIds aren't consecutive\")\n return True", "def get_customer_segments(self, date):\n date = current_date_to_day().isoformat() if date is None else date\n self.products = pd.merge(self.products,\n self.cs.fetch(start_date=convert_dt_to_day_str(date))[['client', 'segments']],\n on='client', how='left')", "def compute_counterpart_lines(self):\n for item in self:\n move_debit_lines = []\n move_credit_lines = []\n\n # list of all the move lines of the payment's move\n line_list = []\n for entry in item.journal_entry_ids:\n for line in entry.line_ids:\n if line.account_id.treasury_planning:\n line_list.append(line)\n\n # for each line above collect all the reconciled counterpart lines\n for line in line_list:\n if line.credit > 0 and line.debit == 0:\n for match in 
line.matched_debit_ids:\n move_debit_lines.append(match.debit_move_id.id)\n\n if line.credit == 0 and line.debit > 0:\n for match in line.matched_credit_ids:\n move_credit_lines.append(match.credit_move_id.id)\n\n if move_credit_lines:\n counterpart_move_ids = move_credit_lines\n else:\n counterpart_move_ids = move_debit_lines\n\n # bank move share is transformed to dictionary\n bank_move_dict = (ast.literal_eval(item.cf_share) if\n item.cf_share else {})\n\n # the share of each counterpart line is \"merged or added\"\n # in a weighted manner to the bank line share\n for cpt in counterpart_move_ids:\n dest_move_line = self.env['account.move.line'].browse(cpt)\n weight = round(dest_move_line.balance / item.amount, 2)\n # counterpart share is transformed into dictionary\n move_line_dict = ast.literal_eval(dest_move_line.cf_share)\n\n # each key is finally added to the bank line share\n for key, value in move_line_dict.iteritems():\n draft_dictionary = dictop.sum_dictionary(\n bank_move_dict.get(key, {}), 1,\n move_line_dict.get(key, {}), weight)\n bank_move_dict[key] = dictop.check_dict_total(\n draft_dictionary, 1)\n\n # the dictionary is transformed into string and assigned\n item.cf_share = json.dumps(bank_move_dict)", "def single_customer(customer_name, invoice_file):\n def customer_rental(rental_items):\n \"\"\"\n Loop through rental_items file and append each row to curried invoice_file with same\n customer_name\n \"\"\"\n customer = partial(add_furniture, invoice_file=invoice_file, customer_name=customer_name)\n with open(rental_items, \"r\") as rental_csv:\n for row in csv.reader(rental_csv):\n customer(item_code=row[0], item_description=row[1], item_monthly_price=row[2])\n return customer_rental", "def _prepare_invoice_line(self, inv_id):\n res = {}\n account_id = self.product_id.property_account_income_id.id or self.product_id.categ_id.property_account_income_categ_id.id\n if not account_id:\n raise UserError(_('Please define income account for this product: \"%s\" (id:%d).') % \\\n (self.product_id.name, self.product_id.id,))\n price_unit = self.product_id.lst_price\n res = {\n 'invoice_id': inv_id.id,\n 'name': self.name,\n 'origin': self.order_id.name,\n 'account_id': account_id,\n 'uom_id': self.product_uom_id.id,\n 'quantity': self.product_uom_qty,\n 'price_unit': price_unit,\n 'product_id': self.product_id.id,\n 'invoice_line_tax_id': False,\n 'order_line_id': self.id\n }\n return res", "def get_num_of_sales_per_customer_ids():\n\n # your code", "def get_customer_info(self, customer_id):\n #requested_customer_RDD = self.sc.parallelize(movie_ids).map(lambda x: (user_id, x))\n # Get predicted ratings\n customers = self.__get_customers(customer_id).collect()\n\n return customers", "def invoices(self,org_id=None,invoice_id=''):\n if org_id is None:\n org_id = self.org_id\n return self.get('{}/orgs/{}/invoices/{}'.format(ApiVersion.A1.value,org_id,invoice_id))", "def onchange_invoice_id(self):\n # self.invoice_id = False\n # self.base_amount = 0.0\n # self.wh_src_rate = 5.0\n if self._context is None:\n context = {}\n res = {}\n inv_obj = self.env['account.invoice']\n if not self.invoice_id:\n return {'value': {\n 'invoice_id': False,\n 'base_amount': 0.0,\n 'wh_src_rate': 0.0,\n 'wh_amount': 0.0, }\n }\n\n inv_brw = inv_obj.browse(self.invoice_id.id)\n base_amount = self.base_amount or inv_brw.amount_untaxed\n wh_src_rate = self.wh_src_rate or inv_brw.wh_src_rate or 5.0\n wh_amount = base_amount * wh_src_rate / 100.0\n res = {'value': {\n 'base_amount': base_amount,\n 
'wh_src_rate': wh_src_rate,\n 'wh_amount': wh_amount,\n }\n }\n return res", "def create_invoice(self):\n for line in self:\n # if not line.account_id:\n # raise UserError(_('Please Add the incoming Account !!'))\n self.ensure_one()\n journal_id = self.env['account.journal'].search([\n ('type', '=', 'sale')], limit=1)\n inv_line_main = {\n 'name': line.description.name,\n 'price_unit': line.amount or 0.00,\n 'quantity': 1,\n 'discount': line.discount,\n 'account_id': line.description.property_account_income_id.id or line.description.categ_id.property_account_income_categ_id.id or False,\n }\n inv_values = {\n 'partner_id': line.patient_id.partner_id.id,\n 'patient_id': line.patient_id.id,\n 'dentist': line.dentist.id,\n 'move_type': 'out_invoice',\n 'invoice_date': datetime.now().strftime(DF) or False,\n 'journal_id': journal_id and journal_id.id or False,\n 'teeth_id': line.patient_id and line.patient_id.id or False,\n }\n acc_id = self.env['account.move'].create(inv_values)\n acc_id.write({'invoice_line_ids': [(0, 0, inv_line_main)]})\n\n self.write({'invc_id': acc_id.id, 'inv': True})\n context = dict(self._context or {})\n wiz_form_id = self.env['ir.model.data'].get_object_reference(\n 'account', 'view_move_form')[1]\n\n return {\n 'view_type': 'form',\n 'view_id': wiz_form_id,\n 'view_mode': 'form',\n 'res_model': 'account.move',\n 'res_id': self.invc_id.id,\n 'type': 'ir.actions.act_window',\n 'target': 'current',\n 'context': context,\n }", "def customer_id(uncapped_flatprice, uncapped_flatprice_finalizer, team_multisig) -> int:\n customer_id = int(uuid.uuid4().hex, 16) # Customer ids are 128-bit UUID v4\n return customer_id", "def _get_customer_ID(self, file):\n tree = ET.parse(file)\n root = tree.getroot()\n \n try:\n customer_ID = root.find('JournalReport').find('SaleEvent').find('TransactionDetailGroup').find('TransactionLine').find('CustomerID').find('PersonalID').text\n except:\n time.sleep(1)\n customer_ID = self._get_customer_ID(file)\n\n return customer_ID", "def line_segment_intersection(line1,\n line2):\n a = float(line1[0][0]*line1[1][1] - line1[0][1]*line1[1][0])\n b = float(line1[0][1] - line1[1][1])\n c = float(line1[1][0] - line1[0][0])\n\n d = float(line2[0][0]*line2[1][1] - line2[0][1]*line2[1][0])\n e = float(line2[0][1] - line2[1][1])\n f = float(line2[1][0] - line2[0][0])\n\n prod = b*f - c*e\n if abs(prod) < 1e-10:\n return (np.inf, np.inf)\n\n xc = (d*c - a*f) / prod\n yc = (a*e - b*d) / prod\n\n sign_x1 = (xc - line1[0][0])*(xc - line1[1][0])\n sign_y1 = (yc - line1[0][1])*(yc - line1[1][1])\n\n if sign_x1 > 1e-10:\n return (np.inf, np.inf)\n if sign_x1 < 1e-10:\n if sign_y1 > 1e-10:\n return (np.inf, np.inf)\n\n sign_x2 = (xc - line2[0][0])*(xc - line2[1][0])\n sign_y2 = (yc - line2[0][1])*(yc - line2[1][1])\n\n if sign_x2 > 1e-10:\n return (np.inf, np.inf)\n if sign_x2 == 1e-10:\n if sign_y2 > 1e-10:\n return (np.inf, np.inf)\n return (int(xc), int(yc))" ]
[ "0.7056929", "0.62926954", "0.61490464", "0.6014562", "0.59545195", "0.5511189", "0.54317766", "0.5338872", "0.53378683", "0.53256035", "0.5227046", "0.5198791", "0.5182063", "0.51410466", "0.5138717", "0.51151025", "0.51103526", "0.51066583", "0.5078583", "0.5048637", "0.50242376", "0.5010777", "0.49733216", "0.49539632", "0.4936336", "0.49357", "0.49330315", "0.49115035", "0.49027282", "0.49017695" ]
0.81526893
0
This function creates an invoice by compounding invoice lines from the data given as parameters. Once done, it computes the market segment the customer belongs to. If customerID is None, then a new customer identifier is created before the order process takes place.
def order_process(self, customerID, list_stockCode, list_quantity\ , orderDate=None): segmentID = -1 #------------------------------------------------------------------------- # A new customer is created and inserted into data-set. #------------------------------------------------------------------------- if customerID is None: customerID = int(self.createCustomerID()) else: pass #------------------------------------------------------------------------- # A new dataframe with new invoice lines are created. #------------------------------------------------------------------------- df_invoice_line = self.create_customer_df_invoice_line(customerID\ , list_stockCode, list_quantity, orderDate) #------------------------------------------------------------------------- # Original dataframe is updated with customer invoices lines. #------------------------------------------------------------------------- print("order_process : shape before concat= "+str(self._df_invoice_original.shape)) self._df_invoice_original \ = pd.concat([self._df_invoice_original, df_invoice_line], axis=0) print("order_process : shape after concat= "+str(self._df_invoice_original.shape)) #------------------------------------------------------------------------- # All invoices lines (including new one) related to customer is retrieved # from original dataframe. #------------------------------------------------------------------------- df_invoice_line_customer \ = self.get_customer_history_df_invoice_line(customerID) #------------------------------------------------------------------------- # When calling get_customer_marketSegment(), df_invoice_line_customer is # concatened to the original dataframe. #------------------------------------------------------------------------- segmentID = self.get_customer_marketSegment(df_invoice_line_customer) return segmentID, customerID
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_customer_df_invoice_line(self, customerID, list_stockCode\\\n , list_quantity, invoiceDate):\n \n dict_invoice = dict()\n\n dict_invoice['Quantity'] = list_quantity\n dict_invoice['StockCode'] = list_stockCode\n\n #------------------------------------------------------------------------\n # Build invoiceDate from local current time\n #------------------------------------------------------------------------\n if invoiceDate is None:\n time_struct = time.localtime()\n invoiceDate = str(time_struct.tm_year)+'-'+str(time_struct.tm_mon)\\\n +'-'+str(time_struct.tm_mday)\n invoiceDate +=' '\n invoiceDate +=str(time_struct.tm_hour)+':'+str(time_struct.tm_min)\\\n +':'+str(time_struct.tm_sec)\n invoiceDate = pd.Timestamp(invoiceDate)\n else:\n pass\n\n\n #------------------------------------------------------------------------\n # Lists initialization\n #------------------------------------------------------------------------\n list_customerID = list()\n list_invoiceNo = list()\n list_invoiceDate = list()\n list_invoice_line_index = list()\n \n #------------------------------------------------------------------------\n # Increase Invoice number\n #------------------------------------------------------------------------\n invoiceNo = max(self._df_invoice_original.InvoiceNo)\n invoiceNo += 1\n\n #------------------------------------------------------------------------\n # Get latest invoice line index value\n #------------------------------------------------------------------------\n invoice_line_index = max(self._df_invoice_original.index)\n\n #------------------------------------------------------------------------\n # Build lists for CustomerID, InvoiceNo, InvoiceDate\n # A list of incremented indexes is built for new rows.\n #------------------------------------------------------------------------\n for quantity in list_quantity:\n list_customerID.append(customerID)\n list_invoiceNo.append(invoiceNo)\n list_invoiceDate.append(invoiceDate)\n invoice_line_index += 1\n list_invoice_line_index.append(invoice_line_index) \n\n \n dict_invoice['CustomerID'] = list_customerID\n dict_invoice['InvoiceNo'] = list_invoiceNo\n dict_invoice['InvoiceDate'] = list_invoiceDate\n\n #------------------------------------------------------------------------\n # Get description list from list of stock codes.\n #------------------------------------------------------------------------\n list_description = self.getDescriptionList(list_stockCode)\n \n dict_invoice['Description'] = list_description\n\n #------------------------------------------------------------------------\n # Get unit price list from list of stock codes.\n #------------------------------------------------------------------------\n list_unitPrice = self.getUnitPriceList(list_stockCode)\n \n dict_invoice['UnitPrice'] = list_unitPrice\n\n #------------------------------------------------------------------------\n # Dataframe with new invoices lines is created.\n #------------------------------------------------------------------------\n df_invoice_line \\\n = pd.DataFrame(dict_invoice, columns=dict_invoice.keys()\\\n , index=list_invoice_line_index)\n \n return df_invoice_line", "def _create_invoice(self):\n self.ensure_one()\n partner = self.member_id.partner_id\n invoice = self.env['account.invoice'].create({\n 'partner_id': partner.id,\n 'account_id': partner.property_account_receivable_id.id,\n 'fiscal_position_id': partner.property_account_position_id.id\n })\n for line in self.line_ids:\n product = line.activity_id.product_id\n # 
Handling of invoice lines : needs cache record for onchange, then\n # real writing...\n invoice_line = self.env['account.invoice.line'].new({\n 'product_id': product.id,\n 'invoice_id': invoice.id\n })\n invoice_line._onchange_product_id()\n line_values = dict(invoice_line._cache)\n line_values['price_unit'] = line.price\n invoice_line = self.env['account.invoice.line'].create(line_values)\n invoice.compute_taxes()\n line.registration_id.invoice_line_id = invoice_line.id\n return invoice", "def get_customer_marketSegment(self, df_invoice_line_customer):\n #-------------------------------------------------------------------------\n # Building data model \n #-------------------------------------------------------------------------\n self.data_transform(df_invoice_line_customer)\n\n #-------------------------------------------------------------------------\n # Customer features are built thanks to transformers.\n #-------------------------------------------------------------------------\n self.df_customers_features_build()\n \n #-------------------------------------------------------------------------\n # Customer market segment is predicted\n #-------------------------------------------------------------------------\n X_test = self._df_customers.values\n y_pred = self._classifier_model.predict(X_test)\n segmentID = y_pred[0]\n \n return segmentID", "def action_invoice_create(self, grouped=False, final=False):\n if self.invoice_option == 'before_delivery':\n inv_obj = self.env['account.invoice']\n for order in self:\n inv_data = order._prepare_invoice()\n invoice = inv_obj.create(inv_data)\n for inv_line in order.order_line:\n inv_line.invoice_line_create(invoice.id, inv_line.product_uom_qty)\n\n else:\n inv_obj = self.env['account.invoice']\n precision = self.env['decimal.precision'].precision_get('Product Unit of Measure')\n invoices = {}\n references = {}\n invoices_origin = {}\n invoices_name = {}\n\n # Keep track of the sequences of the lines\n # To keep lines under their section\n inv_line_sequence = 0\n for order in self:\n group_key = order.id if grouped else (order.partner_invoice_id.id, order.currency_id.id)\n\n # We only want to create sections that have at least one invoiceable line\n pending_section = None\n\n # Create lines in batch to avoid performance problems\n line_vals_list = []\n # sequence is the natural order of order_lines\n for line in order.order_line:\n if line.display_type == 'line_section':\n pending_section = line\n continue\n if float_is_zero(line.qty_to_invoice, precision_digits=precision):\n continue\n if group_key not in invoices:\n inv_data = order._prepare_invoice()\n invoice = inv_obj.create(inv_data)\n references[invoice] = order\n invoices[group_key] = invoice\n invoices_origin[group_key] = [invoice.origin]\n invoices_name[group_key] = [invoice.name]\n elif group_key in invoices:\n if order.name not in invoices_origin[group_key]:\n invoices_origin[group_key].append(order.name)\n if order.client_order_ref and order.client_order_ref not in invoices_name[group_key]:\n invoices_name[group_key].append(order.client_order_ref)\n\n if line.qty_to_invoice > 0 or (line.qty_to_invoice < 0 and final):\n if pending_section:\n section_invoice = pending_section.invoice_line_create_vals(\n invoices[group_key].id,\n pending_section.qty_to_invoice\n )\n inv_line_sequence += 1\n section_invoice[0]['sequence'] = inv_line_sequence\n line_vals_list.extend(section_invoice)\n pending_section = None\n\n inv_line_sequence += 1\n inv_line = line.invoice_line_create_vals(\n 
invoices[group_key].id, line.qty_to_invoice\n )\n inv_line[0]['sequence'] = inv_line_sequence\n line_vals_list.extend(inv_line)\n\n if references.get(invoices.get(group_key)):\n if order not in references[invoices[group_key]]:\n references[invoices[group_key]] |= order\n\n self.env['account.invoice.line'].create(line_vals_list)\n\n for group_key in invoices:\n invoices[group_key].write({'name': ', '.join(invoices_name[group_key]),\n 'origin': ', '.join(invoices_origin[group_key])})\n sale_orders = references[invoices[group_key]]\n if len(sale_orders) == 1:\n invoices[group_key].reference = sale_orders.reference\n\n if not invoices:\n raise UserError(_(\n 'There is no invoiceable line. If a product has a Delivered quantities invoicing policy, please make sure that a quantity has been delivered.'))\n\n for invoice in invoices.values():\n invoice.compute_taxes()\n if not invoice.invoice_line_ids:\n raise UserError(_(\n 'There is no invoiceable line. If a product has a Delivered quantities invoicing policy, please make sure that a quantity has been delivered.'))\n # If invoice is negative, do a refund invoice instead\n if invoice.amount_total < 0:\n invoice.type = 'out_refund'\n for line in invoice.invoice_line_ids:\n line.quantity = -line.quantity\n # Use additional field helper function (for account extensions)\n for line in invoice.invoice_line_ids:\n line._set_additional_fields(invoice)\n # Necessary to force computation of taxes. In account_invoice, they are triggered\n # by onchanges, which are not triggered when doing a create.\n invoice.compute_taxes()\n # Idem for partner\n so_payment_term_id = invoice.payment_term_id.id\n fp_invoice = invoice.fiscal_position_id\n invoice._onchange_partner_id()\n invoice.fiscal_position_id = fp_invoice\n # To keep the payment terms set on the SO\n invoice.payment_term_id = so_payment_term_id\n invoice.message_post_with_view('mail.message_origin_link',\n values={'self': invoice, 'origin': references[invoice]},\n subtype_id=self.env.ref('mail.mt_note').id)\n return [inv.id for inv in invoices.values()]", "def action_create_invoices(self, data):\n invoice_obj = self.env['account.invoice']\n values = {}\n for val in data:\n values.setdefault(val['invoice_type'], {\n 'order': val.get('sale', val.get('purchase')),\n 'values': []\n })\n values[val['invoice_type']]['values'].append((0, 0, val['values']))\n\n for inv_type, inv_data in values.items():\n invoice = invoice_obj.new(self._prepare_invoice(inv_type))\n invoice._onchange_partner_id()\n inv = invoice._convert_to_write({\n name: invoice[name] for name in invoice._cache\n })\n for _, _, line in inv_data['values']:\n line['account_id'] = inv['account_id']\n inv['invoice_line_ids'] = inv_data['values']\n new_invoice = invoice_obj.sudo().create(inv)\n new_invoice.action_invoice_open()\n inv_data['order'].write({\n 'exchange_invoice_ids': [(4, new_invoice.id)]\n })", "def _prepare_invoice(self, cr, uid, order, lines, context=None):\n if context is None:\n context = {}\n journal_id = self.pool['account.invoice'].default_get(cr, uid, ['journal_id'], context=context)['journal_id']\n if not journal_id:\n raise osv.except_osv(_('Error!'),\n _('Please define sales journal for this company: \"%s\" (id:%d).') % (order.company_id.name, order.company_id.id))\n invoice_vals = {\n 'name': order.client_order_ref or '',\n 'origin': order.name,\n 'type': 'out_invoice',\n 'reference': order.client_order_ref or order.name,\n 'account_id': order.partner_invoice_id.property_account_receivable.id,\n 'partner_id': 
order.partner_invoice_id.id,\n 'journal_id': journal_id,\n 'invoice_line': [(6, 0, lines)],\n 'currency_id': order.pricelist_id.currency_id.id,\n 'comment': order.note,\n 'payment_term': order.payment_term and order.payment_term.id or False,\n 'fiscal_position': order.fiscal_position.id or order.partner_invoice_id.property_account_position.id,\n 'date_invoice': context.get('date_invoice', False),\n 'company_id': order.company_id.id,\n 'user_id': order.user_id and order.user_id.id or False,\n 'section_id' : order.section_id.id,\n 'test_1' :order.test\n }\n\n # Care for deprecated _inv_get() hook - FIXME: to be removed after 6.1\n invoice_vals.update(self._inv_get(cr, uid, order, context=context))\n return invoice_vals", "def _prepare_invoice(self, cr, uid, order, lines, context=None):\n if context is None:\n context = {}\n journal_ids = self.pool.get('account.journal').search(cr, uid,\n [('type', '=', 'sale'), ('company_id', '=', order.company_id.id)],\n limit=1)\n if not journal_ids:\n raise osv.except_osv(_('Error !'),\n _('There is no sales journal defined for this company: \"%s\" (id:%d)') % (order.company_id.name, order.company_id.id))\n\n invoice_vals = {\n 'name': order.client_order_ref or '',\n 'origin': order.name,\n 'type': 'out_invoice',\n 'reference': order.client_order_ref or order.name,\n 'account_id': order.partner_id.property_account_receivable.id,\n 'journal_id': order.partner_id.property_default_sale_invoice_journal.id,\n 'partner_id': order.partner_id.id,\n 'address_invoice_id': order.partner_invoice_id.id,\n #'address_contact_id': order.partner_order_id.id,\n 'invoice_line': [(6, 0, lines)],\n 'currency_id': order.pricelist_id.currency_id.id,\n 'comment': order.note,\n 'payment_term': order.payment_term and order.payment_term.id or False,\n 'fiscal_position': order.fiscal_position.id or order.partner_id.property_account_position.id,\n 'date_invoice': context.get('date_invoice', False),\n 'company_id': order.company_id.id,\n 'user_id': order.user_id and order.user_id.id or False\n }\n\n # Care for deprecated _inv_get() hook - FIXME: to be removed after 6.1\n invoice_vals.update(self._inv_get(cr, uid, order, context=context))\n\n return invoice_vals", "def action_invoice_create(self, grouped=False, final=False):\n inv_obj = self.env['account.invoice']\n precision = self.env['decimal.precision'].sudo().precision_get('Product Unit of Measure')\n invoices = {}\n references = {}\n for order in self:\n group_key = order.id if grouped else (order.partner_invoice_id.id, order.currency_id.id)\n for line in order.order_line.sorted(key=lambda l: l.qty_to_invoice < 0):\n if float_is_zero(line.qty_to_invoice, precision_digits=precision):\n continue\n if group_key not in invoices:\n inv_data = order._prepare_invoice()\n invoice = inv_obj.sudo().create(inv_data)\n references[invoice] = order\n invoices[group_key] = invoice\n invoice['sale_order_id'] = order.id\n elif group_key in invoices:\n vals = {}\n if order.name not in invoices[group_key].origin.split(', '):\n vals['origin'] = invoices[group_key].origin + ', ' + order.name\n if order.client_order_ref and order.client_order_ref not in invoices[group_key].name.split(\n ', ') and order.client_order_ref != invoices[group_key].name:\n vals['name'] = invoices[group_key].name + ', ' + order.client_order_ref\n invoices[group_key].sudo().write(vals)\n if line.qty_to_invoice > 0:\n line.invoice_line_create(invoices[group_key].id, line.qty_to_invoice)\n elif line.qty_to_invoice < 0 and final:\n line.invoice_line_create(invoices[group_key].id, 
line.qty_to_invoice)\n\n if references.get(invoices.get(group_key)):\n if order not in references[invoices[group_key]]:\n references[invoices[group_key]] |= order\n if not invoices:\n raise UserError(_('There is no invoiceable line.'))\n for invoice in invoices.values():\n if not invoice.invoice_line_ids:\n raise UserError(_('There is no invoiceable line.'))\n # If invoice is negative, do a refund invoice instead\n if invoice.amount_untaxed < 0:\n invoice.type = 'out_refund'\n for line in invoice.invoice_line_ids:\n line.quantity = -line.quantity\n # Use additional field helper function (for account extensions)\n for line in invoice.invoice_line_ids:\n line._set_additional_fields(invoice)\n # Necessary to force computation of taxes. In account_invoice, they are triggered\n # by onchanges, which are not triggered when doing a create.\n invoice.compute_taxes()\n invoice.message_post_with_view('mail.message_origin_link',\n values={'self': invoice, 'origin': references[invoice]},\n subtype_id=self.env.ref('mail.mt_note').id)\n return [inv.id for inv in invoices.values()]", "def action_invoice_create(self, grouped=False, final=False):\n inv_obj = self.env['account.invoice']\n precision = self.env['decimal.precision'].precision_get('Product Unit of Measure')\n invoices = {}\n references = {}\n invoices_origin = {}\n invoices_name = {}\n for order in self:\n group_key = order.id if grouped else (order.partner_id.id, order.currency_id.id)\n for line in order.order_line.sorted(key=lambda l: l.qty_received - l.qty_invoiced < 0):\n if float_is_zero(line.qty_received - line.qty_invoiced, precision_digits=precision):\n continue\n if group_key not in invoices:\n inv_data = order._prepare_invoice()\n invoice = inv_obj.create(inv_data)\n references[invoice] = order\n invoices[group_key] = invoice\n invoices_origin[group_key] = [invoice.origin]\n invoices_name[group_key] = [invoice.name]\n elif group_key in invoices:\n if order.name not in invoices_origin[group_key]:\n invoices_origin[group_key].append(order.name)\n if order.partner_ref and order.partner_ref not in invoices_name[group_key]:\n invoices_name[group_key].append(order.partner_ref)\n\n if line.qty_received - line.qty_invoiced > 0:\n line.invoice_line_create(invoices[group_key].id, line.qty_received - line.qty_invoiced)\n elif line.qty_received - line.qty_invoiced < 0 and final:\n line.invoice_line_create(invoices[group_key].id, line.qty_received - line.qty_invoiced)\n\n if references.get(invoices.get(group_key)):\n if order not in references[invoices[group_key]]:\n references[invoices[group_key]] |= order\n\n for group_key in invoices:\n invoices[group_key].write({'name': ', '.join(invoices_name[group_key]),\n 'origin': ', '.join(invoices_origin[group_key])})\n\n if not invoices:\n raise UserError(_('There is no invoicable line.'))\n\n for invoice in invoices.values():\n if not invoice.invoice_line_ids:\n raise UserError(_('There is no invoicable line.'))\n # If invoice is negative, do a refund invoice instead\n if invoice.amount_total < 0:\n invoice.type = 'in_refund'\n for line in invoice.invoice_line_ids:\n line.quantity = -line.quantity\n # Necessary to force computation of taxes. 
In account_invoice, they are triggered\n # by onchanges, which are not triggered when doing a create.\n invoice.compute_taxes()\n invoice.message_post_with_view('mail.message_origin_link',\n values={'self': invoice, 'origin': references[invoice]},\n subtype_id=self.env.ref('mail.mt_note').id)\n return [inv.id for inv in invoices.values()]", "def invoice(customer_id):\n encoder = request.url_rule.endpoint\n template = \"{{ encoder }}#{{ customer_id|%s }}\" % encoder\n return render_template_string(template, **locals())", "def create_invoice(self):\n for line in self:\n # if not line.account_id:\n # raise UserError(_('Please Add the incoming Account !!'))\n self.ensure_one()\n journal_id = self.env['account.journal'].search([\n ('type', '=', 'sale')], limit=1)\n inv_line_main = {\n 'name': line.description.name,\n 'price_unit': line.amount or 0.00,\n 'quantity': 1,\n 'discount': line.discount,\n 'account_id': line.description.property_account_income_id.id or line.description.categ_id.property_account_income_categ_id.id or False,\n }\n inv_values = {\n 'partner_id': line.patient_id.partner_id.id,\n 'patient_id': line.patient_id.id,\n 'dentist': line.dentist.id,\n 'move_type': 'out_invoice',\n 'invoice_date': datetime.now().strftime(DF) or False,\n 'journal_id': journal_id and journal_id.id or False,\n 'teeth_id': line.patient_id and line.patient_id.id or False,\n }\n acc_id = self.env['account.move'].create(inv_values)\n acc_id.write({'invoice_line_ids': [(0, 0, inv_line_main)]})\n\n self.write({'invc_id': acc_id.id, 'inv': True})\n context = dict(self._context or {})\n wiz_form_id = self.env['ir.model.data'].get_object_reference(\n 'account', 'view_move_form')[1]\n\n return {\n 'view_type': 'form',\n 'view_id': wiz_form_id,\n 'view_mode': 'form',\n 'res_model': 'account.move',\n 'res_id': self.invc_id.id,\n 'type': 'ir.actions.act_window',\n 'target': 'current',\n 'context': context,\n }", "def create_invoices(self, cr, uid, ids, context=None):\n invoice_list = []\n po_obj = self.pool.get('purchase.order')\n inv_line_obj = self.pool.get('account.invoice.line')\n inv_obj = self.pool.get('account.invoice')\n addr_obj = self.pool.get('res.partner')\n journal_obj = self.pool.get('account.journal')\n if context is None:\n context = {}\n\n for purchase_adv_obj in self.browse(cr, uid, ids, context=context):\n for purchase_order in po_obj.browse(cr, uid, context.get('active_ids', []), context=context):\n inv_line_ids = []\n invoice_ids = []\n val = inv_line_obj.product_id_change(cr, uid, [], purchase_adv_obj.product_id.id,\n uom_id=False, partner_id=purchase_order.partner_id.id, fposition_id=purchase_order.fiscal_position.id)\n line_id = inv_line_obj.create(cr, uid, {\n 'name': val['value']['name'],\n 'account_id': val['value']['account_id'],\n 'price_unit': purchase_adv_obj.amount,\n 'quantity': purchase_adv_obj.qtty,\n 'discount': False,\n 'uos_id': val['value']['uos_id'],\n 'product_id': purchase_adv_obj.product_id.id,\n 'invoice_line_tax_id': [(6, 0, val['value']['invoice_line_tax_id'])],\n })\n inv_line_ids.append(line_id)\n addr = addr_obj.address_get(cr, uid, [purchase_order.partner_id.id], ['invoice'])\n journal_ids = journal_obj.search(cr, uid, [('type', '=', 'purchase')])\n context.update({'type':'in_invoice','journal_type':'purchase'})\n inv_vals = {\n 'name': purchase_order.partner_ref or purchase_order.name,\n 'origin': purchase_order.name,\n 'type': 'in_invoice',\n 'reference': False,\n 'account_id': purchase_order.partner_id.property_account_payable.id,\n 'journal_id':journal_ids and 
journal_ids[0] or False,\n 'partner_id': purchase_order.partner_id.id,\n 'address_invoice_id': addr['invoice'],\n 'invoice_line': [(6, 0, inv_line_ids)],\n 'currency_id': purchase_order.pricelist_id.currency_id.id,\n 'comment': '',\n 'payment_term': purchase_order.payment_term_id and purchase_order.payment_term_id.id or False,\n 'fiscal_position': purchase_order.fiscal_position.id or purchase_order.partner_id.property_account_position.id,\n 'prepaid': True\n }\n\n inv_id = inv_obj.create(cr, uid, inv_vals, context=context)\n inv_obj.button_reset_taxes(cr, uid, [inv_id], context=context)\n for invoice in purchase_order.invoice_ids:\n invoice_ids.append(invoice.id)\n invoice_ids.append(inv_id)\n po_obj.write(cr, uid, purchase_order.id, {'invoice_ids': [(6, 0, invoice_ids)]})\n invoice_list.append(inv_id)\n\n if purchase_order.invoice_method in ('picking','order'):\n self.pool.get('purchase.order.line').create(cr, uid, {\n 'order_id': purchase_order.id,\n 'name': val['value']['name'],\n 'date_planned':purchase_order.date_order,\n 'price_unit': -purchase_adv_obj.amount,\n 'product_uom_qty': purchase_adv_obj.qtty,\n 'product_uos': val['value']['uos_id'],\n 'product_uom': val['value']['uos_id'],\n 'product_id': purchase_adv_obj.product_id.id,\n 'adavance_product':True,\n 'discount': False,\n 'taxes_id': [(6, 0, val['value']['invoice_line_tax_id'])],\n }, context=context)\n\n\n context.update({'invoice_id':invoice_list})\n return {\n 'name': 'Open Invoice',\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_model': 'purchase.open.invoice',\n 'type': 'ir.actions.act_window',\n 'target': 'new',\n 'context': context\n }", "def invoice_line_create(self, invoice_id, qty):\n invoice_lines = self.env['account.invoice.line']\n precision = self.env['decimal.precision'].precision_get('Product Unit of Measure')\n for line in self:\n if not float_is_zero(qty, precision_digits=precision):\n vals = line._prepare_invoice_line(qty=qty)\n vals.update({'invoice_id': invoice_id, 'purchase_line_id': line.id})\n invoice_lines |= self.env['account.invoice.line'].create(vals)\n return invoice_lines", "def generate_eob(\n self, date_of_service, date_of_eob, insured, invoice_id, cpt_code, charge_amount\n ):\n if insured == \"insured\":\n # first copayments\n copay_amount = np.random.choice(\n self.distributions[\"copay_amounts\"],\n 1,\n p=self.distributions[\"copay_distribution\"],\n )[0]\n if copay_amount > 0:\n invoicelineitemeob = pd.DataFrame(\n {\n \"invoice_id\": [invoice_id],\n \"cpt_code\": [cpt_code],\n \"created_on\": [date_of_service],\n \"copay_amount\": [copay_amount],\n \"adjustment_amount\": [0],\n \"paid_amount\": [0],\n }\n )\n self.InvoiceLineItemsEob = self.InvoiceLineItemsEob.append(\n invoicelineitemeob\n )\n remaining_charge = charge_amount - copay_amount\n else:\n remaining_charge = charge_amount\n # next eob discounts\n eob_discount_percent = np.random.choice(\n self.distributions[\"eob_discount_percentages\"],\n 1,\n p=self.distributions[\"eob_discount_distribution\"],\n )[0]\n if eob_discount_percent > 0:\n insurance_adjustment = remaining_charge * eob_discount_percent / 100\n remaining_charge = remaining_charge - insurance_adjustment\n invoicelineitemeob = pd.DataFrame(\n {\n \"invoice_id\": [invoice_id],\n \"cpt_code\": [cpt_code],\n \"created_on\": [date_of_eob],\n \"copay_amount\": [0],\n \"adjustment_amount\": [insurance_adjustment],\n \"paid_amount\": [0],\n }\n )\n self.InvoiceLineItemsEob = self.InvoiceLineItemsEob.append(\n invoicelineitemeob\n )\n # next handle eob payments where 
relevant\n eob_payment_percentage = np.random.choice(\n self.distributions[\"eob_payment_percentages\"],\n 1,\n p=self.distributions[\"eob_payment_distribution\"],\n )[0]\n eob_payment_amount = remaining_charge * (eob_payment_percentage / 100.0)\n if eob_payment_amount > 0:\n invoicelineitemeob = pd.DataFrame(\n {\n \"invoice_id\": [invoice_id],\n \"cpt_code\": [cpt_code],\n \"created_on\": [date_of_eob],\n \"copay_amount\": [0],\n \"adjustment_amount\": [0],\n \"paid_amount\": [eob_payment_amount],\n }\n )\n self.InvoiceLineItemsEob = self.InvoiceLineItemsEob.append(\n invoicelineitemeob\n )\n remaining_charge = remaining_charge - eob_payment_amount\n else:\n remaining_charge = charge_amount\n return remaining_charge", "def action_invoice_create(self, cr, uid, ids, grouped=False, states=None, date_invoice=False, context=None):\n order = self.browse(cr, uid, ids[0], context=context)\n inv_obj = self.pool.get('account.invoice')\n # create the invoice\n inv_id = super(sale_order, self).action_invoice_create(cr, uid, ids, grouped, states, date_invoice, context=context)\n # modify the invoice\n inv_obj.write(cr, uid, [inv_id], {'past_doc': order.past_doc})\n return inv_id", "def action_invoice_create(self, cr, uid, ids, context=None):\n res = False\n\n journal_obj = self.pool.get('account.journal')\n inv_obj = self.pool.get('account.invoice')\n inv_line_obj = self.pool.get('account.invoice.line')\n fiscal_obj = self.pool.get('account.fiscal.position')\n\n for order in self.browse(cr, uid, ids, context=context):\n# pay_acc_id = order.partner_id.property_account_payable.id\n #use a new method to get the account_id\n pay_acc_id = self._get_inv_pay_acc_id(cr,uid,order) \n journal_ids = journal_obj.search(cr, uid, [('type', '=','purchase'),('company_id', '=', order.company_id.id)], limit=1)\n if not journal_ids:\n raise osv.except_osv(_('Error!'),\n _('Define purchase journal for this company: \"%s\" (id:%d).') % (order.company_id.name, order.company_id.id))\n\n # generate invoice line correspond to PO line and link that to created invoice (inv_id) and PO line\n inv_lines = []\n for po_line in order.order_line:\n #check if this line have quantity to generate invoice, by johnw\n if po_line.product_qty <= po_line.invoice_qty:\n continue \n# if po_line.product_id:\n# acc_id = po_line.product_id.property_account_expense.id\n# if not acc_id:\n# acc_id = po_line.product_id.categ_id.property_account_expense_categ.id\n# if not acc_id:\n# raise osv.except_osv(_('Error!'), _('Define expense account for this company: \"%s\" (id:%d).') % (po_line.product_id.name, po_line.product_id.id,))\n# else:\n# acc_id = property_obj.get(cr, uid, 'property_account_expense_categ', 'product.category').id \n #use a new method to get the account_id, by johnw \n acc_id = self._get_inv_line_exp_acc_id(cr,uid,order,po_line)\n fpos = order.fiscal_position or False\n acc_id = fiscal_obj.map_account(cr, uid, fpos, acc_id)\n\n inv_line_data = self._prepare_inv_line(cr, uid, acc_id, po_line, context=context)\n #update the quantity to the quantity, by johnw\n inv_line_data.update({'quantity':(po_line.product_qty - po_line.invoice_qty)})\n inv_line_id = inv_line_obj.create(cr, uid, inv_line_data, context=context)\n inv_lines.append(inv_line_id)\n\n po_line.write({'invoiced':True, 'invoice_lines': [(4, inv_line_id)]}, context=context)\n \n #if no lines then return direct, by johnw\n if len(inv_lines) == 0:\n continue\n \n # get invoice data and create invoice\n inv_data = {\n 'name': order.partner_ref or order.name,\n 'reference': 
order.partner_ref or order.name,\n 'account_id': pay_acc_id,\n 'type': 'in_invoice',\n 'partner_id': order.partner_id.id,\n 'currency_id': order.pricelist_id.currency_id.id,\n 'journal_id': len(journal_ids) and journal_ids[0] or False,\n 'invoice_line': [(6, 0, inv_lines)],\n 'origin': order.name,\n 'fiscal_position': order.fiscal_position.id or False,\n 'payment_term': order.payment_term_id.id or False,\n 'company_id': order.company_id.id,\n }\n inv_id = inv_obj.create(cr, uid, inv_data, context=context)\n\n # compute the invoice\n inv_obj.button_compute(cr, uid, [inv_id], context=context, set_total=True)\n\n # Link this new invoice to related purchase order\n order.write({'invoice_ids': [(4, inv_id)]}, context=context)\n res = inv_id\n return res", "def _prepare_invoice_lines(self, exchange_line, order_line):\n invoice_type = {\n 'sale.order.line': {\n 'higher': 'out_invoice', 'lower': 'out_refund',\n 'type': 'sale', 'field': 'exchange_sale_line_id'\n },\n 'purchase.order.line': {\n 'higher': 'in_invoice', 'lower': 'in_refund',\n 'type': 'purchase', 'field': 'exchange_purchase_line_id'\n },\n }\n product = exchange_line.exchange_product_id or exchange_line.product_id\n data = {\n 'invoice_type': False,\n 'values': {\n 'product_id': product.id,\n 'quantity': exchange_line.quantity,\n 'name': 'Exchange for [%s]' % exchange_line.product_id.display_name,\n }\n }\n if exchange_line.exchange_product_id or \\\n exchange_line.price_subtotal > order_line.price_subtotal:\n data['invoice_type'] = invoice_type[order_line._name]['higher']\n elif exchange_line.price_subtotal < order_line.price_subtotal:\n data['invoice_type'] = invoice_type[order_line._name]['lower']\n else:\n return {}\n data[invoice_type[order_line._name]['type']] = order_line.order_id\n data['values'][invoice_type[order_line._name]['field']] = order_line.id\n data['values']['price_unit'] = exchange_line.price_unit\n # TODO i think we should take the different between prices NOT the all price\n # abs(exchange_line.price_unit - order_line.price_unit)\n return data", "def action_create_invoice(self):\n if self.partner_id:\n supplier = self.partner_id\n else:\n supplier = self.partner_id.search(\n [(\"name\", \"=\", \"Salon Default Customer\")])\n lines = []\n product_id = self.env['product.product'].search(\n [(\"name\", \"=\", \"Salon Service\")])\n for records in self.order_line_ids:\n if product_id.property_account_income_id.id:\n income_account = product_id.property_account_income_id.id\n elif product_id.categ_id.property_account_income_categ_id.id:\n income_account = product_id.categ_id.\\\n property_account_income_categ_id.id\n else:\n raise UserError(\n _(\"Please define income account for this product: \"\n \"'%s' (id:%d).\") % (product_id.name, product_id.id))\n value = (0, 0, {\n 'name': records.service_id.name,\n 'account_id': income_account,\n 'price_unit': records.price,\n 'quantity': 1,\n 'product_id': product_id.id,\n })\n lines.append(value)\n invoice_line = {\n 'move_type': 'out_invoice',\n 'partner_id': supplier.id,\n 'invoice_user_id': self.env.user.id,\n 'invoice_origin': self.name,\n 'invoice_line_ids': lines,\n }\n inv = self.env['account.move'].create(invoice_line)\n action = self.env.ref('account.action_move_out_invoice_type',\n raise_if_not_found=False)\n result = {\n 'name': action.name,\n 'type': 'ir.actions.act_window',\n 'views': [[False, 'form']],\n 'target': 'current',\n 'res_id': inv.id,\n 'res_model': 'account.move',\n }\n self.inv_stage_identifier = True\n self.stage_id = 3\n invoiced_records = 
self.env['salon.order'].search(\n [('stage_id', 'in', [3, 4]), ('chair_id', '=', self.chair_id.id)])\n total = 0\n for rows in invoiced_records:\n invoiced_date = str(rows.date)\n invoiced_date = invoiced_date[0:10]\n if invoiced_date == str(date.today()):\n total = total + rows.price_subtotal\n self.chair_id.collection_today = total\n self.update_number_of_orders()\n return result", "def _prepare_invoice_line(self, qty):\n self.ensure_one()\n res = super(SaleOrderLine, self)._prepare_invoice_line(qty)\n\n res.update({\n 'cost_center_id': self.cost_center_id and self.cost_center_id.id or False\n })\n return res", "def _prepare_invoice(self):\n self.ensure_one()\n journal_id = self.env['account.invoice'].default_get(['journal_id'])['journal_id']\n if not journal_id:\n raise UserError(_('Please define an accounting sales journal for this company.'))\n invoice_vals = {\n 'name': self.client_order_ref or '',\n 'origin': self.name,\n 'type': 'out_invoice',\n 'account_id': self.partner_invoice_id.property_account_receivable_id.id,\n 'partner_id': self.partner_invoice_id.id,\n 'partner_shipping_id': self.partner_shipping_id.id,\n 'journal_id': journal_id,\n 'currency_id': self.pricelist_id.currency_id.id,\n 'comment': self.note,\n 'payment_term_id': self.payment_term_id.id,\n 'fiscal_position_id': self.fiscal_position_id.id or self.partner_invoice_id.property_account_position_id.id,\n 'company_id': self.company_id.id,\n 'user_id': self.user_id and self.user_id.id,\n 'team_id': self.team_id.id,\n 'x_studio_field_rgEdd': self.x_studio_field_icWOZ.id,\n 'x_studio_car_type_1': self.vehicle.id,\n 'x_studio_job_card_1': self.x_studio_agency_job_card,\n 'x_studio_car_type_name': self.vehicle.model_id.name,\n 'x_studio_plate_num': self.vehicle.license_plate,\n 'x_studio_claim_num': self.claim_no,\n\n 'x_studio_is_insured':self.is_insured,\n 'x_studio_service_provider': self.service_advisor.id,\n 'date_invoice': fields.Date.today(),\n 'transaction_ids': [(6, 0, self.transaction_ids.ids)],\n }\n return invoice_vals\n\n # 'x_studio_field_rgEdd':order.x_studio_field_icWOZ.id,", "def generate_customer_info(self):\n consecutive = check_consecutive(self.customerIds)\n print(\"consecutive\" + str(consecutive))\n if consecutive:\n for customer_id in self.customerIds:\n # next we need to 1) decide if the customer has insurance, and if yes, generate the EOB that gets sent to the customer\n insured = np.random.choice(\n self.distributions[\"insurance_status\"],\n 1,\n p=self.distributions[\"insurance_distribution\"],\n )[0]\n dob = self.dobs[customer_id - 1]\n customer = pd.DataFrame(\n {\n \"customer_id\": [customer_id],\n \"dob\": str(dob),\n \"insurance\": insured,\n \"experiment_id\": random.randint(1, 2),\n }\n )\n self.Customers = self.Customers.append(customer)\n else:\n print(\"Error generating customer info: customerIds aren't consecutive\")\n return True", "def compute_counterpart_lines(self):\n for item in self:\n move_debit_lines = []\n move_credit_lines = []\n\n # list of all the move lines of the payment's move\n line_list = []\n for entry in item.journal_entry_ids:\n for line in entry.line_ids:\n if line.account_id.treasury_planning:\n line_list.append(line)\n\n # for each line above collect all the reconciled counterpart lines\n for line in line_list:\n if line.credit > 0 and line.debit == 0:\n for match in line.matched_debit_ids:\n move_debit_lines.append(match.debit_move_id.id)\n\n if line.credit == 0 and line.debit > 0:\n for match in line.matched_credit_ids:\n 
move_credit_lines.append(match.credit_move_id.id)\n\n if move_credit_lines:\n counterpart_move_ids = move_credit_lines\n else:\n counterpart_move_ids = move_debit_lines\n\n # bank move share is transformed to dictionary\n bank_move_dict = (ast.literal_eval(item.cf_share) if\n item.cf_share else {})\n\n # the share of each counterpart line is \"merged or added\"\n # in a weighted manner to the bank line share\n for cpt in counterpart_move_ids:\n dest_move_line = self.env['account.move.line'].browse(cpt)\n weight = round(dest_move_line.balance / item.amount, 2)\n # counterpart share is transformed into dictionary\n move_line_dict = ast.literal_eval(dest_move_line.cf_share)\n\n # each key is finally added to the bank line share\n for key, value in move_line_dict.iteritems():\n draft_dictionary = dictop.sum_dictionary(\n bank_move_dict.get(key, {}), 1,\n move_line_dict.get(key, {}), weight)\n bank_move_dict[key] = dictop.check_dict_total(\n draft_dictionary, 1)\n\n # the dictionary is transformed into string and assigned\n item.cf_share = json.dumps(bank_move_dict)", "def _prepare_invoice_grp(self, cr, uid, order, line_ids, context=None):\n if context is None:\n context = {}\n context = dict(context)\n\n inv_data = super(grp_orden_compra, self)._prepare_invoice_grp(cr, uid, order, line_ids, context=context)\n\n # adicionando campos numero compromiso y no obligacion desde la OC\n monto_oc = math.floor(order.total_llavep or 0)\n monto_oc = int(monto_oc)\n inv_data.update({'nro_compromiso': order.nro_compromiso or False, 'monto_comprometido': monto_oc or 0, 'currency_id':order.currency_oc.id})\n\n # adicionando campos no afectacion y monto autorizado desde la primera APG\n if order.pc_apg_id:\n first_apg = order.pc_apg_id\n monto_apg = math.floor(first_apg.total_llavep)\n monto_apg = int(monto_apg)\n # TODO R SPRING X ADICIONANDO CABEZALES SIIF A LA FACTURA A PARTIR DE LA APG\n inv_data.update({'nro_afectacion': first_apg.nro_afectacion_siif or False,\n 'monto_afectado': monto_apg or 0,\n 'siif_tipo_ejecucion':first_apg.siif_tipo_ejecucion.id,\n 'siif_concepto_gasto':first_apg.siif_concepto_gasto.id,\n 'siif_financiamiento':first_apg.siif_financiamiento.id,\n 'siif_codigo_sir':first_apg.siif_codigo_sir.id,\n 'siif_nro_fondo_rot':first_apg.siif_nro_fondo_rot.id,\n }) # cambiando nro_afectacion 23/10\n # inv.update({'nro_afectacion': first_apg.nro_afectacion_apg or False, 'monto_afectado': monto_apg or 0})\n\n # # TODO R SPRING X NO LLEVAR LAS LLAVES PRESUPUESTALES POR DEFECTO\n # if order.pc_apg_id.llpapg_ids:\n # llavep_ids = []\n # for llavep in order.pc_apg_id.llpapg_ids:\n # llavep_ids.append((0, 0, {\n # 'programa_id': llavep.programa_id.id,\n # 'odg_id': llavep.odg_id.id,\n # 'auxiliar_id': llavep.auxiliar_id.id,\n # 'disponible': llavep.disponible,\n # 'proyecto_id': llavep.proyecto_id.id,\n # 'fin_id': llavep.fin_id.id,\n # 'mon_id': llavep.mon_id.id,\n # 'tc_id': llavep.tc_id.id,\n # 'importe': llavep.importe\n # }))\n # inv_data.update({'llpapg_ids': llavep_ids})\n\n return inv_data", "def get_customer_history_df_invoice_line(self, customerID):\n df_invoice_line \\\n = self._df_invoice_original[self._df_invoice_original.CustomerID \\\n == customerID]\n return df_invoice_line", "def generate_new_visit(self):\n if self.consecutive:\n customer_id = np.random.choice(\n self.customerIds, 1\n ) # choose a customer at random\n insured = self.Customers[self.Customers[\"customer_id\"] == customer_id[0]][\n \"insurance\"\n ].values[\n 0\n ] # does the customer have insurance?\n experiment_id = 
self.Customers[\n self.Customers[\"customer_id\"] == customer_id[0]\n ][\"experiment_id\"].values[\n 0\n ] # does the customer have insurance?\n\n event_list = (\n self.billing_choose_dates()\n ) # generate dates associated with this invoice\n cpt_code = random.sample(self.CPTCodes, 1)[0]\n date_of_service = str(event_list.values[0][0])\n created_on = str(event_list.values[1][0])\n date_of_eob = str(event_list.values[2][0])\n date_of_provider_adjustment = str(event_list.values[3][0])\n date_of_patient_payment = str(event_list.values[4][0])\n # generate a new invoice\n (invoice_id, charge_amount) = self.generate_new_invoice(\n created_on, date_of_service, customer_id, cpt_code\n )\n # generate subsequent EOB (i.e. copay, EOB adjustment, EOB payment)\n remaining_amount = self.generate_eob(\n date_of_service,\n date_of_eob,\n insured,\n invoice_id,\n cpt_code,\n charge_amount,\n )\n # generate provider adjustments\n remaining_amount = self.generate_provider_adjustment(\n date_of_provider_adjustment, invoice_id, cpt_code, remaining_amount\n )\n # generate a possible payment from the patient\n remaining_amount = self.generate_patient_payment(\n date_of_patient_payment,\n invoice_id,\n cpt_code,\n remaining_amount,\n experiment_id,\n )\n # record the remaining amounts in a separate table.\n self.record_remaining_amount(\n date_of_patient_payment, invoice_id, cpt_code, remaining_amount\n )\n return True\n else:\n print(\"Error generating new invoice- customerIds aren't consecutive\")", "def _prepare_order_line_invoice_line(self, cr, uid, line, account_id=False, context=None):\n res = super(sale_order_line, self)._prepare_order_line_invoice_line(cr, uid, line, account_id=account_id, context=context)\n \n res.update({'part_number': line.part_number, 'internal_part_number' : line.internal_part_number})\n return res", "def _prepare_invoice_line(self, inv_id):\n res = {}\n account_id = self.product_id.property_account_income_id.id or self.product_id.categ_id.property_account_income_categ_id.id\n if not account_id:\n raise UserError(_('Please define income account for this product: \"%s\" (id:%d).') % \\\n (self.product_id.name, self.product_id.id,))\n price_unit = self.product_id.lst_price\n res = {\n 'invoice_id': inv_id.id,\n 'name': self.name,\n 'origin': self.order_id.name,\n 'account_id': account_id,\n 'uom_id': self.product_uom_id.id,\n 'quantity': self.product_uom_qty,\n 'price_unit': price_unit,\n 'product_id': self.product_id.id,\n 'invoice_line_tax_id': False,\n 'order_line_id': self.id\n }\n return res", "def test_invoice_create(self):\n # first we create a customer\n id = self._create_model(\"customer\", self.customer_data, [\"name\", \"email\", \"phone\"])\n if id:\n # then we can create the invoice\n data = self.invoice_data\n data[\"customer_id\"] = id\n self._create_model(\"invoice\", data, [])\n self.assertIsNotNone(id)", "def merge_purchase_invoice(self):\r\n active_id = self.env['purchase.order'].browse(self.env['purchase.order']._context.get('active_ids'))\r\n journal_id = self.env['account.journal'].search([('type', '=', 'purchase')]) \r\n active_id_count = 0\r\n active_count = 0\r\n exist_vendor = []; invoice = [];exist_vendors = [];ctx = ();invoice_id = []\r\n for rec in active_id : \r\n po_reference = self.env['account.invoice'].search([('origin', 'like', rec.name)])\r\n active_count = len(active_id)\r\n if rec.picking_count >= 1 and rec.picking_count != rec.invoice_count:\r\n len_name = [] \r\n for inv in po_reference: \r\n len_name = inv.origin.split(\":\") \r\n if rec.name 
in len_name:\r\n if po_reference.state == 'draft':\r\n for record in po_reference.invoice_line_ids:\r\n print (record.line_id)\r\n for res in rec.order_line:\r\n if res.id == record.line_id: \r\n record.write({'quantity':res.qty_received})\r\n res.write({'qty_invoiced':record.quantity})\r\n \r\n else:\r\n \r\n po_list = [];line_values = {};lines = {};purchase = []\r\n if rec.state in 'purchase' and rec.invoice_status in 'to invoice':\r\n purchase.append(rec.id)\r\n active_id_count = len(purchase)\r\n if rec.partner_id.id in exist_vendor:\r\n for inv in invoice:\r\n if inv['partner_id'] == rec.partner_id.id:\r\n for recc in rec.order_line:\r\n if rec.picking_count > 1 and rec.invoice_count >= 1:\r\n qty_received = recc.qty_received - recc.qty_invoiced \r\n else:\r\n qty_received = recc.qty_received \r\n line_values = (0, 0, {'product_id': recc.product_id.id,\r\n 'quantity': qty_received ,\r\n 'price_unit': recc.price_unit,\r\n 'invoice_line_tax_ids': [(6, 0, recc.taxes_id and recc.taxes_id.ids) or False] ,\r\n 'price_subtotal': recc.price_subtotal,\r\n 'product_uom': recc.product_uom.id,\r\n 'name': recc.name,\r\n 'account_id': journal_id.default_debit_account_id.id ,\r\n 'line_id':recc.id\r\n }) \r\n inv['invoice_line_ids'].append(line_values)\r\n inv['origin'] = inv['origin'] + ':' + rec.name\r\n if rec.partner_id.id not in exist_vendor:\r\n exist_vendors.append(rec.partner_id.id) \r\n else: \r\n for recc in rec.order_line:\r\n if rec.picking_count > 1 and rec.invoice_count >= 1:\r\n qty_received = recc.qty_received - recc.qty_invoiced \r\n else:\r\n qty_received = recc.qty_received\r\n line_values = (0, 0, {'product_id': recc.product_id.id,\r\n 'quantity': qty_received,\r\n 'price_unit': recc.price_unit,\r\n 'invoice_line_tax_ids': [(6, 0, recc.taxes_id and recc.taxes_id.ids)or False],\r\n 'price_subtotal': recc.price_subtotal,\r\n 'product_uom': recc.product_uom.id,\r\n 'name': recc.name,\r\n 'account_id': journal_id.default_debit_account_id.id,\r\n 'line_id':recc.id\r\n }) \r\n print (rec.id)\r\n po_list.append(line_values) \r\n invoice.append({'origin':rec.name, 'partner_id': rec.partner_id.id, 'invoice_line_ids':po_list, 'account_id': rec.partner_id.property_account_payable_id.id, 'type': 'in_invoice', 'journal_id':journal_id.id,'date_invoice':datetime.today()}) \r\n if rec.partner_id.id not in exist_vendor:\r\n exist_vendor.append(rec.partner_id.id) \r\n \r\n else:\r\n po_list = [];line_values = {};lines = {};purchase = []\r\n if rec.state in 'purchase' and rec.invoice_status in 'to invoice':\r\n purchase.append(rec.id)\r\n active_id_count = len(purchase)\r\n if rec.partner_id.id in exist_vendor:\r\n for inv in invoice:\r\n if inv['partner_id'] == rec.partner_id.id:\r\n for recc in rec.order_line:\r\n if rec.picking_count > 1 and rec.invoice_count >= 1:\r\n qty_received = recc.qty_received - recc.qty_invoiced \r\n else:\r\n qty_received = recc.qty_received\r\n line_values = (0, 0, {'product_id': recc.product_id.id,\r\n 'quantity': qty_received ,\r\n 'price_unit': recc.price_unit,\r\n 'invoice_line_tax_ids': [(6, 0, recc.taxes_id and recc.taxes_id.ids) or False] ,\r\n 'price_subtotal': recc.price_subtotal,\r\n 'product_uom': recc.product_uom.id,\r\n 'name': recc.name,\r\n 'account_id': journal_id.default_debit_account_id.id ,\r\n 'line_id':recc.id\r\n }) \r\n inv['invoice_line_ids'].append(line_values)\r\n inv['origin'] = inv['origin'] + ':' + rec.name\r\n if rec.partner_id.id not in exist_vendor:\r\n exist_vendors.append(rec.partner_id.id) \r\n else: \r\n for recc in 
rec.order_line:\r\n if rec.picking_count > 1 and rec.invoice_count >= 1:\r\n qty_received = recc.qty_received - recc.qty_invoiced \r\n else:\r\n qty_received = recc.qty_received\r\n line_values = (0, 0, {'product_id': recc.product_id.id,\r\n 'quantity': qty_received,\r\n 'price_unit': recc.price_unit,\r\n 'invoice_line_tax_ids': [(6, 0, recc.taxes_id and recc.taxes_id.ids)or False],\r\n 'price_subtotal': recc.price_subtotal,\r\n 'product_uom': recc.product_uom.id,\r\n 'name': recc.name,\r\n 'account_id': journal_id.default_debit_account_id.id,\r\n 'line_id':recc.id\r\n }) \r\n print (rec.id)\r\n po_list.append(line_values) \r\n invoice.append({'origin':rec.name, 'partner_id': rec.partner_id.id, 'invoice_line_ids':po_list, 'account_id': rec.partner_id.property_account_payable_id.id, 'type': 'in_invoice', 'journal_id':journal_id.id,'date_invoice':date.today()}) \r\n if rec.partner_id.id not in exist_vendor:\r\n exist_vendor.append(rec.partner_id.id) \r\n \r\n invoices = []\r\n invoice_counts = 0\r\n for record in invoice:\r\n invoice_id = self.env['account.invoice'].create(record)\r\n invoices.append(invoice_id.id)\r\n invoice_counts = len(invoices)\r\n if active_id_count == 1:\r\n if invoice_counts == 1:\r\n form_view = self.env.ref('purchase.view_invoice_supplier_purchase_form').id\r\n tree_view = self.env.ref('account.invoice_tree').id \r\n return{\r\n 'name': _('Invoice'),\r\n 'type':'ir.actions.act_window',\r\n 'view_type':'form',\r\n 'view_mode':'form,tree',\r\n 'res_model':'account.invoice',\r\n 'res_id':invoices[0],\r\n 'views_id':False,\r\n 'views':[(form_view , 'form'), (tree_view , 'tree')],\r\n 'domain':[('id', 'in', invoices)],\r\n 'target': 'current',\r\n } \r\n else: \r\n form_view = self.env.ref('account.invoice_supplier_form').id\r\n tree_view = self.env.ref('account.invoice_supplier_tree').id \r\n return{\r\n 'name': _('Invoice'),\r\n 'type':'ir.actions.act_window',\r\n 'view_type':'form',\r\n 'view_mode':'form,tree',\r\n 'res_model':'account.invoice',\r\n 'views_id':True,\r\n 'views':[(tree_view , 'tree'), (form_view , 'form')],\r\n 'domain':[('id', 'in', invoices)],\r\n 'target': 'current',\r\n }", "def predict_segment(self, df_invoice_line=None):\n if df_invoice_line is not None:\n self.data_transform(df_invoice_line) \n self.df_customers_features_build() \n else:\n pass\n X_test = self._df_customers.values\n y_pred = self._classifier_model.predict(X_test)\n return y_pred[0]" ]
[ "0.65959746", "0.6386151", "0.6226632", "0.5918294", "0.58827716", "0.5865145", "0.5850917", "0.58120507", "0.57048744", "0.57040673", "0.563682", "0.56352204", "0.5545573", "0.55451196", "0.5510552", "0.5419621", "0.54099417", "0.5400348", "0.53597164", "0.5323142", "0.5315464", "0.5270124", "0.52435124", "0.5240882", "0.52162296", "0.5213667", "0.51926345", "0.517341", "0.51313394", "0.5125631" ]
0.68115205
0
Return the segment identifier a customer is predicted to belong to.
def predict_segment(self, df_invoice_line=None):
    if df_invoice_line is not None:
        self.data_transform(df_invoice_line)
        self.df_customers_features_build()
    else:
        pass
    X_test = self._df_customers.values
    y_pred = self._classifier_model.predict(X_test)
    return y_pred[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_customer_marketSegment(self, df_invoice_line_customer):\n #-------------------------------------------------------------------------\n # Building data model \n #-------------------------------------------------------------------------\n self.data_transform(df_invoice_line_customer)\n\n #-------------------------------------------------------------------------\n # Customer features are built thanks to transformers.\n #-------------------------------------------------------------------------\n self.df_customers_features_build()\n \n #-------------------------------------------------------------------------\n # Customer market segment is predicted\n #-------------------------------------------------------------------------\n X_test = self._df_customers.values\n y_pred = self._classifier_model.predict(X_test)\n segmentID = y_pred[0]\n \n return segmentID", "def segment_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"segment_name\")", "def get_segment_name(self, offset):\n self.ret = idc.get_segm_name(offset)\n return self.ret", "def segment_number(self):\n if hasattr(self, '_m_segment_number'):\n return self._m_segment_number if hasattr(self, '_m_segment_number') else None\n\n self._m_segment_number = self.segment_number_raw.value\n return self._m_segment_number if hasattr(self, '_m_segment_number') else None", "def get_segment(self):\n return self.segment", "def get_segment(self):\n return self.segment", "def segment_counter(self):\n return self._data_writer.get_segment_counter()", "def get_segm_num(*args):\n return _ida_segment.get_segm_num(*args)", "def segment_func1(self):\n # computing neighboors graph\n A = self.normal_graph()\n\n # SpectralClustering segmentation\n sc = SpectralClustering(3, affinity='precomputed', n_init=10, assign_labels='discretize')\n labels = sc.fit_predict(A)\n\n return labels", "def getSegment(self):\n return self.segment", "def getSentenceId(self):\n return( int(self.id.split('.')[1]) )", "def find_index(segmentation, stroke_id):\n for i, symbol in enumerate(segmentation):\n for sid in symbol:\n if sid == stroke_id:\n return i\n return -1", "def get_seg(self):\n self.seg = self.render()[4]\n return self.seg", "def identifier(self):\n return self.viztrail.identifier", "def getSegment(self):\n\n segname = self.getSegname()\n if segname is not None:\n return self._hv.getSegment(segname)", "def get_segm_name(*args):\n return _ida_segment.get_segm_name(*args)", "def segment_func2(self):\n # computing neighboors graph\n A = self.boundaryprob_graph()\n\n # SpectralClustering segmentation\n sc = SpectralClustering(3, affinity='precomputed', n_init=10, assign_labels='discretize')\n labels = sc.fit_predict(A)\n\n return labels", "def cinters_segment(self, s):\r\n if self.contains_point(s.start[0], s.start[1]) == self.contains_point(s.end[0], s.end[1]):\r\n # The segment doesn't cross the contour of the polygon\r\n return None\r\n else:\r\n if self.__segments == None:\r\n self.__load_segments()\r\n \r\n for segment in self.__segments:\r\n p = segment.inters_segment(s)\r\n if p != None:\r\n return p\r\n \r\n return None", "def get_visible_segm_name(*args):\n return _ida_segment.get_visible_segm_name(*args)", "def inters_segment(self, s):\r\n if (self.m == s.m) and (self.n == s.n):\r\n # The segment s is over this segment. 
Return the middle point\r\n x = (self.start[0] + self.end[0]) / 2\r\n y = (self.start[1] + self.end[1]) / 2\r\n elif self.m == s.m:\r\n # The segments are parallels\r\n return None\r\n elif self.m == None:\r\n x = self.start[0]\r\n y = int(s.m * x + s.n)\r\n elif s.m == None:\r\n x = s.start[0]\r\n y = self.m * x + self.n\r\n else:\r\n x = (s.n - self.n) / (self.m - s.m)\r\n y = self.m * x + self.n \r\n \r\n if self.contains_point(x, y) and s.contains_point(x, y):\r\n return int(x), int(y)\r\n else:\r\n return None", "def customer_id(self) -> str:\n return self._customer_id", "def get_physical_seg_id(self, local_seg_id: int) -> int:\n return 0 if (self.seg_id == local_seg_id) else self.seg_id", "def get_classification(self):\n try:\n receiver = self.cleaned_data[\"customer\"]\n classification = self.CLASSIFICATION_DICT[receiver]\n except KeyError:\n return \"------\"\n except AttributeError:\n return \"------\"\n\n return classification", "def get_customer_count(self):\n return self._df_invoice_original.CustomerID.unique().shape[0]", "def get_customer_id(self):\n return self.machine_config_file_value(\"DEFAULT.CID\").strip('\"')", "def get_customer_info(self, customer_id):\n #requested_customer_RDD = self.sc.parallelize(movie_ids).map(lambda x: (user_id, x))\n # Get predicted ratings\n customers = self.__get_customers(customer_id).collect()\n\n return customers", "def getSegments(points):\n return _identifyStrokes(points)[1]", "def get_identifier(self):\n return 'Sequence SMNIST'", "def predict(self, test_vector):\n return self.find_closest(test_vector)[1].class_id", "def get_segment_index(datadb):\n #match in time!!!!\n if cfg.use_saliency:\n segment_index_tar = util.get_time_for_visual(datadb)\n segment_index_tar_future = OrderedDict()\n for key in segment_index_tar.keys():\n segment_index_tar_future[key] = np.array(segment_index_tar[key])+max_encoder_seq_length\n return segment_index_tar,segment_index_tar_future" ]
[ "0.768172", "0.6201237", "0.61484486", "0.5937784", "0.5902539", "0.5902539", "0.58263624", "0.57347125", "0.5691365", "0.56732774", "0.5623136", "0.55613863", "0.5555856", "0.5535719", "0.5528222", "0.55247766", "0.5446849", "0.54316", "0.5398863", "0.5389348", "0.52877736", "0.52812827", "0.5259461", "0.5229752", "0.5216766", "0.52108204", "0.52103513", "0.5195189", "0.51856345", "0.51749325" ]
0.63881826
1
Returns list of stock codes from list of item descriptions.
def getStockCodeList(self, list_description=None):
    list_stockCode = list()
    df = self._df_invoice_original

    if list_description is None:
        list_stockCode = list(df.StockCode.unique())
    else:
        for description in list_description:
            stockCode = df[df.Description==description].StockCode.unique()[0]
            list_stockCode.append(stockCode)
    return list_stockCode
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getDescriptionList(self, list_stockCode=None):\n df = self._df_invoice_original\n\n list_description = list()\n if list_stockCode is None :\n list_description = list(df.Description.unique())\n else:\n for stockCode in list_stockCode:\n description = df[df.StockCode==stockCode].Description.unique()[0]\n list_description.append(description)\n \n return list_description", "def codes(self):\n return [card.code for card in self.cards]", "def find_item_codes(transaction):\n t = transaction\n item_codes = []\n if t['transaction_type'] in ('SALE', 'REFUND'):\n # Search using line item IDs and order_id\n for oli in (t['order_line_items'] or []):\n li_id = oli['line_item_id']\n item_codes.append(\n get_item_code_for_order(t['order_id'], order_line_item_id=li_id)\n )\n else:\n # Search for ITEM reference\n for ref in (transaction['references'] or []):\n if ref['reference_type'] == 'ITEM_ID':\n item_codes.append(\n get_item_code_for_item_id(ref['reference_id'])\n )\n\n return item_codes", "def parse_quantities(quantities):\n codes = []; names = []\n\n for q in quantities:\n c, n = parse_quantity(q)\n codes.append(c)\n names.append(n)\n\n return codes, names", "def discount_codes(self):\n return [DiscountCode(x) for x in self._dict.get('discount_codes', [])]", "def codelists():\n return CodelistSet()", "def get_order_lists(self, n_items, n_quantities):\n arr_stock_code = self._df_invoice_original.StockCode.unique()\n arr_stock_code = np.random.choice(arr_stock_code, n_items)\n list_stockCode = list(arr_stock_code)\n list_quantities = np.ones(arr_stock_code.shape[0])\n list_quantities *=n_quantities\n\n return list_stockCode, list_quantities", "def license_codes(self) -> Sequence[str]:\n return pulumi.get(self, \"license_codes\")", "def get_pcode_list(self) -> List[str]:\n return self.pcodes", "def all_currency_codes():\n return [(a, CURRENCIES[a].name) for a in CURRENCIES]", "def currency_codes():\n return list(settings.CURRENCIES)", "def extract_promocodes(self):\n promocode_description = self.text\n\n sentences: list = self._split_by_sentences(promocode_description)\n\n sentence_with_promocode = promocode_description # no needed\n\n promocodes = ()\n\n for sentence in sentences:\n if any(keyword in sentence.lower()\n for keyword in (\"промокод\", \"купон\", \"промо-код\", )):\n\n sentence_with_promocode = sentence\n\n promocodes: list = \\\n self.get_promocodes(sentence_with_promocode,\n parser_constants.instagram_patterns)\n if promocodes:\n break\n # TODO:\n # make probabilities and do not break\n # continue iter by senteces and search4 promo in every\n # after that (we know that here is 1 promo)\n # we can choose the most suitable coupon\n\n for p in promocodes:\n if p and len(p) >= 3:\n promocode = p\n if self.is_valid_promocode_morph_check(promocode):\n break\n else:\n return []\n\n if any(forbidden_promocode in promocode.lower()\n for forbidden_promocode in\n parser_constants.forbidden_promocodes):\n\n return []\n\n expiration_date = self.parse_date(promocode_description)\n\n for key in parser_constants.replacement_table.keys():\n promocode_description = \\\n promocode_description.replace(\n key, parser_constants.replacement_table[key]\n )\n\n return [data_classes.Promocode(\n coupon=promocode,\n promoCodeDescription=promocode_description,\n estimated_date=expiration_date,\n source=self.source\n )]", "def create_not_included_list(codes):\n string = '\\\\begin{itemize}\\n'\n for code in codes:\n title = get_course_title_only(code)\n string += '\\\\item{' + title + '}\\n'\n string += 
'\\\\end{itemize}\\n'\n return string", "def get_code_mapping( id ):\n returnVal = []\n theCodes = _theRegistry.get_code( id )\n codes = theCodes.get_codes()\n descs = theCodes.get_descriptions()\n for (code, desc) in map(None, codes, descs):\n returnVal.append( { 'code' : code, 'description' : desc } )\n return returnVal", "def list():\n\n return cache.codeTableList()", "def to_iob(text: str, items: List[Instance]) -> List[str]:\n coding = [\"O\"] * len(text)\n for (s, e), label in items:\n b = f\"B-{label}\"\n i = f\"I-{label}\"\n coding[s] = b\n for x in range(s + 1, e):\n coding[x] = i\n\n return coding", "def list_of_langs(data):\n lang_codes = []\n for lang_data in data:\n lang_codes.append(lang_data.get('value'))\n return lang_codes", "def get_item_code_for_order(order_id, order_line_item_id=None, item_id=None):\n\n params = {\n 'order_id': order_id,\n 'order_line_item_id': order_line_item_id,\n 'item_id': item_id\n }\n\n if order_line_item_id and item_id:\n raise ValueError('Supply either order_line_item_id or item_id!')\n\n if order_line_item_id:\n filter_line = \"\"\"sii.ebay_order_line_item_id = %(order_line_item_id)s\"\"\"\n else:\n filter_line = \"\"\"sii.ebay_item_id = %(item_id)s\"\"\"\n\n # Try loading from SINVs first\n records = frappe.db.sql(f\"\"\"\n SELECT sii.item_code\n FROM `tabSales Invoice Item` AS sii\n LEFT JOIN `tabSales Invoice` AS si\n ON sii.parent = si.name\n WHERE si.ebay_order_id = %(order_id)s\n AND {filter_line};\n \"\"\", params, as_dict=True)\n item_code = {x.item_code for x in records}\n if len(item_code) > 1:\n raise ValueError(\n f'Multiple results for order {order_id} line '\n + f'item {order_line_item_id or item_id}!'\n )\n if item_code:\n # We have a single result; return it\n item_code, = item_code\n return item_code\n # We will have to look up the order\n try:\n order = get_order(order_id)\n except eBayRestError as e:\n raise ErpnextEbaySyncError(\n f'Unable to load order to get item code!\\n{e}')\n for li in order['line_items']:\n if order_line_item_id:\n # Check order line ID\n if li['line_item_id'] == order_line_item_id:\n return li['sku']\n else:\n # Check legacy item ID\n if li['legacy_item_id'] == item_id:\n return li['sku']\n # We could not locate correct line item\n if order_line_item_id:\n msg = f'line item {order_line_item_id}'\n else:\n msg = f'item ID {item_id}'\n raise ErpnextEbaySyncError(f'Order {order_id} did not contain {msg}?')", "def currency_code_mappings():\n return [(a, CURRENCIES[a].name) for a in settings.CURRENCIES]", "def get_lock_codes(device: Device) -> Sequence[str]:\n try:\n codes_str = cast(str, device.attributes[ATTR_LOCK_CODES].value)\n codes = loads(codes_str)\n return [codes[id][\"name\"] for id in codes]\n except Exception as e:\n _LOGGER.warn(\"Error getting lock codes for %s: %s\", device, e)\n return []", "def getUnitPriceList(self, list_stockCode):\n df = self._df_invoice_original\n\n list_unitPrice = list()\n \n for stockCode in list_stockCode:\n unitPrice = df[df.StockCode==stockCode].UnitPrice.unique()[0]\n list_unitPrice.append(unitPrice)\n return list_unitPrice", "def lcode(self):\n###############################################################################\n lcode = []\n for M in list(self.estimates.values()):\n if (M.code not in lcode):lcode.append(M.code)\n return(lcode)", "def get_codes(cls, query: Optional[str] = None):\n return search(\n SummaryItemCounts.get_data_frame(cls.table_name()), query=query\n )", "def populate_code_list():\n\tletter_code_ST = \"JZIHGFEDCBA\"\n\tletter_code_FG = 
\"XWUTRQPNMLK\"\n\tfor pos in range(\n\t len(letter_code_ST)): #Interestingly, the values start from 0\n\t\tcode_ST.append(pos) # Number first\n\t\tcode_ST.append(letter_code_ST[pos])\n\tfor pos in range(len(letter_code_FG)):\n\t\tcode_FG.append(pos)\n\t\tcode_FG.append(letter_code_FG[pos])", "def itemnames():\n g = ['KIS_NA_39', 'VII_57', 'MX_48', 'MX_56', 'KIS_NA_42', 'VII_54',\n 'MX_S_48', 'MX_S_52', 'MX_52', 'KIS_NA_45', 'KIS_NA_51', 'MIP_45',\n 'MIP_49', 'MIP_52', 'MIP_plus_48', 'MIP_plus_51', 'MX_42', 'MX_45',\n 'MIP_G_42', 'KIS_42', 'KIS_NA_48']\n return(g)", "def _get_stock_item_ids(cls, *skus):\n return linnapi.inventory.get_stock_item_ids_by_sku(*skus)", "def hs_code_process(si):\n hs_code = re.sub(r'\\W+', '', si.get('hs_code', ''))\n descrip = re.sub(r'\\W+', '', si.get('description_of_goods', ''))\n bl_type = re.sub(r'\\W+', '', si.get('bl_type', ''))\n hs_codes = []\n if hs_code != '' and hs_code in descrip:\n hs_raw = si.pop('hs_code')\n for raw_line in hs_raw.split('\\n'):\n line = re.sub(r'\\W+', '', raw_line).upper()\n if 'HSCODE' in line:\n remain = line.replace('HSCODE', '').replace('\\n', '')\n remain = re.sub(r'[A-Z]+', '', remain)\n if remain.isdigit() and len(remain) > 4:\n hs_codes.append(remain)\n else:\n # CODE in line below\n hs_line_no = hs_raw.split('\\n').index(raw_line)\n for hs_line in hs_raw.split('\\n')[hs_line_no + 1:]:\n if len(re.findall(r'[a-zA-Z]+', hs_line)) < 1:\n for hs_code in re.findall(r'\\d+', hs_line):\n hs_codes.append(hs_code)\n else:\n break\n\n bl_type = si.get('bl_type', '')\n\n elif hs_code != '' and hs_code in bl_type:\n hs_raw = si.pop('hs_code')\n for raw_info in hs_raw.split('/'):\n info = re.sub(r'\\W+', '', raw_info).upper()\n if 'HSCODE' in info:\n hs_code = info.replace('HSCODE', '').replace('\\n', '')\n hs_code = re.sub(r'[A-Z]+', '', hs_code)\n hs_codes.append(hs_code)\n break\n bl_type = hs_raw.split('/')[0]\n\n else:\n hs_code = re.sub(r'[^\\d]+', '', hs_code)\n hs_codes.append(hs_code)\n bl_type = si.get('bl_type', '')\n\n return hs_codes, bl_type", "def product_db() -> List[Text]:\n\n return [\n \"credit\",\n \"forex\",\n \"debit\",\n \"atm\"\n ]", "def get_products(self):\n return [item.code for item in self._products]", "def get_registry_codes( ):\n return _theRegistry.get_codes( )" ]
[ "0.63454723", "0.5953065", "0.58789575", "0.5782579", "0.5730202", "0.551616", "0.5417973", "0.5402704", "0.5288644", "0.5257909", "0.5255299", "0.5200416", "0.5199842", "0.51931006", "0.5064142", "0.50508547", "0.50235844", "0.49738976", "0.4950196", "0.49323055", "0.49277037", "0.49023208", "0.4864917", "0.4849586", "0.48492593", "0.4833745", "0.4795869", "0.47937497", "0.4766169", "0.4765236" ]
0.7344879
0
Returns list of item unit prices from list of stock codes.
def getUnitPriceList(self, list_stockCode):
    df = self._df_invoice_original

    list_unitPrice = list()

    for stockCode in list_stockCode:
        unitPrice = df[df.StockCode==stockCode].UnitPrice.unique()[0]
        list_unitPrice.append(unitPrice)
    return list_unitPrice
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getStockCodeList(self, list_description=None):\n list_stockCode = list()\n df = self._df_invoice_original\n \n if list_description is None:\n list_stockCode = list(df.StockCode.unique())\n else :\n for description in list_description:\n stockCode = df[df.Description==description].StockCode.unique()[0]\n list_stockCode.append(stockCode)\n return list_stockCode", "def getDescriptionList(self, list_stockCode=None):\n df = self._df_invoice_original\n\n list_description = list()\n if list_stockCode is None :\n list_description = list(df.Description.unique())\n else:\n for stockCode in list_stockCode:\n description = df[df.StockCode==stockCode].Description.unique()[0]\n list_description.append(description)\n \n return list_description", "def dishlist_prices(n: list) -> list:\r\n return [dish.price for dish in n]", "def get_units(self) -> List[str]:\n result = []\n for elements in self._get_results_list():\n result.append(elements[3])\n return result", "def add_gst (list_of_prices):\n\n add_gst=[]\n for item in list_of_prices:\n list_with_gst = round(item*1.15,2)\n add_gst+=[list_with_gst]\n return add_gst", "def get_lp(s):\n sl = [] \n for stock in s.symbols: \n #creates a list of latest stock prices\n quote = get(stock,\"LON\")\n #changes string to integer and removes ','\n x = (quote.replace(',',''))\n x = float(x)\n sl.append(x)\n return sl", "def get_prices(self):\n price = self.get_price()\n if price:\n return [price]\n return []", "def getCurrencies():", "def calculate_prices(self, good=None):\n\n stock = self.calculate_init_stock(good)\n buy = self.buying_price()\n\n if stock == 0:\n sell = 0\n buy = buy + (buy * 0.5)\n\n elif stock < 500:\n # mild bug: stock, without selling price\n sell = self.selling_price()\n elif stock >= 500:\n # higher production, lower prices\n sell = self.selling_price() / 2\n buy = buy - (buy * 0.5)\n\n return [buy, sell, stock]", "def get_order_lists(self, n_items, n_quantities):\n arr_stock_code = self._df_invoice_original.StockCode.unique()\n arr_stock_code = np.random.choice(arr_stock_code, n_items)\n list_stockCode = list(arr_stock_code)\n list_quantities = np.ones(arr_stock_code.shape[0])\n list_quantities *=n_quantities\n\n return list_stockCode, list_quantities", "def parse_quantities(quantities):\n codes = []; names = []\n\n for q in quantities:\n c, n = parse_quantity(q)\n codes.append(c)\n names.append(n)\n\n return codes, names", "def prices(parsed, appearance_value, removal_value, gate_single_value, gate_double_value):\n\t\tprices = []\n\n\t\tfor item in parsed:\n\t\t\tif item.entityType == \"fence\":\n\t\t\t\tprices.append(QuoteCalculation._fencePrice(item, appearance_value, removal_value))\n\n\t\t\telif item.entityType == \"gate\":\n\t\t\t\tprices.append(QuoteCalculation._gatePrice(item, gate_single_value, gate_double_value, removal_value))\n\n\t\t\t# Not required?\n\t\t\t#elif item.entityType == \"post\":\n\t\t\t\t#prices.append(QuoteCalculation._postPrice(item))\n\n\t\treturn prices", "def return_currency_pairs(self):\n return list(sorted(list(c for c in self.return_24_volume().keys()\n if not c.startswith('total'))))", "def get_coin_price_list(df: pd.DataFrame) -> list:\n return df['rates'].to_list()", "def uCSIsCurrencySymbols(code):\n ret = libxml2mod.xmlUCSIsCurrencySymbols(code)\n return ret", "def get_stock_price(df_excld):\n\n ts = TimeSeries(os.environ['ALPHA_VANTAGE_KEY'])\n\n info = []\n symbols = []\n counter = 0\n\n for t in df_excld['Ticker']:\n\n if counter % 5 == 0:\n time.sleep(65)\n\n i, m = ts.get_daily(symbol=t, 
outputsize='full')\n info.append(i)\n symbols.append(m['2. Symbol'])\n counter += 1\n\n return info, symbols", "def _get_prix_tarif(self,cout,pricelist):\n cr = self._cr\n product=cout.name\n prix_tarif=0\n date=time.strftime('%Y-%m-%d') # Date du jour\n if pricelist:\n #Convertion du lot_mini de US vers UA\n min_quantity = self.env['product.uom']._compute_qty(cout.name.uom_id.id, cout.name.lot_mini, cout.name.uom_po_id.id)\n #TODO : Pour contourner un bug d'arrondi (le 31/01/2017)\n min_quantity=min_quantity+0.00000000001\n #TODO en utilisant la fonction repr à la place de str, cela ne tronque pas les décimales\n SQL=\"\"\"\n select ppi.price_surcharge\n from product_pricelist_version ppv inner join product_pricelist_item ppi on ppv.id=ppi.price_version_id\n where ppv.pricelist_id=\"\"\"+str(pricelist.id)+ \"\"\" \n and min_quantity<=\"\"\"+repr(min_quantity)+\"\"\"\n and (ppv.date_start <= '\"\"\"+date+\"\"\"' or ppv.date_start is null)\n and (ppv.date_end >= '\"\"\"+date+\"\"\"' or ppv.date_end is null)\n\n and ppi.product_id=\"\"\"+str(product.id)+ \"\"\" \n and (ppi.date_start <= '\"\"\"+date+\"\"\"' or ppi.date_start is null)\n and (ppi.date_end >= '\"\"\"+date+\"\"\"' or ppi.date_end is null)\n order by ppi.sequence\n limit 1\n \"\"\"\n cr.execute(SQL)\n result = cr.fetchall()\n for row in result:\n coef=1\n if min_quantity:\n coef=cout.name.lot_mini/min_quantity\n prix_tarif=row[0]/coef\n\n\n\n return prix_tarif", "def currency_codes():\n return list(settings.CURRENCIES)", "async def _get_stock_data(self, stocks: list):\n\t\tapi_url = 'https://sandbox.tradier.com/v1/markets/quotes'\n\t\tstocks = ','.join(stocks)\n\t\tif not stocks:\n\t\t\treturn []\n\t\ttoken = await self.bot.get_shared_api_tokens('stocks')\n\t\ttoken = token.get('key', None)\n\t\tif not token:\n\t\t\traise ValueError(\n\t\t\t\t'You need to set an API key!\\n'\n\t\t\t\t'Follow this guide for instructions on how to get one:\\n'\n\t\t\t\t'<https://github.com/Flame442/FlameCogs/blob/master/stocks/setup.md>'\n\t\t\t)\n\t\tparams = {'symbols': stocks}\n\t\theaders = {'Authorization': f'Bearer {token}', 'Accept': 'application/json'}\n\t\tasync with aiohttp.ClientSession() as session:\n\t\t\tasync with session.get(api_url, params=params, headers=headers) as r:\n\t\t\t\ttry:\n\t\t\t\t\tr = await r.json()\n\t\t\t\texcept aiohttp.client_exceptions.ContentTypeError:\n\t\t\t\t\t#This might happen when being rate limited, but IDK for sure...\n\t\t\t\t\traise ValueError('Could not get stock data. 
The API key entered is most likely not valid.')\n\t\tr = r['quotes']\n\t\tif 'quote' not in r:\n\t\t\treturn []\n\t\tr = r['quote']\n\t\tif not isinstance(r, list):\n\t\t\tr = [r]\n\t\tstock = {\n\t\t\tx['symbol']: {\n\t\t\t\t'price': max(1, int(x['last'] * 100)),\n\t\t\t\t#New API does not give this info.\n\t\t\t\t'total_count': None, #int(x['marketCap'] / x['last']) if x['marketCap'] else None\n\t\t\t} for x in r if 'last' in x and x['last'] is not None\n\t\t}\n\t\treturn stock", "def bond_price(fv, c,n,m,r):\n return sum([a*b for a,b in zip(discount_factors(r,n,m),bond_cashflows(fv, c, n, m))])", "def __call__(self):\n currency_data = getUtility(ICurrencyData)\n currency_data_list = currency_data.currency_data_list()\n results = {}\n for i in currency_data_list:\n results.update({i['code']:i['decimal']})\n return results", "def test_lowest_price_many_listings(self):\n listings = steam_market.get_lowest_price(soup=get_soup_from_path(TEST_FILE_MANY_RESULTS))\n self.assertEqual('0,03€', listings)", "def to_quantities(\n values: list,\n uncertainties: list\n) -> typing.List[IndexedQuantity]:\n\n return [\n IndexedQuantity(i, v)\n for i, v in enumerate(mstats.values.join(values, uncertainties))\n ]", "def getListOfUnits(self, *args):\n return _libsbml.UnitDefinition_getListOfUnits(self, *args)", "def get_stocks():\n print(\"fetching remote...\")\n code_dataframes = pd.read_html(\n 'http://kind.krx.co.kr/corpgeneral/corpList.do?method=download&searchType=13', header=0)[0]\n # 우리가 필요한 것은 회사명과 종목코드이기 때문에 필요없는 column들은 제외해준다.\n print(\"parsing and filtering data...\")\n code_dataframes.종목코드 = code_dataframes.종목코드.map('{:06d}'.format)\n # 한글로된 컬럼명을 영어로 바꿔준다.\n code_dataframes = code_dataframes[['회사명', '종목코드']]\n code_dataframes = code_dataframes.rename(\n columns={'회사명': 'name', '종목코드': 'code'})\n codes = code_dataframes['code']\n names = code_dataframes['name']\n stocks = []\n for i in range(len(names)):\n stocks.append({\n 'name': names[i],\n 'code': codes[i]\n })\n return stocks", "def get_stock_price(stock):\n pass", "def get_units(self, names):\n # Make sure names is a list\n if isinstance(names, str) or isinstance(names, unicode):\n names = [names]\n \n # Return the list of units\n ans = []\n for name in names:\n if name in self.interp_ds:\n ans.append(self.interp_ds[name].attrs['units'])\n else:\n ans.append('Not Available in Dataset')\n \n return ans", "def get_prices(symbol, sd, ed):\n\n # get symbol list and date range\n syms = [symbol]\n dates = pd.date_range(sd, ed)\n\n # Get prices data, automatically adds SPY\n prices_all = ut.get_data(syms, dates)\n\n # normalize price, price[t] /= price[0]\n prices_all = ind.normalize(prices_all)\n\n prices = prices_all[syms] # only portfolio symbols\n prices_SPY = prices_all['SPY'] # only SPY, for comparison later\n # if self.verbose: print prices\n\n return prices", "def all_currency_codes():\n return [(a, CURRENCIES[a].name) for a in CURRENCIES]", "def construct_futures_symbols(symbol, start_year=2015, end_year=2017):\n futures = []\n # March, June, September and December delivery codes\n months = 'HMUZ'\n for y in range(start_year, end_year+1):\n for m in months:\n futures.append('%s%s%s' % (symbol, m, y))\n return futures" ]
[ "0.6023552", "0.5827989", "0.5810261", "0.57817996", "0.57182974", "0.5632579", "0.55539757", "0.5493873", "0.5484011", "0.54804665", "0.54708654", "0.5443625", "0.5422661", "0.5405019", "0.5299616", "0.5295281", "0.527327", "0.5272383", "0.52600485", "0.52459085", "0.52217275", "0.52160186", "0.52093273", "0.5187559", "0.5186118", "0.517191", "0.5165962", "0.5137883", "0.5137879", "0.5132765" ]
0.8246834
0
Returns list of item descriptions from list of stock codes.
def getDescriptionList(self, list_stockCode=None):
    df = self._df_invoice_original

    list_description = list()
    if list_stockCode is None:
        list_description = list(df.Description.unique())
    else:
        for stockCode in list_stockCode:
            description = df[df.StockCode==stockCode].Description.unique()[0]
            list_description.append(description)

    return list_description
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getStockCodeList(self, list_description=None):\n list_stockCode = list()\n df = self._df_invoice_original\n \n if list_description is None:\n list_stockCode = list(df.StockCode.unique())\n else :\n for description in list_description:\n stockCode = df[df.Description==description].StockCode.unique()[0]\n list_stockCode.append(stockCode)\n return list_stockCode", "def create_not_included_list(codes):\n string = '\\\\begin{itemize}\\n'\n for code in codes:\n title = get_course_title_only(code)\n string += '\\\\item{' + title + '}\\n'\n string += '\\\\end{itemize}\\n'\n return string", "def get_order_lists(self, n_items, n_quantities):\n arr_stock_code = self._df_invoice_original.StockCode.unique()\n arr_stock_code = np.random.choice(arr_stock_code, n_items)\n list_stockCode = list(arr_stock_code)\n list_quantities = np.ones(arr_stock_code.shape[0])\n list_quantities *=n_quantities\n\n return list_stockCode, list_quantities", "def getUnitPriceList(self, list_stockCode):\n df = self._df_invoice_original\n\n list_unitPrice = list()\n \n for stockCode in list_stockCode:\n unitPrice = df[df.StockCode==stockCode].UnitPrice.unique()[0]\n list_unitPrice.append(unitPrice)\n return list_unitPrice", "def _getListing(self):\n\n # lets assure consistent litsting order\n items = self._items.items()\n items.sort()\n return [ \"%s%s%s: %s\" % (_def_sep, str(x[1]), _def_sep, x[1].__doc__)\n for x in items ]", "def parse_quantities(quantities):\n codes = []; names = []\n\n for q in quantities:\n c, n = parse_quantity(q)\n codes.append(c)\n names.append(n)\n\n return codes, names", "def get_stocks():\n print(\"fetching remote...\")\n code_dataframes = pd.read_html(\n 'http://kind.krx.co.kr/corpgeneral/corpList.do?method=download&searchType=13', header=0)[0]\n # 우리가 필요한 것은 회사명과 종목코드이기 때문에 필요없는 column들은 제외해준다.\n print(\"parsing and filtering data...\")\n code_dataframes.종목코드 = code_dataframes.종목코드.map('{:06d}'.format)\n # 한글로된 컬럼명을 영어로 바꿔준다.\n code_dataframes = code_dataframes[['회사명', '종목코드']]\n code_dataframes = code_dataframes.rename(\n columns={'회사명': 'name', '종목코드': 'code'})\n codes = code_dataframes['code']\n names = code_dataframes['name']\n stocks = []\n for i in range(len(names)):\n stocks.append({\n 'name': names[i],\n 'code': codes[i]\n })\n return stocks", "def itemnames():\n g = ['KIS_NA_39', 'VII_57', 'MX_48', 'MX_56', 'KIS_NA_42', 'VII_54',\n 'MX_S_48', 'MX_S_52', 'MX_52', 'KIS_NA_45', 'KIS_NA_51', 'MIP_45',\n 'MIP_49', 'MIP_52', 'MIP_plus_48', 'MIP_plus_51', 'MX_42', 'MX_45',\n 'MIP_G_42', 'KIS_42', 'KIS_NA_48']\n return(g)", "def getAllDescWords(itemList):\r\n itemList = list(set(itemList)) # make itemList unique\r\n descWords = []\r\n for item in itemList:\r\n descWords.extend(worldItems[item][DESCWORDS])\r\n return list(set(descWords))", "def get_items_for_catalog(catalog_id):\n pass", "def description(self):\n item_counts = [f'{i.quantity}x {i.item.name}' for i in self.items]\n return ','.join(item_counts)", "def ConstructList(title, items):\n buf = cStringIO.StringIO()\n fmt = 'list[title=\"{title}\",always-display-title]'.format(title=title)\n resource_printer.Print(sorted(set(items)), fmt, out=buf)\n return buf.getvalue()", "def list_items(self):\n click.echo(\"ID --|-- Item Title\")\n for index, item in enumerate(self.items):\n click.echo(\" {} --|-- {}\".format(index, item.title))", "def serve_recos(ids, ref_catalog):\r\n desc_list = []\r\n for desc_id in ids:\r\n desc_list.append(ref_catalog[ref_catalog['id'] == desc_id].iloc[0]['description'])\r\n return desc_list", "def 
get_code_mapping( id ):\n returnVal = []\n theCodes = _theRegistry.get_code( id )\n codes = theCodes.get_codes()\n descs = theCodes.get_descriptions()\n for (code, desc) in map(None, codes, descs):\n returnVal.append( { 'code' : code, 'description' : desc } )\n return returnVal", "def list():\n\n return cache.codeTableList()", "def getArtistsofArtwork(catalog, codes):\n return model.getArtistname(catalog,codes)", "def list(self):\n return 'Decks available: \\n{}'.format(\"\\n\".join([\n 'Deck {}: {} ({} cards)'.format(deck['id'], deck['title'], len(deck['cards']))\n for key, deck in self.decks.items()\n ]))", "def consult_books(self, bar_code: str):\n try:\n book_data = []\n self.db.cursor.execute('SELECT * from books WHERE bar_code = %s', (bar_code,))\n for i in self.db.cursor.fetchall():\n book_data.append(i)\n except Exception as error:\n print(error)\n else:\n print(f\"ID BOOK: {book_data[0][0]}\\n\"\n f\"TITLE: {book_data[0][1]}\\n\"\n f\"AUTHOR: {book_data[0][2]}\\n\"\n f\"PRICE: R$:{book_data[0][3]}\\n\"\n f\"BAR CODE: {book_data[0][4]}\\n\"\n f\"STOCK: {book_data[0][5]}\")", "def hs_code_process(si):\n hs_code = re.sub(r'\\W+', '', si.get('hs_code', ''))\n descrip = re.sub(r'\\W+', '', si.get('description_of_goods', ''))\n bl_type = re.sub(r'\\W+', '', si.get('bl_type', ''))\n hs_codes = []\n if hs_code != '' and hs_code in descrip:\n hs_raw = si.pop('hs_code')\n for raw_line in hs_raw.split('\\n'):\n line = re.sub(r'\\W+', '', raw_line).upper()\n if 'HSCODE' in line:\n remain = line.replace('HSCODE', '').replace('\\n', '')\n remain = re.sub(r'[A-Z]+', '', remain)\n if remain.isdigit() and len(remain) > 4:\n hs_codes.append(remain)\n else:\n # CODE in line below\n hs_line_no = hs_raw.split('\\n').index(raw_line)\n for hs_line in hs_raw.split('\\n')[hs_line_no + 1:]:\n if len(re.findall(r'[a-zA-Z]+', hs_line)) < 1:\n for hs_code in re.findall(r'\\d+', hs_line):\n hs_codes.append(hs_code)\n else:\n break\n\n bl_type = si.get('bl_type', '')\n\n elif hs_code != '' and hs_code in bl_type:\n hs_raw = si.pop('hs_code')\n for raw_info in hs_raw.split('/'):\n info = re.sub(r'\\W+', '', raw_info).upper()\n if 'HSCODE' in info:\n hs_code = info.replace('HSCODE', '').replace('\\n', '')\n hs_code = re.sub(r'[A-Z]+', '', hs_code)\n hs_codes.append(hs_code)\n break\n bl_type = hs_raw.split('/')[0]\n\n else:\n hs_code = re.sub(r'[^\\d]+', '', hs_code)\n hs_codes.append(hs_code)\n bl_type = si.get('bl_type', '')\n\n return hs_codes, bl_type", "def getNamesFromDescr(descr):\n i = getIter(descr)\n if not i:\n return\n\n try:\n item = i.next()\n while item:\n if isinstance(item[1], str):\n yield item[0]\n else:\n l = []\n for j in getNamesFromDescr(item[1]):\n l.append(j)\n r = (item[0], l)\n yield r\n item = i.next()\n except StopIteration:\n pass", "def codes(self):\n return [card.code for card in self.cards]", "def get_snippet_info_list(self):\n snippets = []\n for snippet in self.snippets:\n snippets.append([snippet[\"title\"], snippet[\"description\"]])\n return snippets", "def product_db() -> List[Text]:\n\n return [\n \"credit\",\n \"forex\",\n \"debit\",\n \"atm\"\n ]", "def run(self):\n logging.debug('List Available Recipes')\n if self.short:\n print(' '.join(pakit.recipe.RDB.names(desc=False)))\n return\n\n available = ['Program Description']\n available.extend(pakit.recipe.RDB.names(desc=True))\n\n msg = 'Available Recipes:'\n msg += PREFIX + PREFIX.join(available)\n print(msg)\n return msg", "def list():\n\n click.secho('List of libraries in SJSU-Dev2\\n', fg='white', bold=True)\n 
package_registry = GetListOfSJSUDev2Repos()\n library_list = [f'{x : <20}: {package_registry[x]}'\n for x in package_registry if x.startswith('lib')]\n print('\\n'.join(library_list))", "def codelists():\n return CodelistSet()", "def extract_promocodes(self):\n promocode_description = self.text\n\n sentences: list = self._split_by_sentences(promocode_description)\n\n sentence_with_promocode = promocode_description # no needed\n\n promocodes = ()\n\n for sentence in sentences:\n if any(keyword in sentence.lower()\n for keyword in (\"промокод\", \"купон\", \"промо-код\", )):\n\n sentence_with_promocode = sentence\n\n promocodes: list = \\\n self.get_promocodes(sentence_with_promocode,\n parser_constants.instagram_patterns)\n if promocodes:\n break\n # TODO:\n # make probabilities and do not break\n # continue iter by senteces and search4 promo in every\n # after that (we know that here is 1 promo)\n # we can choose the most suitable coupon\n\n for p in promocodes:\n if p and len(p) >= 3:\n promocode = p\n if self.is_valid_promocode_morph_check(promocode):\n break\n else:\n return []\n\n if any(forbidden_promocode in promocode.lower()\n for forbidden_promocode in\n parser_constants.forbidden_promocodes):\n\n return []\n\n expiration_date = self.parse_date(promocode_description)\n\n for key in parser_constants.replacement_table.keys():\n promocode_description = \\\n promocode_description.replace(\n key, parser_constants.replacement_table[key]\n )\n\n return [data_classes.Promocode(\n coupon=promocode,\n promoCodeDescription=promocode_description,\n estimated_date=expiration_date,\n source=self.source\n )]", "def get_list_html(self, items):\n html = \"\"\"\n <html>\n\t\t\t<head>\n\t\t\t\t<title>OpenFDA Cool App</title>\n\t\t\t</head>\n\t\t\t<body>\n <ol>\n \"\"\"\n\n for item in items:\n html += \"<li>\" + item + \"</li>\\n\"\n\n html += \"\"\"\n </ol>\n\t\t\t</body>\n </html>\n \"\"\"\n\n return html", "def get_pcode_list(self) -> List[str]:\n return self.pcodes" ]
[ "0.65685076", "0.566973", "0.5336407", "0.5295758", "0.51715976", "0.51711005", "0.5148782", "0.5139008", "0.508993", "0.5061929", "0.5032067", "0.49966037", "0.49956048", "0.4957243", "0.4957215", "0.4956653", "0.49447757", "0.49301252", "0.4907753", "0.49050713", "0.4902797", "0.48980734", "0.4886963", "0.4886797", "0.48835567", "0.48771304", "0.48710713", "0.48617464", "0.4855102", "0.4849284" ]
0.7398652
0
Creates a new dataframe with invoice lines issued from the given parameters. Once done, the new dataframe is aggregated with the original one.
def create_customer_df_invoice_line(self, customerID, list_stockCode\
    , list_quantity, invoiceDate):
    dict_invoice = dict()

    dict_invoice['Quantity'] = list_quantity
    dict_invoice['StockCode'] = list_stockCode

    #------------------------------------------------------------------------
    # Build invoiceDate from local current time
    #------------------------------------------------------------------------
    if invoiceDate is None:
        time_struct = time.localtime()
        invoiceDate = str(time_struct.tm_year)+'-'+str(time_struct.tm_mon)\
        +'-'+str(time_struct.tm_mday)
        invoiceDate +=' '
        invoiceDate +=str(time_struct.tm_hour)+':'+str(time_struct.tm_min)\
        +':'+str(time_struct.tm_sec)
        invoiceDate = pd.Timestamp(invoiceDate)
    else:
        pass

    #------------------------------------------------------------------------
    # Lists initialization
    #------------------------------------------------------------------------
    list_customerID = list()
    list_invoiceNo = list()
    list_invoiceDate = list()
    list_invoice_line_index = list()

    #------------------------------------------------------------------------
    # Increase Invoice number
    #------------------------------------------------------------------------
    invoiceNo = max(self._df_invoice_original.InvoiceNo)
    invoiceNo += 1

    #------------------------------------------------------------------------
    # Get latest invoice line index value
    #------------------------------------------------------------------------
    invoice_line_index = max(self._df_invoice_original.index)

    #------------------------------------------------------------------------
    # Build lists for CustomerID, InvoiceNo, InvoiceDate
    # A list of incremented indexes is built for new rows.
    #------------------------------------------------------------------------
    for quantity in list_quantity:
        list_customerID.append(customerID)
        list_invoiceNo.append(invoiceNo)
        list_invoiceDate.append(invoiceDate)
        invoice_line_index += 1
        list_invoice_line_index.append(invoice_line_index)

    dict_invoice['CustomerID'] = list_customerID
    dict_invoice['InvoiceNo'] = list_invoiceNo
    dict_invoice['InvoiceDate'] = list_invoiceDate

    #------------------------------------------------------------------------
    # Get description list from list of stock codes.
    #------------------------------------------------------------------------
    list_description = self.getDescriptionList(list_stockCode)
    dict_invoice['Description'] = list_description

    #------------------------------------------------------------------------
    # Get unit price list from list of stock codes.
    #------------------------------------------------------------------------
    list_unitPrice = self.getUnitPriceList(list_stockCode)
    dict_invoice['UnitPrice'] = list_unitPrice

    #------------------------------------------------------------------------
    # Dataframe with new invoices lines is created.
    #------------------------------------------------------------------------
    df_invoice_line \
        = pd.DataFrame(dict_invoice, columns=dict_invoice.keys()\
        , index=list_invoice_line_index)

    return df_invoice_line
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def merge_purchase_invoice(self):\r\n active_id = self.env['purchase.order'].browse(self.env['purchase.order']._context.get('active_ids'))\r\n journal_id = self.env['account.journal'].search([('type', '=', 'purchase')]) \r\n active_id_count = 0\r\n active_count = 0\r\n exist_vendor = []; invoice = [];exist_vendors = [];ctx = ();invoice_id = []\r\n for rec in active_id : \r\n po_reference = self.env['account.invoice'].search([('origin', 'like', rec.name)])\r\n active_count = len(active_id)\r\n if rec.picking_count >= 1 and rec.picking_count != rec.invoice_count:\r\n len_name = [] \r\n for inv in po_reference: \r\n len_name = inv.origin.split(\":\") \r\n if rec.name in len_name:\r\n if po_reference.state == 'draft':\r\n for record in po_reference.invoice_line_ids:\r\n print (record.line_id)\r\n for res in rec.order_line:\r\n if res.id == record.line_id: \r\n record.write({'quantity':res.qty_received})\r\n res.write({'qty_invoiced':record.quantity})\r\n \r\n else:\r\n \r\n po_list = [];line_values = {};lines = {};purchase = []\r\n if rec.state in 'purchase' and rec.invoice_status in 'to invoice':\r\n purchase.append(rec.id)\r\n active_id_count = len(purchase)\r\n if rec.partner_id.id in exist_vendor:\r\n for inv in invoice:\r\n if inv['partner_id'] == rec.partner_id.id:\r\n for recc in rec.order_line:\r\n if rec.picking_count > 1 and rec.invoice_count >= 1:\r\n qty_received = recc.qty_received - recc.qty_invoiced \r\n else:\r\n qty_received = recc.qty_received \r\n line_values = (0, 0, {'product_id': recc.product_id.id,\r\n 'quantity': qty_received ,\r\n 'price_unit': recc.price_unit,\r\n 'invoice_line_tax_ids': [(6, 0, recc.taxes_id and recc.taxes_id.ids) or False] ,\r\n 'price_subtotal': recc.price_subtotal,\r\n 'product_uom': recc.product_uom.id,\r\n 'name': recc.name,\r\n 'account_id': journal_id.default_debit_account_id.id ,\r\n 'line_id':recc.id\r\n }) \r\n inv['invoice_line_ids'].append(line_values)\r\n inv['origin'] = inv['origin'] + ':' + rec.name\r\n if rec.partner_id.id not in exist_vendor:\r\n exist_vendors.append(rec.partner_id.id) \r\n else: \r\n for recc in rec.order_line:\r\n if rec.picking_count > 1 and rec.invoice_count >= 1:\r\n qty_received = recc.qty_received - recc.qty_invoiced \r\n else:\r\n qty_received = recc.qty_received\r\n line_values = (0, 0, {'product_id': recc.product_id.id,\r\n 'quantity': qty_received,\r\n 'price_unit': recc.price_unit,\r\n 'invoice_line_tax_ids': [(6, 0, recc.taxes_id and recc.taxes_id.ids)or False],\r\n 'price_subtotal': recc.price_subtotal,\r\n 'product_uom': recc.product_uom.id,\r\n 'name': recc.name,\r\n 'account_id': journal_id.default_debit_account_id.id,\r\n 'line_id':recc.id\r\n }) \r\n print (rec.id)\r\n po_list.append(line_values) \r\n invoice.append({'origin':rec.name, 'partner_id': rec.partner_id.id, 'invoice_line_ids':po_list, 'account_id': rec.partner_id.property_account_payable_id.id, 'type': 'in_invoice', 'journal_id':journal_id.id,'date_invoice':datetime.today()}) \r\n if rec.partner_id.id not in exist_vendor:\r\n exist_vendor.append(rec.partner_id.id) \r\n \r\n else:\r\n po_list = [];line_values = {};lines = {};purchase = []\r\n if rec.state in 'purchase' and rec.invoice_status in 'to invoice':\r\n purchase.append(rec.id)\r\n active_id_count = len(purchase)\r\n if rec.partner_id.id in exist_vendor:\r\n for inv in invoice:\r\n if inv['partner_id'] == rec.partner_id.id:\r\n for recc in rec.order_line:\r\n if rec.picking_count > 1 and rec.invoice_count >= 1:\r\n qty_received = recc.qty_received - recc.qty_invoiced \r\n 
else:\r\n qty_received = recc.qty_received\r\n line_values = (0, 0, {'product_id': recc.product_id.id,\r\n 'quantity': qty_received ,\r\n 'price_unit': recc.price_unit,\r\n 'invoice_line_tax_ids': [(6, 0, recc.taxes_id and recc.taxes_id.ids) or False] ,\r\n 'price_subtotal': recc.price_subtotal,\r\n 'product_uom': recc.product_uom.id,\r\n 'name': recc.name,\r\n 'account_id': journal_id.default_debit_account_id.id ,\r\n 'line_id':recc.id\r\n }) \r\n inv['invoice_line_ids'].append(line_values)\r\n inv['origin'] = inv['origin'] + ':' + rec.name\r\n if rec.partner_id.id not in exist_vendor:\r\n exist_vendors.append(rec.partner_id.id) \r\n else: \r\n for recc in rec.order_line:\r\n if rec.picking_count > 1 and rec.invoice_count >= 1:\r\n qty_received = recc.qty_received - recc.qty_invoiced \r\n else:\r\n qty_received = recc.qty_received\r\n line_values = (0, 0, {'product_id': recc.product_id.id,\r\n 'quantity': qty_received,\r\n 'price_unit': recc.price_unit,\r\n 'invoice_line_tax_ids': [(6, 0, recc.taxes_id and recc.taxes_id.ids)or False],\r\n 'price_subtotal': recc.price_subtotal,\r\n 'product_uom': recc.product_uom.id,\r\n 'name': recc.name,\r\n 'account_id': journal_id.default_debit_account_id.id,\r\n 'line_id':recc.id\r\n }) \r\n print (rec.id)\r\n po_list.append(line_values) \r\n invoice.append({'origin':rec.name, 'partner_id': rec.partner_id.id, 'invoice_line_ids':po_list, 'account_id': rec.partner_id.property_account_payable_id.id, 'type': 'in_invoice', 'journal_id':journal_id.id,'date_invoice':date.today()}) \r\n if rec.partner_id.id not in exist_vendor:\r\n exist_vendor.append(rec.partner_id.id) \r\n \r\n invoices = []\r\n invoice_counts = 0\r\n for record in invoice:\r\n invoice_id = self.env['account.invoice'].create(record)\r\n invoices.append(invoice_id.id)\r\n invoice_counts = len(invoices)\r\n if active_id_count == 1:\r\n if invoice_counts == 1:\r\n form_view = self.env.ref('purchase.view_invoice_supplier_purchase_form').id\r\n tree_view = self.env.ref('account.invoice_tree').id \r\n return{\r\n 'name': _('Invoice'),\r\n 'type':'ir.actions.act_window',\r\n 'view_type':'form',\r\n 'view_mode':'form,tree',\r\n 'res_model':'account.invoice',\r\n 'res_id':invoices[0],\r\n 'views_id':False,\r\n 'views':[(form_view , 'form'), (tree_view , 'tree')],\r\n 'domain':[('id', 'in', invoices)],\r\n 'target': 'current',\r\n } \r\n else: \r\n form_view = self.env.ref('account.invoice_supplier_form').id\r\n tree_view = self.env.ref('account.invoice_supplier_tree').id \r\n return{\r\n 'name': _('Invoice'),\r\n 'type':'ir.actions.act_window',\r\n 'view_type':'form',\r\n 'view_mode':'form,tree',\r\n 'res_model':'account.invoice',\r\n 'views_id':True,\r\n 'views':[(tree_view , 'tree'), (form_view , 'form')],\r\n 'domain':[('id', 'in', invoices)],\r\n 'target': 'current',\r\n }", "def _prepare_invoice_lines(self, exchange_line, order_line):\n invoice_type = {\n 'sale.order.line': {\n 'higher': 'out_invoice', 'lower': 'out_refund',\n 'type': 'sale', 'field': 'exchange_sale_line_id'\n },\n 'purchase.order.line': {\n 'higher': 'in_invoice', 'lower': 'in_refund',\n 'type': 'purchase', 'field': 'exchange_purchase_line_id'\n },\n }\n product = exchange_line.exchange_product_id or exchange_line.product_id\n data = {\n 'invoice_type': False,\n 'values': {\n 'product_id': product.id,\n 'quantity': exchange_line.quantity,\n 'name': 'Exchange for [%s]' % exchange_line.product_id.display_name,\n }\n }\n if exchange_line.exchange_product_id or \\\n exchange_line.price_subtotal > order_line.price_subtotal:\n 
data['invoice_type'] = invoice_type[order_line._name]['higher']\n elif exchange_line.price_subtotal < order_line.price_subtotal:\n data['invoice_type'] = invoice_type[order_line._name]['lower']\n else:\n return {}\n data[invoice_type[order_line._name]['type']] = order_line.order_id\n data['values'][invoice_type[order_line._name]['field']] = order_line.id\n data['values']['price_unit'] = exchange_line.price_unit\n # TODO i think we should take the different between prices NOT the all price\n # abs(exchange_line.price_unit - order_line.price_unit)\n return data", "def make_invoices(self):\n for invoice in self.policy.invoices:\n db.session.delete(invoice)\n db.session.commit()\n\n billing_schedules = {'Annual': None, 'Semi-Annual': 3, 'Quarterly': 4, 'Monthly': 12}\n\n invoices = []\n first_invoice = Invoice(self.policy.id,\n self.policy.effective_date, # bill_date\n self.policy.effective_date + relativedelta(months=1), # due\n self.policy.effective_date + relativedelta(months=1, days=14), # cancel\n self.policy.annual_premium)\n invoices.append(first_invoice)\n\n if self.policy.billing_schedule == \"Annual\":\n pass\n elif self.policy.billing_schedule == \"Two-Pay\":\n first_invoice.amount_due = first_invoice.amount_due / billing_schedules.get(self.policy.billing_schedule)\n for i in range(1, billing_schedules.get(self.policy.billing_schedule)):\n months_after_eff_date = i*6\n bill_date = self.policy.effective_date + relativedelta(months=months_after_eff_date)\n invoice = Invoice(self.policy.id,\n bill_date,\n bill_date + relativedelta(months=1),\n bill_date + relativedelta(months=1, days=14),\n self.policy.annual_premium / billing_schedules.get(self.policy.billing_schedule))\n invoices.append(invoice)\n elif self.policy.billing_schedule == \"Quarterly\":\n first_invoice.amount_due = first_invoice.amount_due / billing_schedules.get(self.policy.billing_schedule)\n for i in range(1, billing_schedules.get(self.policy.billing_schedule)):\n months_after_eff_date = i*3\n bill_date = self.policy.effective_date + relativedelta(months=months_after_eff_date)\n invoice = Invoice(self.policy.id,\n bill_date,\n bill_date + relativedelta(months=1),\n bill_date + relativedelta(months=1, days=14),\n self.policy.annual_premium / billing_schedules.get(self.policy.billing_schedule))\n invoices.append(invoice)\n elif self.policy.billing_schedule == \"Monthly\":\n first_invoice.amount_due = first_invoice.amount_due / billing_schedules.get(self.policy.billing_schedule)\n for i in range(1, billing_schedules.get(self.policy.billing_schedule)):\n months_after_eff_date = i\n bill_date = self.policy.effective_date + relativedelta(months=months_after_eff_date)\n invoice = Invoice(self.policy.id,\n bill_date,\n bill_date + relativedelta(months=1),\n bill_date + relativedelta(months=1, days=14),\n self.policy.annual_premium / billing_schedules.get(self.policy.billing_schedule))\n invoices.append(invoice)\n else:\n print \"You have chosen a bad billing schedule.\"\n\n logger.info(str(len(invoices)) + \" invoices generated for policy %s\" % self.policy.id)\n\n for invoice in invoices:\n db.session.add(invoice)\n db.session.commit()", "def _create_invoice(self):\n self.ensure_one()\n partner = self.member_id.partner_id\n invoice = self.env['account.invoice'].create({\n 'partner_id': partner.id,\n 'account_id': partner.property_account_receivable_id.id,\n 'fiscal_position_id': partner.property_account_position_id.id\n })\n for line in self.line_ids:\n product = line.activity_id.product_id\n # Handling of invoice lines : 
needs cache record for onchange, then\n # real writing...\n invoice_line = self.env['account.invoice.line'].new({\n 'product_id': product.id,\n 'invoice_id': invoice.id\n })\n invoice_line._onchange_product_id()\n line_values = dict(invoice_line._cache)\n line_values['price_unit'] = line.price\n invoice_line = self.env['account.invoice.line'].create(line_values)\n invoice.compute_taxes()\n line.registration_id.invoice_line_id = invoice_line.id\n return invoice", "def data_process_deprecated(self, CustomerID, InvoiceDate, InvoiceNo, Description, Quantity\\\n , UnitPrice ):\n dict_invoice = {'InvoiceDate':InvoiceDate, 'Description':Description\\\n , 'Quantity':Quantity, 'UnitPrice':UnitPrice}\n dict_invoice['CustomerID'] = CustomerID\n dict_invoice['InvoiceNo'] = InvoiceNo\n df_invoice_line \\\n = pd.DataFrame(dict_invoice, columns=dict_invoice.keys(), index=[0])\n \n self.data_transform(df_invoice_line)\n\n #self.feature_rfm_encode()\n\n self.feature_scale()\n\n self.list_feature_drop()\n\n self.feature_description_nlp()\n return", "def invoice_items(self,org_id=None,query={}):\n if org_id is None:\n org_id = self.org_id\n query_end_date = datetime.strptime(query['endDate'],'%Y-%m-%dT%H:%M:%SZ')\n # Given a 'query_end_date' to find the invoice containing the\n # line items for that date we need to find the invoice which \n # has 'endDate' equal to the end of the month of the `query_end_date`\n query_first_next_month = query_end_date + relativedelta(months=+1) - relativedelta(days=(query_end_date.day-1))\n target_invoices = []\n invoices = self.invoices(org_id)\n if self.verbose:\n print('Searching invoices org_id={}'.format(org_id))\n print('query={} query_end_date={}'.format(query,query_end_date))\n print('Result keys: {}'.format( invoices['content'].keys() ))\n print('Total result count: {}'.format( invoices['content']['totalCount'] ))\n for invoice in invoices['content']['results']:\n #pprint.pprint(invoice)\n end_date = datetime.strptime(invoice['endDate'],'%Y-%m-%dT%H:%M:%SZ')\n if self.verbose: \n print('invoice({})[\\'endDate\\']={} end_date={}'.format(invoice['id'],invoice['endDate'],end_date))\n if end_date == query_first_next_month:\n target_invoices.append(invoice)\n \n if self.verbose: \n print('Target invoices: {}'.format(target_invoices))\n \n\n target_line_items = []\n for invoice in target_invoices:\n invoice_details = self.invoices(org_id,invoice['id']) \n print('invoice_details: {}'.format(invoice_details))\n for item in invoice_details['content']['lineItems']:\n end_date = datetime.strptime(item['endDate'],'%Y-%m-%dT%H:%M:%SZ')\n if end_date == query_end_date:\n target_line_items.append(item)\n if self.verbose:\n print('target_line_items: {}'.format(target_line_items)) \n return target_line_items", "def _prepare_invoice(self, cr, uid, order, lines, context=None):\n if context is None:\n context = {}\n journal_ids = self.pool.get('account.journal').search(cr, uid,\n [('type', '=', 'sale'), ('company_id', '=', order.company_id.id)],\n limit=1)\n if not journal_ids:\n raise osv.except_osv(_('Error !'),\n _('There is no sales journal defined for this company: \"%s\" (id:%d)') % (order.company_id.name, order.company_id.id))\n\n invoice_vals = {\n 'name': order.client_order_ref or '',\n 'origin': order.name,\n 'type': 'out_invoice',\n 'reference': order.client_order_ref or order.name,\n 'account_id': order.partner_id.property_account_receivable.id,\n 'journal_id': order.partner_id.property_default_sale_invoice_journal.id,\n 'partner_id': order.partner_id.id,\n 
'address_invoice_id': order.partner_invoice_id.id,\n #'address_contact_id': order.partner_order_id.id,\n 'invoice_line': [(6, 0, lines)],\n 'currency_id': order.pricelist_id.currency_id.id,\n 'comment': order.note,\n 'payment_term': order.payment_term and order.payment_term.id or False,\n 'fiscal_position': order.fiscal_position.id or order.partner_id.property_account_position.id,\n 'date_invoice': context.get('date_invoice', False),\n 'company_id': order.company_id.id,\n 'user_id': order.user_id and order.user_id.id or False\n }\n\n # Care for deprecated _inv_get() hook - FIXME: to be removed after 6.1\n invoice_vals.update(self._inv_get(cr, uid, order, context=context))\n\n return invoice_vals", "def get_dataframe() -> pandas.DataFrame:\n database_connection = processing.establish_connection(database_path)\n dataframe = database_connection.to_dataframe(['CustomerId', 'InvoiceDate', 'Total'], table_name)\n database_connection.close()\n dataframe = processing.get_invoice_date_fixed(dataframe)\n analyze_dataframe = dataframe.copy()\n total_sum_dataframe = processing.get_column_sum(analyze_dataframe)\n\n customer_count_dataframe = processing.drop_duplicates(analyze_dataframe)\n customer_count_dataframe = processing.get_column_count(customer_count_dataframe)\n return customer_count_dataframe.merge(total_sum_dataframe, how='inner', on='InvoiceDate')", "def _prepare_invoice(self, cr, uid, order, lines, context=None):\n if context is None:\n context = {}\n journal_id = self.pool['account.invoice'].default_get(cr, uid, ['journal_id'], context=context)['journal_id']\n if not journal_id:\n raise osv.except_osv(_('Error!'),\n _('Please define sales journal for this company: \"%s\" (id:%d).') % (order.company_id.name, order.company_id.id))\n invoice_vals = {\n 'name': order.client_order_ref or '',\n 'origin': order.name,\n 'type': 'out_invoice',\n 'reference': order.client_order_ref or order.name,\n 'account_id': order.partner_invoice_id.property_account_receivable.id,\n 'partner_id': order.partner_invoice_id.id,\n 'journal_id': journal_id,\n 'invoice_line': [(6, 0, lines)],\n 'currency_id': order.pricelist_id.currency_id.id,\n 'comment': order.note,\n 'payment_term': order.payment_term and order.payment_term.id or False,\n 'fiscal_position': order.fiscal_position.id or order.partner_invoice_id.property_account_position.id,\n 'date_invoice': context.get('date_invoice', False),\n 'company_id': order.company_id.id,\n 'user_id': order.user_id and order.user_id.id or False,\n 'section_id' : order.section_id.id,\n 'test_1' :order.test\n }\n\n # Care for deprecated _inv_get() hook - FIXME: to be removed after 6.1\n invoice_vals.update(self._inv_get(cr, uid, order, context=context))\n return invoice_vals", "def make_claim_df(claim_list, columns = ['Sl','Name of Bank','Name of Branch','A/C Number (15 digit)','A/C Title','Amount of Remittance in BDT','Date of A/C Credit','Remittance Received through BEFTN/RTGS','Name of Remittance Collecting/BEFTN Processing Bank','Date of Claim']):\n sl=[]\n nrbc_bank = []\n branch = []\n ac_no = []\n ac_title = []\n amount=[]\n date_account_credit=[]\n channel = []\n other_bank=[]\n claim_date=[]\n i=1\n for claim in claim_list:\n sl.append(i)\n i=i+1\n nrbc_bank.append(\"NRBC Bank Ltd.\")\n branch.append(claim.branch.name.upper())\n ac_no.append(claim.account_no)\n ac_title.append(claim.account_title)\n amount.append(claim.remittance_amount)\n date_account_credit.append(claim.date_account_credit)\n channel.append(claim.get_channel_display())\n 
other_bank.append(claim.collecting_bank.name)\n claim_date.append(claim.date_claim.date())\n dc = {\n 'SL':sl,\n 'Name of Bank':nrbc_bank,\n 'Name of Branch': branch,\n 'A/C Number': ac_no,\n 'A/C Title': ac_title,\n 'Amount of Remittance in BDT': amount,\n 'Date of A/C Credit': date_account_credit,\n 'Remittance Received Through BEFTN/RTGS': channel,\n 'Name of Remittance Processing Bank': other_bank,\n 'Date of Claim': claim_date\n }\n df = pd.DataFrame(dc)\n return df.sort_values(by=['Name of Remittance Processing Bank',])", "def order_process(self, customerID, list_stockCode, list_quantity\\\n , orderDate=None):\n\n segmentID = -1\n\n #-------------------------------------------------------------------------\n # A new customer is created and inserted into data-set.\n #-------------------------------------------------------------------------\n if customerID is None:\n customerID = int(self.createCustomerID())\n else:\n pass\n \n #-------------------------------------------------------------------------\n # A new dataframe with new invoice lines are created.\n #-------------------------------------------------------------------------\n df_invoice_line = self.create_customer_df_invoice_line(customerID\\\n , list_stockCode, list_quantity, orderDate)\n \n #-------------------------------------------------------------------------\n # Original dataframe is updated with customer invoices lines.\n #-------------------------------------------------------------------------\n print(\"order_process : shape before concat= \"+str(self._df_invoice_original.shape))\n self._df_invoice_original \\\n = pd.concat([self._df_invoice_original, df_invoice_line], axis=0)\n print(\"order_process : shape after concat= \"+str(self._df_invoice_original.shape))\n \n #-------------------------------------------------------------------------\n # All invoices lines (including new one) related to customer is retrieved \n # from original dataframe.\n #-------------------------------------------------------------------------\n df_invoice_line_customer \\\n = self.get_customer_history_df_invoice_line(customerID)\n\n #-------------------------------------------------------------------------\n # When calling get_customer_marketSegment(), df_invoice_line_customer is\n # concatened to the original dataframe.\n #------------------------------------------------------------------------- \n segmentID = self.get_customer_marketSegment(df_invoice_line_customer)\n \n return segmentID, customerID", "def create_invoice(self):\n for line in self:\n # if not line.account_id:\n # raise UserError(_('Please Add the incoming Account !!'))\n self.ensure_one()\n journal_id = self.env['account.journal'].search([\n ('type', '=', 'sale')], limit=1)\n inv_line_main = {\n 'name': line.description.name,\n 'price_unit': line.amount or 0.00,\n 'quantity': 1,\n 'discount': line.discount,\n 'account_id': line.description.property_account_income_id.id or line.description.categ_id.property_account_income_categ_id.id or False,\n }\n inv_values = {\n 'partner_id': line.patient_id.partner_id.id,\n 'patient_id': line.patient_id.id,\n 'dentist': line.dentist.id,\n 'move_type': 'out_invoice',\n 'invoice_date': datetime.now().strftime(DF) or False,\n 'journal_id': journal_id and journal_id.id or False,\n 'teeth_id': line.patient_id and line.patient_id.id or False,\n }\n acc_id = self.env['account.move'].create(inv_values)\n acc_id.write({'invoice_line_ids': [(0, 0, inv_line_main)]})\n\n self.write({'invc_id': acc_id.id, 'inv': True})\n context = 
dict(self._context or {})\n wiz_form_id = self.env['ir.model.data'].get_object_reference(\n 'account', 'view_move_form')[1]\n\n return {\n 'view_type': 'form',\n 'view_id': wiz_form_id,\n 'view_mode': 'form',\n 'res_model': 'account.move',\n 'res_id': self.invc_id.id,\n 'type': 'ir.actions.act_window',\n 'target': 'current',\n 'context': context,\n }", "def action_create_invoices(self, data):\n invoice_obj = self.env['account.invoice']\n values = {}\n for val in data:\n values.setdefault(val['invoice_type'], {\n 'order': val.get('sale', val.get('purchase')),\n 'values': []\n })\n values[val['invoice_type']]['values'].append((0, 0, val['values']))\n\n for inv_type, inv_data in values.items():\n invoice = invoice_obj.new(self._prepare_invoice(inv_type))\n invoice._onchange_partner_id()\n inv = invoice._convert_to_write({\n name: invoice[name] for name in invoice._cache\n })\n for _, _, line in inv_data['values']:\n line['account_id'] = inv['account_id']\n inv['invoice_line_ids'] = inv_data['values']\n new_invoice = invoice_obj.sudo().create(inv)\n new_invoice.action_invoice_open()\n inv_data['order'].write({\n 'exchange_invoice_ids': [(4, new_invoice.id)]\n })", "def _get_lines(self, cr, uid, ids, context=None):\n List=[]\n if ids:\n line = self.pool.get('payment.enrich.lines').browse(cr, uid, ids[0], context=context)\n \n record = line.enrich_id\n val = 0.0\n for line in record.enrich_lines:\n if line.state == 'done' :\n val += line.cost\n res = {\n 'paid_amount':val,\n 'residual_amount':record.amount - val,\n }\n record.write(res)\n return List", "def data_transform(self, df) :\n\n #-------------------------------------------------------------------------\n # Copy of given dataframe to be transformed\n #-------------------------------------------------------------------------\n self.df_invoice_line = df\n \n #-------------------------------------------------------------------------\n # Features issued from InvoiceDate are created\n #-------------------------------------------------------------------------\n if self.is_transform_timeFeature is True:\n self.strprint(\"\\n*** Time features transformation ***\")\n self.data_transform_timeFeature()\n\n #-------------------------------------------------------------------------\n # RFM is computed and encoded\n #-------------------------------------------------------------------------\n if self.is_transform_rfm is True:\n self.strprint(\"\\n*** RFM transformation ***\")\n self.data_transform_rfm()\n\n #-------------------------------------------------------------------------\n # NLP features issued from Description are created\n #-------------------------------------------------------------------------\n if self.is_transform_nlp is True:\n self.strprint(\"\\n*** NLP transformation ***\")\n self.data_transform_nlp()\n \n return self.df_invoice_line", "def create_invoices(self, cr, uid, ids, context=None):\n invoice_list = []\n po_obj = self.pool.get('purchase.order')\n inv_line_obj = self.pool.get('account.invoice.line')\n inv_obj = self.pool.get('account.invoice')\n addr_obj = self.pool.get('res.partner')\n journal_obj = self.pool.get('account.journal')\n if context is None:\n context = {}\n\n for purchase_adv_obj in self.browse(cr, uid, ids, context=context):\n for purchase_order in po_obj.browse(cr, uid, context.get('active_ids', []), context=context):\n inv_line_ids = []\n invoice_ids = []\n val = inv_line_obj.product_id_change(cr, uid, [], purchase_adv_obj.product_id.id,\n uom_id=False, partner_id=purchase_order.partner_id.id, 
fposition_id=purchase_order.fiscal_position.id)\n line_id = inv_line_obj.create(cr, uid, {\n 'name': val['value']['name'],\n 'account_id': val['value']['account_id'],\n 'price_unit': purchase_adv_obj.amount,\n 'quantity': purchase_adv_obj.qtty,\n 'discount': False,\n 'uos_id': val['value']['uos_id'],\n 'product_id': purchase_adv_obj.product_id.id,\n 'invoice_line_tax_id': [(6, 0, val['value']['invoice_line_tax_id'])],\n })\n inv_line_ids.append(line_id)\n addr = addr_obj.address_get(cr, uid, [purchase_order.partner_id.id], ['invoice'])\n journal_ids = journal_obj.search(cr, uid, [('type', '=', 'purchase')])\n context.update({'type':'in_invoice','journal_type':'purchase'})\n inv_vals = {\n 'name': purchase_order.partner_ref or purchase_order.name,\n 'origin': purchase_order.name,\n 'type': 'in_invoice',\n 'reference': False,\n 'account_id': purchase_order.partner_id.property_account_payable.id,\n 'journal_id':journal_ids and journal_ids[0] or False,\n 'partner_id': purchase_order.partner_id.id,\n 'address_invoice_id': addr['invoice'],\n 'invoice_line': [(6, 0, inv_line_ids)],\n 'currency_id': purchase_order.pricelist_id.currency_id.id,\n 'comment': '',\n 'payment_term': purchase_order.payment_term_id and purchase_order.payment_term_id.id or False,\n 'fiscal_position': purchase_order.fiscal_position.id or purchase_order.partner_id.property_account_position.id,\n 'prepaid': True\n }\n\n inv_id = inv_obj.create(cr, uid, inv_vals, context=context)\n inv_obj.button_reset_taxes(cr, uid, [inv_id], context=context)\n for invoice in purchase_order.invoice_ids:\n invoice_ids.append(invoice.id)\n invoice_ids.append(inv_id)\n po_obj.write(cr, uid, purchase_order.id, {'invoice_ids': [(6, 0, invoice_ids)]})\n invoice_list.append(inv_id)\n\n if purchase_order.invoice_method in ('picking','order'):\n self.pool.get('purchase.order.line').create(cr, uid, {\n 'order_id': purchase_order.id,\n 'name': val['value']['name'],\n 'date_planned':purchase_order.date_order,\n 'price_unit': -purchase_adv_obj.amount,\n 'product_uom_qty': purchase_adv_obj.qtty,\n 'product_uos': val['value']['uos_id'],\n 'product_uom': val['value']['uos_id'],\n 'product_id': purchase_adv_obj.product_id.id,\n 'adavance_product':True,\n 'discount': False,\n 'taxes_id': [(6, 0, val['value']['invoice_line_tax_id'])],\n }, context=context)\n\n\n context.update({'invoice_id':invoice_list})\n return {\n 'name': 'Open Invoice',\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_model': 'purchase.open.invoice',\n 'type': 'ir.actions.act_window',\n 'target': 'new',\n 'context': context\n }", "def prepare_report(user, from_date, to_date,\n show_which=\"worked\", # \"worked\", \"invoiced\", or \"paid\"\n client_ids=[], project_ids=[]):\n\n if show_which == \"worked\":\n sessions = Session.objects.filter(\n project__client__user=user,\n date__gte=from_date,\n date__lte=to_date\n )\n elif show_which == \"invoiced\":\n sessions = Session.objects.filter(\n project__client__user=user,\n invoice__invoice_date__gte=from_date,\n invoice__invoice_date__lte=to_date\n )\n elif show_which == \"paid\":\n sessions = Session.objects.filter(\n project__client__user=user,\n invoice__paid_date__gte=from_date,\n invoice__paid_date__lte=to_date\n )\n else:\n raise ValueError(\"Invalid value for the 'show_which' argument \"\n \"supplied\")\n\n if client_ids != []:\n sessions = sessions.filter(\n project__client__in=client_ids\n )\n if project_ids != []:\n sessions = sessions.filter(\n project__in=project_ids\n )\n\n # Starting Python 3.6, the dict maintains order as 
inserted\n # When running this on a different computer with older Python,\n # the sessions_per_date was all jumbled-up.\n # https://stackoverflow.com/questions/1867861/dictionaries-how-to-keep-keys-values-in-same-order-as-declared\n date_range = pd.date_range(from_date, to_date).date\n sessions_per_date = {today: sessions.filter(date=today)\n for today in date_range}\n\n total_earned = sum([sesh.get_money_earned() for sesh in sessions])\n\n context = {\n 'sessions': sessions, # obsolete if sessions_per_date will work\n 'from': from_date,\n 'to': to_date,\n 'date_range': date_range,\n 'sessions_per_date': sessions_per_date,\n 'total_earned': total_earned,\n }\n\n if client_ids != []:\n context['clients'] = Client.objects.filter(pk__in=client_ids)\n\n if project_ids != []:\n context['projects'] = Project.objects.filter(pk__in=project_ids)\n\n return context", "def generate_eob(\n self, date_of_service, date_of_eob, insured, invoice_id, cpt_code, charge_amount\n ):\n if insured == \"insured\":\n # first copayments\n copay_amount = np.random.choice(\n self.distributions[\"copay_amounts\"],\n 1,\n p=self.distributions[\"copay_distribution\"],\n )[0]\n if copay_amount > 0:\n invoicelineitemeob = pd.DataFrame(\n {\n \"invoice_id\": [invoice_id],\n \"cpt_code\": [cpt_code],\n \"created_on\": [date_of_service],\n \"copay_amount\": [copay_amount],\n \"adjustment_amount\": [0],\n \"paid_amount\": [0],\n }\n )\n self.InvoiceLineItemsEob = self.InvoiceLineItemsEob.append(\n invoicelineitemeob\n )\n remaining_charge = charge_amount - copay_amount\n else:\n remaining_charge = charge_amount\n # next eob discounts\n eob_discount_percent = np.random.choice(\n self.distributions[\"eob_discount_percentages\"],\n 1,\n p=self.distributions[\"eob_discount_distribution\"],\n )[0]\n if eob_discount_percent > 0:\n insurance_adjustment = remaining_charge * eob_discount_percent / 100\n remaining_charge = remaining_charge - insurance_adjustment\n invoicelineitemeob = pd.DataFrame(\n {\n \"invoice_id\": [invoice_id],\n \"cpt_code\": [cpt_code],\n \"created_on\": [date_of_eob],\n \"copay_amount\": [0],\n \"adjustment_amount\": [insurance_adjustment],\n \"paid_amount\": [0],\n }\n )\n self.InvoiceLineItemsEob = self.InvoiceLineItemsEob.append(\n invoicelineitemeob\n )\n # next handle eob payments where relevant\n eob_payment_percentage = np.random.choice(\n self.distributions[\"eob_payment_percentages\"],\n 1,\n p=self.distributions[\"eob_payment_distribution\"],\n )[0]\n eob_payment_amount = remaining_charge * (eob_payment_percentage / 100.0)\n if eob_payment_amount > 0:\n invoicelineitemeob = pd.DataFrame(\n {\n \"invoice_id\": [invoice_id],\n \"cpt_code\": [cpt_code],\n \"created_on\": [date_of_eob],\n \"copay_amount\": [0],\n \"adjustment_amount\": [0],\n \"paid_amount\": [eob_payment_amount],\n }\n )\n self.InvoiceLineItemsEob = self.InvoiceLineItemsEob.append(\n invoicelineitemeob\n )\n remaining_charge = remaining_charge - eob_payment_amount\n else:\n remaining_charge = charge_amount\n return remaining_charge", "def action_invoice_create(self, grouped=False, final=False):\n if self.invoice_option == 'before_delivery':\n inv_obj = self.env['account.invoice']\n for order in self:\n inv_data = order._prepare_invoice()\n invoice = inv_obj.create(inv_data)\n for inv_line in order.order_line:\n inv_line.invoice_line_create(invoice.id, inv_line.product_uom_qty)\n\n else:\n inv_obj = self.env['account.invoice']\n precision = self.env['decimal.precision'].precision_get('Product Unit of Measure')\n invoices = {}\n 
references = {}\n invoices_origin = {}\n invoices_name = {}\n\n # Keep track of the sequences of the lines\n # To keep lines under their section\n inv_line_sequence = 0\n for order in self:\n group_key = order.id if grouped else (order.partner_invoice_id.id, order.currency_id.id)\n\n # We only want to create sections that have at least one invoiceable line\n pending_section = None\n\n # Create lines in batch to avoid performance problems\n line_vals_list = []\n # sequence is the natural order of order_lines\n for line in order.order_line:\n if line.display_type == 'line_section':\n pending_section = line\n continue\n if float_is_zero(line.qty_to_invoice, precision_digits=precision):\n continue\n if group_key not in invoices:\n inv_data = order._prepare_invoice()\n invoice = inv_obj.create(inv_data)\n references[invoice] = order\n invoices[group_key] = invoice\n invoices_origin[group_key] = [invoice.origin]\n invoices_name[group_key] = [invoice.name]\n elif group_key in invoices:\n if order.name not in invoices_origin[group_key]:\n invoices_origin[group_key].append(order.name)\n if order.client_order_ref and order.client_order_ref not in invoices_name[group_key]:\n invoices_name[group_key].append(order.client_order_ref)\n\n if line.qty_to_invoice > 0 or (line.qty_to_invoice < 0 and final):\n if pending_section:\n section_invoice = pending_section.invoice_line_create_vals(\n invoices[group_key].id,\n pending_section.qty_to_invoice\n )\n inv_line_sequence += 1\n section_invoice[0]['sequence'] = inv_line_sequence\n line_vals_list.extend(section_invoice)\n pending_section = None\n\n inv_line_sequence += 1\n inv_line = line.invoice_line_create_vals(\n invoices[group_key].id, line.qty_to_invoice\n )\n inv_line[0]['sequence'] = inv_line_sequence\n line_vals_list.extend(inv_line)\n\n if references.get(invoices.get(group_key)):\n if order not in references[invoices[group_key]]:\n references[invoices[group_key]] |= order\n\n self.env['account.invoice.line'].create(line_vals_list)\n\n for group_key in invoices:\n invoices[group_key].write({'name': ', '.join(invoices_name[group_key]),\n 'origin': ', '.join(invoices_origin[group_key])})\n sale_orders = references[invoices[group_key]]\n if len(sale_orders) == 1:\n invoices[group_key].reference = sale_orders.reference\n\n if not invoices:\n raise UserError(_(\n 'There is no invoiceable line. If a product has a Delivered quantities invoicing policy, please make sure that a quantity has been delivered.'))\n\n for invoice in invoices.values():\n invoice.compute_taxes()\n if not invoice.invoice_line_ids:\n raise UserError(_(\n 'There is no invoiceable line. If a product has a Delivered quantities invoicing policy, please make sure that a quantity has been delivered.'))\n # If invoice is negative, do a refund invoice instead\n if invoice.amount_total < 0:\n invoice.type = 'out_refund'\n for line in invoice.invoice_line_ids:\n line.quantity = -line.quantity\n # Use additional field helper function (for account extensions)\n for line in invoice.invoice_line_ids:\n line._set_additional_fields(invoice)\n # Necessary to force computation of taxes. 
In account_invoice, they are triggered\n # by onchanges, which are not triggered when doing a create.\n invoice.compute_taxes()\n # Idem for partner\n so_payment_term_id = invoice.payment_term_id.id\n fp_invoice = invoice.fiscal_position_id\n invoice._onchange_partner_id()\n invoice.fiscal_position_id = fp_invoice\n # To keep the payment terms set on the SO\n invoice.payment_term_id = so_payment_term_id\n invoice.message_post_with_view('mail.message_origin_link',\n values={'self': invoice, 'origin': references[invoice]},\n subtype_id=self.env.ref('mail.mt_note').id)\n return [inv.id for inv in invoices.values()]", "def _get_query(self, type, date_from=False, date_to=False, users=None, products=None):\n # TODO: Revisar def _create_invoice(self, order, so_line, amount):...\n # so.user_id AS id_salesman\n # AND so.user_id IN (%s)\n # AND pp.id IN (%s)\n # GROUP BY salesman\n\n if type == 'most_sold':\n sql = \"\"\"\n SELECT min(sol.id) AS id, \n so.user_id AS salesman, \n sol.product_id AS product,\n AVG(sol.price_reduce_taxexcl) AS price, \n pp.product_tmpl_id AS product_template,\n so.company_id AS company,\n SUM(sol.product_uom_qty) AS qty,\n SUM(sol.price_subtotal) AS subtotal\n FROM sale_order_line sol\n LEFT JOIN sale_order so ON so.id = sol.order_id\n LEFT JOIN product_product pp ON pp.id = sol.product_id\n LEFT JOIN product_template pt ON pt.id = pp.product_tmpl_id\n WHERE so.state NOT IN ('draft', 'sent', 'cancel')\n AND so.date_order BETWEEN '%s' AND '%s'\n AND so.user_id IN (%s)\n AND pp.id IN (%s)\n GROUP BY salesman, sol.product_id, pp.product_tmpl_id, so.company_id\n ORDER BY qty DESC;\n \"\"\" % (date_from, date_to, ', '.join(str(u) for u in users), ', '.join(str(p) for p in products))\n else:\n sql = \"\"\" \n \"\"\"\n self.env.cr.execute(sql)\n return self.env.cr.dictfetchall()", "def get_customer_history_df_invoice_line(self, customerID):\n df_invoice_line \\\n = self._df_invoice_original[self._df_invoice_original.CustomerID \\\n == customerID]\n return df_invoice_line", "def _prepare_invoice_line(self, inv_id):\n res = {}\n account_id = self.product_id.property_account_income_id.id or self.product_id.categ_id.property_account_income_categ_id.id\n if not account_id:\n raise UserError(_('Please define income account for this product: \"%s\" (id:%d).') % \\\n (self.product_id.name, self.product_id.id,))\n price_unit = self.product_id.lst_price\n res = {\n 'invoice_id': inv_id.id,\n 'name': self.name,\n 'origin': self.order_id.name,\n 'account_id': account_id,\n 'uom_id': self.product_uom_id.id,\n 'quantity': self.product_uom_qty,\n 'price_unit': price_unit,\n 'product_id': self.product_id.id,\n 'invoice_line_tax_id': False,\n 'order_line_id': self.id\n }\n return res", "def action_invoice_create(self, grouped=False, final=False):\n inv_obj = self.env['account.invoice']\n precision = self.env['decimal.precision'].precision_get('Product Unit of Measure')\n invoices = {}\n references = {}\n invoices_origin = {}\n invoices_name = {}\n for order in self:\n group_key = order.id if grouped else (order.partner_id.id, order.currency_id.id)\n for line in order.order_line.sorted(key=lambda l: l.qty_received - l.qty_invoiced < 0):\n if float_is_zero(line.qty_received - line.qty_invoiced, precision_digits=precision):\n continue\n if group_key not in invoices:\n inv_data = order._prepare_invoice()\n invoice = inv_obj.create(inv_data)\n references[invoice] = order\n invoices[group_key] = invoice\n invoices_origin[group_key] = [invoice.origin]\n invoices_name[group_key] = [invoice.name]\n 
elif group_key in invoices:\n if order.name not in invoices_origin[group_key]:\n invoices_origin[group_key].append(order.name)\n if order.partner_ref and order.partner_ref not in invoices_name[group_key]:\n invoices_name[group_key].append(order.partner_ref)\n\n if line.qty_received - line.qty_invoiced > 0:\n line.invoice_line_create(invoices[group_key].id, line.qty_received - line.qty_invoiced)\n elif line.qty_received - line.qty_invoiced < 0 and final:\n line.invoice_line_create(invoices[group_key].id, line.qty_received - line.qty_invoiced)\n\n if references.get(invoices.get(group_key)):\n if order not in references[invoices[group_key]]:\n references[invoices[group_key]] |= order\n\n for group_key in invoices:\n invoices[group_key].write({'name': ', '.join(invoices_name[group_key]),\n 'origin': ', '.join(invoices_origin[group_key])})\n\n if not invoices:\n raise UserError(_('There is no invoicable line.'))\n\n for invoice in invoices.values():\n if not invoice.invoice_line_ids:\n raise UserError(_('There is no invoicable line.'))\n # If invoice is negative, do a refund invoice instead\n if invoice.amount_total < 0:\n invoice.type = 'in_refund'\n for line in invoice.invoice_line_ids:\n line.quantity = -line.quantity\n # Necessary to force computation of taxes. In account_invoice, they are triggered\n # by onchanges, which are not triggered when doing a create.\n invoice.compute_taxes()\n invoice.message_post_with_view('mail.message_origin_link',\n values={'self': invoice, 'origin': references[invoice]},\n subtype_id=self.env.ref('mail.mt_note').id)\n return [inv.id for inv in invoices.values()]", "def action_invoice_create(self, grouped=False, final=False):\n inv_obj = self.env['account.invoice']\n precision = self.env['decimal.precision'].sudo().precision_get('Product Unit of Measure')\n invoices = {}\n references = {}\n for order in self:\n group_key = order.id if grouped else (order.partner_invoice_id.id, order.currency_id.id)\n for line in order.order_line.sorted(key=lambda l: l.qty_to_invoice < 0):\n if float_is_zero(line.qty_to_invoice, precision_digits=precision):\n continue\n if group_key not in invoices:\n inv_data = order._prepare_invoice()\n invoice = inv_obj.sudo().create(inv_data)\n references[invoice] = order\n invoices[group_key] = invoice\n invoice['sale_order_id'] = order.id\n elif group_key in invoices:\n vals = {}\n if order.name not in invoices[group_key].origin.split(', '):\n vals['origin'] = invoices[group_key].origin + ', ' + order.name\n if order.client_order_ref and order.client_order_ref not in invoices[group_key].name.split(\n ', ') and order.client_order_ref != invoices[group_key].name:\n vals['name'] = invoices[group_key].name + ', ' + order.client_order_ref\n invoices[group_key].sudo().write(vals)\n if line.qty_to_invoice > 0:\n line.invoice_line_create(invoices[group_key].id, line.qty_to_invoice)\n elif line.qty_to_invoice < 0 and final:\n line.invoice_line_create(invoices[group_key].id, line.qty_to_invoice)\n\n if references.get(invoices.get(group_key)):\n if order not in references[invoices[group_key]]:\n references[invoices[group_key]] |= order\n if not invoices:\n raise UserError(_('There is no invoiceable line.'))\n for invoice in invoices.values():\n if not invoice.invoice_line_ids:\n raise UserError(_('There is no invoiceable line.'))\n # If invoice is negative, do a refund invoice instead\n if invoice.amount_untaxed < 0:\n invoice.type = 'out_refund'\n for line in invoice.invoice_line_ids:\n line.quantity = -line.quantity\n # Use additional field 
helper function (for account extensions)\n for line in invoice.invoice_line_ids:\n line._set_additional_fields(invoice)\n # Necessary to force computation of taxes. In account_invoice, they are triggered\n # by onchanges, which are not triggered when doing a create.\n invoice.compute_taxes()\n invoice.message_post_with_view('mail.message_origin_link',\n values={'self': invoice, 'origin': references[invoice]},\n subtype_id=self.env.ref('mail.mt_note').id)\n return [inv.id for inv in invoices.values()]", "def merge_invoice(self, cr, uid, invoices, context=None):\n order_ids = []\n pick_ids = []\n if len(invoices) <= 1:\n return False\n parent = self.pool.get('account.invoice').browse(cr, uid, context['active_id'])\n for inv in invoices:\n if parent.partner_id != inv.partner_id:\n raise osv.except_osv(_(\"Partners don't match!\"), _(\"Can not merge invoice(s) on different partners or states !.\"))\n\n if inv.state != 'draft':\n raise osv.except_osv(_(\"Invalid action !\"), _(\"You can merge only invoices in draft state.\"))\n\n # Merge invoices that are in draft state\n inv_line_obj = self.pool.get('account.invoice.line')\n name = parent.name\n comment = parent.comment\n origin = parent.origin\n for inv in invoices:\n if inv.id == parent.id:\n continue\n\n # check if a line with the same product already exist. if so add quantity. else hang up invoice line to first invoice head.\n if inv.name:\n # Find if the same name already exist, if yes, skip to add.\n name_list = name.replace(' ', '').split(',')\n if inv.name not in name_list:\n name += ', %s' % inv.name\n if inv.comment:\n comment = comment and comment + ', %s' % inv.comment or inv.comment\n if inv.origin:\n origin += ', %s' % inv.origin\n line_ids = inv_line_obj.search(cr, uid, [('invoice_id', '=', inv.id)])\n for inv_lin in inv_line_obj.browse(cr, uid, line_ids):\n mrg_pdt_ids = inv_line_obj.search(cr, uid, [('invoice_id', '=', parent.id), ('product_id', '=', inv_lin.product_id.id),\n ('uos_id', '=', inv_lin.uos_id.id), ('price_unit', '=', inv_lin.price_unit) # kittiu: extra condition, unit price must also be the same.\n ])\n if len(mrg_pdt_ids) == 1 and inv.type == parent.type: # product found --> add quantity\n inv_line_obj.write(cr, uid, mrg_pdt_ids, {'quantity': inv_line_obj._can_merge_quantity(cr, uid, mrg_pdt_ids[0], inv_lin.id)})\n inv_line_obj.unlink(cr, uid, inv_lin.id)\n elif inv.type == parent.type:\n inv_line_obj.write(cr, uid, inv_lin.id, {'invoice_id': parent.id})\n else:\n inv_line_obj.write(cr, uid, inv_lin.id, {'invoice_id': parent.id, 'quantity': -inv_lin.quantity})\n\n if inv.sale_order_ids:\n order_ids += [order.id for order in inv.sale_order_ids]\n if inv.picking_ids:\n pick_ids += [picking.id for picking in inv.picking_ids]\n\n self.write(cr, uid, parent.id, {'origin': origin, 'name': name, 'comment': comment})\n\n #Remove By DRB\n #cr.execute('update sale_order_invoice_rel set invoice_id = %s where invoice_id = %s', (parent.id, inv.id))\n #cr.execute('update picking_invoice_rel set invoice_id = %s where invoice_id = %s', (parent.id, inv.id))\n\n self.unlink(cr, uid, [inv.id])\n #Distinct List\n order_ids = list(set(order_ids))\n pick_ids = list(set(pick_ids))\n\n self.write(cr, uid, parent.id, {'sale_order_ids': [(6, 0, order_ids)], 'picking_ids': [(6, 0, pick_ids)]})\n self.button_reset_taxes(cr, uid, [parent.id])\n return parent.id", "def invoices(self):\r\n return Invoices(self)", "def generate_report(df, start_date, end_date):\n # Remove any transactions that had to do with collecting or returning security\n 
security_df = df[(df[CATEGORY] == 'Security') | (df[CATEGORY] == 'Security-Income')]\n df = df[(df[CATEGORY] != 'Security')]\n\n # Exclude the data for everything except our quarter\n period_data = df.loc[start_date:end_date] # Note: NOT using extended quarter range\n rental_income = period_data[period_data[CATEGORY] == 'Rent']\n utilities = period_data[(period_data[CATEGORY] == 'Utilities')]\n other_income = period_data[(period_data['Transaction Type'] == 'credit') & (period_data[CATEGORY] != 'Rent')]\n expenses = period_data[(period_data['Transaction Type'] == 'debit')]\n unpaid_util_overages = float(0)\n\n # print(rental_income)\n # print(other_income)\n # print(expenses)\n \n html_config.initialize()\n print(html_config.HTML_OPEN)\n\n print('<H1>Income and Expense Report for %s-%s:' % (start_date, end_date), '</H1><p>')\n\n # List all unit specific rents and expenses for the quarter\n for UNIT in sorted(rental_income['Unit'].unique()):\n # Show rental income info\n temp_df = rental_income[rental_income['Unit'] == UNIT]\n print('<br><H2>Total rent for Unit ', UNIT, ': ${:,.2f}'.format(temp_df['Amount'].sum()), '</H2>')\n print(temp_df[['Description', 'Amount']].to_html())\n \n if not SKIP_UTIL_ANALYSIS:\n # Show utilities payments and calculate any overage due\n temp_df = utilities[(utilities['Unit'] == UNIT) & (utilities['Transaction Type'] == 'debit')]\n print('<br><H2>Utilities Expenses for Unit', UNIT, ': ${:,.2f}'.format(temp_df['Amount'].sum()), '</h2>')\n overage = temp_df.assign(Overage=lambda x: x.Amount - limit_df.loc[UNIT].Amount)\n # Disable warning when setting negative overage values to zero\n pd.set_option('mode.chained_assignment', None)\n overage.Overage[overage.Overage < 0] = 0\n pd.set_option('mode.chained_assignment', 'warn')\n print(overage[['Description', 'Amount', 'Overage']].to_html())\n # Show if utilties costs exceeded allotted amount\n if (overage['Overage'].sum() > 0):\n print('<h3>Utilities overage for unit', UNIT, ': ${:,.2f}'.format(overage['Overage'].sum()), '</h3>')\n unpaid_util_overages += overage['Overage'].sum()\n # Show any untilities that were collected \n overage_collected = utilities[(utilities['Unit'] == UNIT) & (utilities['Transaction Type'] == 'credit')]\n if not overage_collected.empty:\n print('<br><H2>Util overages collected for Unit ', UNIT, ': ${:,.2f}'.format(overage_collected['Amount'].sum()), '</H2>')\n print(overage_collected[['Description', 'Amount', CATEGORY]].to_html())\n print('<h3>Net Utils overage for unit', UNIT, 'for period: ${:,.2f}'.format(overage['Overage'].sum() - overage_collected['Amount'].sum()), '</h3>')\n \n\n \n # Generate unit specific Utility usage reports\n if GEN_TENANT_UTIL_REPORTS and OUTPUT_DIRECTORY:\n TENANT_FILE = '%s/122-Spring-St-%s-%s-Unit-%s-utils.html' % (OUTPUT_DIRECTORY, start_date, end_date, UNIT)\n TENANT_REPORTS.append(TENANT_FILE)\n sys.stdout = open(TENANT_FILE, 'w')\n print(html_config.HTML_OPEN)\n\n print('<H1>Unit', UNIT, '</H1>')\n print('<br><H2>Utilities Expenses for: %s-%s' % (start_date, end_date))\n print('<br>Utilites included in rent: ${:,.2f}'.format(limit_df.loc[UNIT].Amount))\n print('</H2>')\n print(overage[['Description', 'Amount', 'Overage']].to_html())\n # Show if any utilties overage may be due\n if (overage['Overage'].sum() > 0):\n print('<h3>Utilities overage for unit', UNIT, ': ${:,.2f}'.format(overage['Overage'].sum()), '</h3>')\n if not overage_collected.empty:\n print('<br><H2>Util overages collected for Unit ', UNIT, ': 
${:,.2f}'.format(overage_collected['Amount'].sum()), '</H2>')\n print(overage_collected[['Description', 'Amount', CATEGORY]].to_html())\n print('<h3>Net Utils overage for unit', UNIT, 'for period: ${:,.2f}'.format(overage['Overage'].sum() - overage_collected['Amount'].sum()), '</h3>')\n\n print(html_config.HTML_CLOSE)\n\n # Restore stdout to the main report file\n sys.stdout = open(REPORT_FILE, 'a')\n \n # Show other unit specific transactions\n if SKIP_UTIL_ANALYSIS:\n unit_exp = expenses[(expenses['Unit'] == UNIT)]\n unit_income = other_income[other_income['Unit'] == UNIT]\n else:\n unit_exp = expenses[(expenses['Unit'] == UNIT) & (expenses[CATEGORY] != 'Utilities')]\n unit_income = other_income[(other_income['Unit'] == UNIT) & (other_income[CATEGORY] != 'Utilities')]\n \n if not unit_exp.empty:\n print('<br><H2>Other Unit specific expenses for: ', UNIT, ': ${:,.2f}'.format(unit_exp['Amount'].sum()), '</h2>')\n print(unit_exp[['Description', 'Amount', 'Unit', CATEGORY]].to_html())\n print('<p>')\n \n # Show any other unit specific credit\n other_income = other_income[other_income['Unit'] == UNIT]\n if not other_income.empty:\n print('<br><H2>Expense offsets for Unit ', UNIT, ': ${:,.2f}'.format(other_income['Amount'].sum()), '</H2>')\n print(other_income[['Description', 'Amount', CATEGORY]].to_html())\n \n # Add a line between units\n print('<hr>')\n \n # List the shared income and expenses for the quarter\n temp_df = other_income[other_income['Unit'].isnull()]\n if not temp_df.empty:\n print ('<br><H2>Non unit specific income: ${:,.2f}'.format(temp_df['Amount'].sum()), '</h2>')\n print(temp_df[['Description', 'Amount', CATEGORY]].to_html())\n gen_expenses = expenses[expenses['Unit'].isnull()]\n if not gen_expenses.empty:\n print ('<br><H1>Non unit specific expenses</h1>')\n # Get the list of expense categories and generate summary for each\n for category in sorted(gen_expenses[CATEGORY].unique()):\n temp_df = gen_expenses[(gen_expenses[CATEGORY] == category)]\n print ('<br><H2>'+ category +': ${:,.2f}'.format(temp_df['Amount'].sum()), '</h2>')\n print(temp_df[['Description', 'Amount', CATEGORY]].to_html())\n \n # If there were any security transactions in the period give a security report\n if not security_df.loc[start_date:end_date].empty:\n temp_df = security_df.loc[start_date:end_date] \n print('<hr><H2>Security related transactions:</H2>')\n print(temp_df[['Description', 'Amount', 'Transaction Type', 'Unit']].to_html())\n for UNIT in sorted(rental_income['Unit'].unique()):\n unit_df = security_df[security_df['Unit'] == UNIT]\n collected = unit_df[(unit_df['Transaction Type'] == 'credit')]['Amount'].sum()\n returned = unit_df[(unit_df['Transaction Type'] == 'debit')]['Amount'].sum()\n print('<center><H4>Current Liability on Unit '+str(UNIT)+': ${:,.2f}'.format(collected-returned), '</H4></center>')\n \n # # Summarize the periods income and expenses -- old way to be discarded...\n # print('<br><H3>Total income for period: ${:,.2f}'.format(rental_income['Amount'].sum()), '</H3>')\n # print('<H3>Total expense for period: ${:,.2f}'.format(expenses['Amount'].sum() - other_income['Amount'].sum()), '</H3>')\n # print('<H3>Profit for period: ${:,.2f}'.format(rental_income['Amount'].sum() + other_income['Amount'].sum() -expenses['Amount'].sum()), '</H3>')\n # Summarize the periods income and expenses\n print('<br><H3>Total income for period: ${:,.2f}'.format(rental_income['Amount'].sum()))\n print('<br><H3>Total expense for period: ${:,.2f}'.format(expenses['Amount'].sum() - 
other_income['Amount'].sum()), '</H3>')\n print('<H3>Profit for period: ${:,.2f}'.format(rental_income['Amount'].sum() + other_income['Amount'].sum() -expenses['Amount'].sum()), '</H3>')\n# print('</H3>')\n \n print(html_config.HTML_CLOSE)\n sys.stdout.flush()", "def _prepare_invoice_grp(self, cr, uid, order, line_ids, context=None):\n if context is None:\n context = {}\n context = dict(context)\n\n inv_data = super(grp_orden_compra, self)._prepare_invoice_grp(cr, uid, order, line_ids, context=context)\n\n # adicionando campos numero compromiso y no obligacion desde la OC\n monto_oc = math.floor(order.total_llavep or 0)\n monto_oc = int(monto_oc)\n inv_data.update({'nro_compromiso': order.nro_compromiso or False, 'monto_comprometido': monto_oc or 0, 'currency_id':order.currency_oc.id})\n\n # adicionando campos no afectacion y monto autorizado desde la primera APG\n if order.pc_apg_id:\n first_apg = order.pc_apg_id\n monto_apg = math.floor(first_apg.total_llavep)\n monto_apg = int(monto_apg)\n # TODO R SPRING X ADICIONANDO CABEZALES SIIF A LA FACTURA A PARTIR DE LA APG\n inv_data.update({'nro_afectacion': first_apg.nro_afectacion_siif or False,\n 'monto_afectado': monto_apg or 0,\n 'siif_tipo_ejecucion':first_apg.siif_tipo_ejecucion.id,\n 'siif_concepto_gasto':first_apg.siif_concepto_gasto.id,\n 'siif_financiamiento':first_apg.siif_financiamiento.id,\n 'siif_codigo_sir':first_apg.siif_codigo_sir.id,\n 'siif_nro_fondo_rot':first_apg.siif_nro_fondo_rot.id,\n }) # cambiando nro_afectacion 23/10\n # inv.update({'nro_afectacion': first_apg.nro_afectacion_apg or False, 'monto_afectado': monto_apg or 0})\n\n # # TODO R SPRING X NO LLEVAR LAS LLAVES PRESUPUESTALES POR DEFECTO\n # if order.pc_apg_id.llpapg_ids:\n # llavep_ids = []\n # for llavep in order.pc_apg_id.llpapg_ids:\n # llavep_ids.append((0, 0, {\n # 'programa_id': llavep.programa_id.id,\n # 'odg_id': llavep.odg_id.id,\n # 'auxiliar_id': llavep.auxiliar_id.id,\n # 'disponible': llavep.disponible,\n # 'proyecto_id': llavep.proyecto_id.id,\n # 'fin_id': llavep.fin_id.id,\n # 'mon_id': llavep.mon_id.id,\n # 'tc_id': llavep.tc_id.id,\n # 'importe': llavep.importe\n # }))\n # inv_data.update({'llpapg_ids': llavep_ids})\n\n return inv_data", "def _prepare_invoice_line(self, qty):\n self.ensure_one()\n res = {\n 'name': self.name,\n 'sequence': self.sequence,\n 'origin': self.order_id.name,\n 'account_id': self.product_id.product_tmpl_id._get_product_accounts()['stock_input'].id,\n 'price_unit': self.price_unit,\n 'quantity': qty,\n 'uom_id': self.product_uom.id,\n 'product_id': self.product_id.id or False,\n 'invoice_line_tax_ids': [(6, 0, self.taxes_id.ids)],\n 'account_analytic_id': self.account_analytic_id.id,\n 'analytic_tag_ids': [(6, 0, self.analytic_tag_ids.ids)],\n }\n return res", "def invoice(self,context,params):\n url = f\"https://api.freshbooks.com/accounting/account/{params['account_id']}/invoices/invoices/{params['invoice_id']}\"\n result = json.loads(util.rest(\"GET\", url, {}, context[\"headers\"][\"access_token\"]).text)\n invoice = result[\"response\"][\"result\"][\"invoice\"]\n invoice_obj = FreshbooksInvoice(\n account_id=invoice['accountid'],\n customerid=invoice['customerid'], \n invoice_id=invoice['invoiceid'],\n currency_code=invoice['currency_code'],\n language=invoice['language'],\n terms=invoice['terms'],\n discount_value=invoice['discount_value'],\n discount_amount=invoice['discount_total']['amount'],\n invoice_number=invoice['invoice_number'],\n po_number=invoice['po_number'],\n amount=invoice['amount']['amount'],\n 
code=invoice['amount']['code'],\n create_date=invoice['create_date']\n )\n return invoice_obj.__dict__" ]
[ "0.58933586", "0.58273345", "0.5787141", "0.5730418", "0.57022715", "0.56592727", "0.5587117", "0.5582993", "0.5557562", "0.55526274", "0.55505604", "0.55257595", "0.5520605", "0.54978883", "0.5488627", "0.5473216", "0.5465187", "0.54585433", "0.54323196", "0.53706306", "0.5359521", "0.5348314", "0.53445005", "0.5337536", "0.5308676", "0.52830005", "0.52545595", "0.5254459", "0.5245022", "0.5233254" ]
0.61920017
0
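The make_invoices snippet quoted in the negatives above derives each installment's bill, due, and cancel dates with dateutil's relativedelta. The standalone sketch below illustrates that date arithmetic for a quarterly schedule; the function name, the sample effective date, and the assumption that python-dateutil is available are illustrative choices, not values taken from the dataset rows.

from datetime import date
from dateutil.relativedelta import relativedelta

def quarterly_bill_dates(effective_date, installments=4):
    """Bill/due/cancel dates per installment, three months apart, mirroring the quoted logic."""
    rows = []
    for i in range(installments):
        bill_date = effective_date + relativedelta(months=i * 3)
        rows.append({
            "bill_date": bill_date,
            "due_date": bill_date + relativedelta(months=1),
            "cancel_date": bill_date + relativedelta(months=1, days=14),
        })
    return rows

for row in quarterly_bill_dates(date(2024, 1, 1)):
    print(row)

Each later installment simply shifts the effective date by a whole number of months, which is why the quoted code divides the annual premium evenly across installments rather than prorating by day count.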
Returns a dataframe with all invoice lines for the customerID given as a parameter.
def get_customer_history_df_invoice_line(self, customerID):
    df_invoice_line \
    = self._df_invoice_original[self._df_invoice_original.CustomerID \
    == customerID]
    return df_invoice_line
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_customer_df_invoice_line(self, customerID, list_stockCode\\\n , list_quantity, invoiceDate):\n \n dict_invoice = dict()\n\n dict_invoice['Quantity'] = list_quantity\n dict_invoice['StockCode'] = list_stockCode\n\n #------------------------------------------------------------------------\n # Build invoiceDate from local current time\n #------------------------------------------------------------------------\n if invoiceDate is None:\n time_struct = time.localtime()\n invoiceDate = str(time_struct.tm_year)+'-'+str(time_struct.tm_mon)\\\n +'-'+str(time_struct.tm_mday)\n invoiceDate +=' '\n invoiceDate +=str(time_struct.tm_hour)+':'+str(time_struct.tm_min)\\\n +':'+str(time_struct.tm_sec)\n invoiceDate = pd.Timestamp(invoiceDate)\n else:\n pass\n\n\n #------------------------------------------------------------------------\n # Lists initialization\n #------------------------------------------------------------------------\n list_customerID = list()\n list_invoiceNo = list()\n list_invoiceDate = list()\n list_invoice_line_index = list()\n \n #------------------------------------------------------------------------\n # Increase Invoice number\n #------------------------------------------------------------------------\n invoiceNo = max(self._df_invoice_original.InvoiceNo)\n invoiceNo += 1\n\n #------------------------------------------------------------------------\n # Get latest invoice line index value\n #------------------------------------------------------------------------\n invoice_line_index = max(self._df_invoice_original.index)\n\n #------------------------------------------------------------------------\n # Build lists for CustomerID, InvoiceNo, InvoiceDate\n # A list of incremented indexes is built for new rows.\n #------------------------------------------------------------------------\n for quantity in list_quantity:\n list_customerID.append(customerID)\n list_invoiceNo.append(invoiceNo)\n list_invoiceDate.append(invoiceDate)\n invoice_line_index += 1\n list_invoice_line_index.append(invoice_line_index) \n\n \n dict_invoice['CustomerID'] = list_customerID\n dict_invoice['InvoiceNo'] = list_invoiceNo\n dict_invoice['InvoiceDate'] = list_invoiceDate\n\n #------------------------------------------------------------------------\n # Get description list from list of stock codes.\n #------------------------------------------------------------------------\n list_description = self.getDescriptionList(list_stockCode)\n \n dict_invoice['Description'] = list_description\n\n #------------------------------------------------------------------------\n # Get unit price list from list of stock codes.\n #------------------------------------------------------------------------\n list_unitPrice = self.getUnitPriceList(list_stockCode)\n \n dict_invoice['UnitPrice'] = list_unitPrice\n\n #------------------------------------------------------------------------\n # Dataframe with new invoices lines is created.\n #------------------------------------------------------------------------\n df_invoice_line \\\n = pd.DataFrame(dict_invoice, columns=dict_invoice.keys()\\\n , index=list_invoice_line_index)\n \n return df_invoice_line", "def invoice(customer_id):\n encoder = request.url_rule.endpoint\n template = \"{{ encoder }}#{{ customer_id|%s }}\" % encoder\n return render_template_string(template, **locals())", "def get_dataframe() -> pandas.DataFrame:\n database_connection = processing.establish_connection(database_path)\n dataframe = database_connection.to_dataframe(['CustomerId', 
'InvoiceDate', 'Total'], table_name)\n database_connection.close()\n dataframe = processing.get_invoice_date_fixed(dataframe)\n analyze_dataframe = dataframe.copy()\n total_sum_dataframe = processing.get_column_sum(analyze_dataframe)\n\n customer_count_dataframe = processing.drop_duplicates(analyze_dataframe)\n customer_count_dataframe = processing.get_column_count(customer_count_dataframe)\n return customer_count_dataframe.merge(total_sum_dataframe, how='inner', on='InvoiceDate')", "def order_process(self, customerID, list_stockCode, list_quantity\\\n , orderDate=None):\n\n segmentID = -1\n\n #-------------------------------------------------------------------------\n # A new customer is created and inserted into data-set.\n #-------------------------------------------------------------------------\n if customerID is None:\n customerID = int(self.createCustomerID())\n else:\n pass\n \n #-------------------------------------------------------------------------\n # A new dataframe with new invoice lines are created.\n #-------------------------------------------------------------------------\n df_invoice_line = self.create_customer_df_invoice_line(customerID\\\n , list_stockCode, list_quantity, orderDate)\n \n #-------------------------------------------------------------------------\n # Original dataframe is updated with customer invoices lines.\n #-------------------------------------------------------------------------\n print(\"order_process : shape before concat= \"+str(self._df_invoice_original.shape))\n self._df_invoice_original \\\n = pd.concat([self._df_invoice_original, df_invoice_line], axis=0)\n print(\"order_process : shape after concat= \"+str(self._df_invoice_original.shape))\n \n #-------------------------------------------------------------------------\n # All invoices lines (including new one) related to customer is retrieved \n # from original dataframe.\n #-------------------------------------------------------------------------\n df_invoice_line_customer \\\n = self.get_customer_history_df_invoice_line(customerID)\n\n #-------------------------------------------------------------------------\n # When calling get_customer_marketSegment(), df_invoice_line_customer is\n # concatened to the original dataframe.\n #------------------------------------------------------------------------- \n segmentID = self.get_customer_marketSegment(df_invoice_line_customer)\n \n return segmentID, customerID", "def get_rows(contract_address: str, file_name: str, receipts_filename: str) -> pd.DataFrame():\n receipts_df = pd.read_csv(receipts_filename)\n receipts_df = receipts_df.sort_values(['block_number', 'transaction_index'], ignore_index=True)\n df = pd.read_csv(file_name)\n df = df.sort_values(['block_number', 'transaction_index'], ignore_index=True)\n\n df = df.loc[receipts_df['status'] == 1] \n df = df.loc[df[\"to_address\"] == contract_address.lower()]\n df = df.reset_index()\n df = df.drop(columns='index')\n return df", "def invoices(self,org_id=None,invoice_id=''):\n if org_id is None:\n org_id = self.org_id\n return self.get('{}/orgs/{}/invoices/{}'.format(ApiVersion.A1.value,org_id,invoice_id))", "def data_process_deprecated(self, CustomerID, InvoiceDate, InvoiceNo, Description, Quantity\\\n , UnitPrice ):\n dict_invoice = {'InvoiceDate':InvoiceDate, 'Description':Description\\\n , 'Quantity':Quantity, 'UnitPrice':UnitPrice}\n dict_invoice['CustomerID'] = CustomerID\n dict_invoice['InvoiceNo'] = InvoiceNo\n df_invoice_line \\\n = pd.DataFrame(dict_invoice, 
columns=dict_invoice.keys(), index=[0])\n \n self.data_transform(df_invoice_line)\n\n #self.feature_rfm_encode()\n\n self.feature_scale()\n\n self.list_feature_drop()\n\n self.feature_description_nlp()\n return", "def return_customer_orders(customer_id):\n with MY_CONNECTION as connection:\n cursor = connection.cursor()\n cursor.execute(\n \"\"\"\n SELECT id_order, id_customer, id_product, quantity, total_price,\n payment_status, send_status, order_date, location\n FROM Orders\n Where id_customer=?\n \"\"\",\n (customer_id,))\n return cursor.fetchall()", "def invoice(self, id):\r\n return Invoice(self, id)", "def get_all_customer_ids():\n table = data_manager.get_table_from_file(\"sales/sales.csv\")\n return get_all_customer_ids_from_table(table)", "def get_all_sales_ids_for_customer_ids():\n\n # your code", "def invoices(self):\r\n return Invoices(self)", "def invoices(self):\r\n return inv.Invoices(self)", "def invoice(self,context,params):\n url = f\"https://api.freshbooks.com/accounting/account/{params['account_id']}/invoices/invoices/{params['invoice_id']}\"\n result = json.loads(util.rest(\"GET\", url, {}, context[\"headers\"][\"access_token\"]).text)\n invoice = result[\"response\"][\"result\"][\"invoice\"]\n invoice_obj = FreshbooksInvoice(\n account_id=invoice['accountid'],\n customerid=invoice['customerid'], \n invoice_id=invoice['invoiceid'],\n currency_code=invoice['currency_code'],\n language=invoice['language'],\n terms=invoice['terms'],\n discount_value=invoice['discount_value'],\n discount_amount=invoice['discount_total']['amount'],\n invoice_number=invoice['invoice_number'],\n po_number=invoice['po_number'],\n amount=invoice['amount']['amount'],\n code=invoice['amount']['code'],\n create_date=invoice['create_date']\n )\n return invoice_obj.__dict__", "def invoices(self, account_id):\n from pureport_client.commands.accounts.invoices import Command\n return Command(self.client, account_id)", "def invoice_items(self,org_id=None,query={}):\n if org_id is None:\n org_id = self.org_id\n query_end_date = datetime.strptime(query['endDate'],'%Y-%m-%dT%H:%M:%SZ')\n # Given a 'query_end_date' to find the invoice containing the\n # line items for that date we need to find the invoice which \n # has 'endDate' equal to the end of the month of the `query_end_date`\n query_first_next_month = query_end_date + relativedelta(months=+1) - relativedelta(days=(query_end_date.day-1))\n target_invoices = []\n invoices = self.invoices(org_id)\n if self.verbose:\n print('Searching invoices org_id={}'.format(org_id))\n print('query={} query_end_date={}'.format(query,query_end_date))\n print('Result keys: {}'.format( invoices['content'].keys() ))\n print('Total result count: {}'.format( invoices['content']['totalCount'] ))\n for invoice in invoices['content']['results']:\n #pprint.pprint(invoice)\n end_date = datetime.strptime(invoice['endDate'],'%Y-%m-%dT%H:%M:%SZ')\n if self.verbose: \n print('invoice({})[\\'endDate\\']={} end_date={}'.format(invoice['id'],invoice['endDate'],end_date))\n if end_date == query_first_next_month:\n target_invoices.append(invoice)\n \n if self.verbose: \n print('Target invoices: {}'.format(target_invoices))\n \n\n target_line_items = []\n for invoice in target_invoices:\n invoice_details = self.invoices(org_id,invoice['id']) \n print('invoice_details: {}'.format(invoice_details))\n for item in invoice_details['content']['lineItems']:\n end_date = datetime.strptime(item['endDate'],'%Y-%m-%dT%H:%M:%SZ')\n if end_date == query_end_date:\n target_line_items.append(item)\n 
if self.verbose:\n print('target_line_items: {}'.format(target_line_items)) \n return target_line_items", "def invoice(self, invoice_number):\r\n return inv.Invoice(self, invoice_number)", "def get_all_customer_ids():\n\n # your code", "def invoices(self):\r\n return inv.AccountInvoices(self)", "def invoices_in_route(self):\n from invoicing.models import Invoice\n invoices = Invoice.objects.filter(\n route=self.number, print_date__range=(date.today() - timedelta(6), date.today()),\n canceled=False).count()\n return invoices", "def _prepare_invoice(self, cr, uid, order, lines, context=None):\n if context is None:\n context = {}\n journal_ids = self.pool.get('account.journal').search(cr, uid,\n [('type', '=', 'sale'), ('company_id', '=', order.company_id.id)],\n limit=1)\n if not journal_ids:\n raise osv.except_osv(_('Error !'),\n _('There is no sales journal defined for this company: \"%s\" (id:%d)') % (order.company_id.name, order.company_id.id))\n\n invoice_vals = {\n 'name': order.client_order_ref or '',\n 'origin': order.name,\n 'type': 'out_invoice',\n 'reference': order.client_order_ref or order.name,\n 'account_id': order.partner_id.property_account_receivable.id,\n 'journal_id': order.partner_id.property_default_sale_invoice_journal.id,\n 'partner_id': order.partner_id.id,\n 'address_invoice_id': order.partner_invoice_id.id,\n #'address_contact_id': order.partner_order_id.id,\n 'invoice_line': [(6, 0, lines)],\n 'currency_id': order.pricelist_id.currency_id.id,\n 'comment': order.note,\n 'payment_term': order.payment_term and order.payment_term.id or False,\n 'fiscal_position': order.fiscal_position.id or order.partner_id.property_account_position.id,\n 'date_invoice': context.get('date_invoice', False),\n 'company_id': order.company_id.id,\n 'user_id': order.user_id and order.user_id.id or False\n }\n\n # Care for deprecated _inv_get() hook - FIXME: to be removed after 6.1\n invoice_vals.update(self._inv_get(cr, uid, order, context=context))\n\n return invoice_vals", "def get_customer_orders(customerId):\n data = user_obj.get_customer_orders(customerId)\n return data", "def ListInvoices(self, **kwargs):\n return self._stub.ListInvoices(ln.ListInvoiceRequest(**kwargs))", "def get_invoices(self, since, until):\n return self._request('getInvoices', data={\n 'date_from': since.strftime('%d/%m/%Y'),\n 'date_to': until.strftime('%d/%m/%Y')\n })", "def get_all_customer_ids_from_table(table):\n\n # your code", "def _get_invoices_for_payment(cls, account_id: int) -> List[InvoiceModel]:\n valid_statuses = (InvoiceStatus.APPROVED.value, InvoiceStatus.REFUND_REQUESTED.value)\n invoice_ref_subquery = db.session.query(InvoiceReferenceModel.invoice_id). 
\\\n filter(InvoiceReferenceModel.status_code.in_((InvoiceReferenceStatus.ACTIVE.value,)))\n\n invoices: List[InvoiceModel] = db.session.query(InvoiceModel) \\\n .filter(InvoiceModel.invoice_status_code.in_(valid_statuses)) \\\n .filter(InvoiceModel.payment_method_code == PaymentMethod.EJV.value) \\\n .filter(InvoiceModel.payment_account_id == account_id) \\\n .filter(InvoiceModel.id.notin_(invoice_ref_subquery)) \\\n .all()\n return invoices", "def _prepare_invoice(self, cr, uid, order, lines, context=None):\n if context is None:\n context = {}\n journal_id = self.pool['account.invoice'].default_get(cr, uid, ['journal_id'], context=context)['journal_id']\n if not journal_id:\n raise osv.except_osv(_('Error!'),\n _('Please define sales journal for this company: \"%s\" (id:%d).') % (order.company_id.name, order.company_id.id))\n invoice_vals = {\n 'name': order.client_order_ref or '',\n 'origin': order.name,\n 'type': 'out_invoice',\n 'reference': order.client_order_ref or order.name,\n 'account_id': order.partner_invoice_id.property_account_receivable.id,\n 'partner_id': order.partner_invoice_id.id,\n 'journal_id': journal_id,\n 'invoice_line': [(6, 0, lines)],\n 'currency_id': order.pricelist_id.currency_id.id,\n 'comment': order.note,\n 'payment_term': order.payment_term and order.payment_term.id or False,\n 'fiscal_position': order.fiscal_position.id or order.partner_invoice_id.property_account_position.id,\n 'date_invoice': context.get('date_invoice', False),\n 'company_id': order.company_id.id,\n 'user_id': order.user_id and order.user_id.id or False,\n 'section_id' : order.section_id.id,\n 'test_1' :order.test\n }\n\n # Care for deprecated _inv_get() hook - FIXME: to be removed after 6.1\n invoice_vals.update(self._inv_get(cr, uid, order, context=context))\n return invoice_vals", "def _create_invoice(self):\n self.ensure_one()\n partner = self.member_id.partner_id\n invoice = self.env['account.invoice'].create({\n 'partner_id': partner.id,\n 'account_id': partner.property_account_receivable_id.id,\n 'fiscal_position_id': partner.property_account_position_id.id\n })\n for line in self.line_ids:\n product = line.activity_id.product_id\n # Handling of invoice lines : needs cache record for onchange, then\n # real writing...\n invoice_line = self.env['account.invoice.line'].new({\n 'product_id': product.id,\n 'invoice_id': invoice.id\n })\n invoice_line._onchange_product_id()\n line_values = dict(invoice_line._cache)\n line_values['price_unit'] = line.price\n invoice_line = self.env['account.invoice.line'].create(line_values)\n invoice.compute_taxes()\n line.registration_id.invoice_line_id = invoice_line.id\n return invoice", "def _prepare_invoice_line(self, inv_id):\n res = {}\n account_id = self.product_id.property_account_income_id.id or self.product_id.categ_id.property_account_income_categ_id.id\n if not account_id:\n raise UserError(_('Please define income account for this product: \"%s\" (id:%d).') % \\\n (self.product_id.name, self.product_id.id,))\n price_unit = self.product_id.lst_price\n res = {\n 'invoice_id': inv_id.id,\n 'name': self.name,\n 'origin': self.order_id.name,\n 'account_id': account_id,\n 'uom_id': self.product_uom_id.id,\n 'quantity': self.product_uom_qty,\n 'price_unit': price_unit,\n 'product_id': self.product_id.id,\n 'invoice_line_tax_id': False,\n 'order_line_id': self.id\n }\n return res", "def get_listCustomer_out_sample(self, customerCount=10):\n \n if customerCount is None :\n listCustomer= list(self._df_invoice_line_out_sample.CustomerID.unique())\n 
else:\n if customerCount <= 0 :\n listCustomer \\\n = list(self._df_invoice_line_out_sample.CustomerID.unique())\n else:\n listCustomer \\\n = list(self._df_invoice_line_out_sample.CustomerID.unique()[:customerCount])\n return listCustomer" ]
[ "0.6706535", "0.637564", "0.6069902", "0.59014344", "0.58117783", "0.5807765", "0.57650644", "0.56994027", "0.5648026", "0.5595924", "0.5531552", "0.55192786", "0.54935724", "0.54334897", "0.5429923", "0.5344283", "0.53376174", "0.5330216", "0.532311", "0.5300056", "0.5274764", "0.52722394", "0.5269863", "0.5219844", "0.521598", "0.52118987", "0.5197928", "0.5197842", "0.51852095", "0.5170273" ]
0.76564085
0
Returns a list of customers that have been excluded from the data sampling used for building the model. By default, 10 customer identifiers are returned. If the customerCount value is None or <= 0, then the list of all customers that have been excluded from the data sampling is returned.
def get_listCustomer_out_sample(self, customerCount=10):
    if customerCount is None:
        listCustomer = list(self._df_invoice_line_out_sample.CustomerID.unique())
    else:
        if customerCount <= 0:
            listCustomer = list(self._df_invoice_line_out_sample.CustomerID.unique())
        else:
            listCustomer = list(self._df_invoice_line_out_sample.CustomerID.unique()[:customerCount])
    return listCustomer
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_all_customers_count():\n data = user_obj.get_all_customers(\"1\")\n return data", "def pdelements_num_customers(self) -> int:\n return self.dss_obj.PDElementsI(ctypes.c_int32(4), ctypes.c_int32(0))", "def get_all_customer_ids():\n table = data_manager.get_table_from_file(\"sales/sales.csv\")\n return get_all_customer_ids_from_table(table)", "def get_all_customers():\n data = user_obj.get_all_customers()\n return data", "def get_all_customer_ids():\n\n # your code", "def pdelements_total_customers(self) -> int:\n return self.dss_obj.PDElementsI(ctypes.c_int32(5), ctypes.c_int32(0))", "def show_all_customers():\n return cr.show_all_customers()", "def get_all_customers_not_purchasing_next_quarter(self) -> list:\n\n #Check if predictions can be made\n if self._load_model:\n y_labels = self._predict_labels()\n return y_labels[y_labels == 0].index.tolist()\n\n else:\n raise NoTrainedModelError('There is no trained model to make predictions with, please call initialize_purchase_predictor() first or set load_existing_model to True.')", "def get_customers(self):\n self.navigate_to_page()\n customer_list=[]\n while True:\n page_customer = [{\n 'name': self.get_name(customer), \n 'parent':self.get_parent(customer),\n 'active':self.get_active(customer),\n 'link':self.get_details_link(customer)\n } for customer in self.get_page_customers()]\n customer_list = page_customer + customer_list\n if not CustomerGroupsPage.have_next_page(self):\n break\n self.navigate_to_page()\n return customer_list", "def get_customer_list(self):\n return self._customer_repo.get_customer_list()", "def get(self):\n return get_all_customers()", "def get_all_sales_ids_for_customer_ids():\n\n # your code", "def get_page_customers(self):\n return self.driver.find_elements(*CustomerGroupsPage.CUSTOMER_GROUP)", "def users_excludeds(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"users_excludeds\")", "def get_excluded_observations(self):\n\n return copy.deepcopy(self._excluded_observations)", "def list_active_customers():\n return Customer.select().where(Customer.is_active).count()", "def get_customer_count(self):\n return self._df_invoice_original.CustomerID.unique().shape[0]", "def get_unselected_benefits(cls, excluded_benefits):\n benefits = cls.query.filter(cls.id.notin_(excluded_benefits))\n return [benefit.serialize() for benefit in benefits]", "def excluded(cls):\n return []", "def list_active_customers():\n count_query = (Customer\n .select(Customer, fn.COUNT(Customer.name)\n .alias('cust_count'))\n .where(Customer.status == 'active'))\n for count in count_query:\n return count.cust_count", "def generateCustomers(self):\r\n\r\n # Counters\r\n shoppers = 0\r\n models = 0\r\n oldl = 0\r\n oldf = 0\r\n doctor = 0\r\n nudist = 0\r\n hippie = 0\r\n nerd = 0\r\n\r\n for i in range(self.num_of_customers):\r\n\r\n # With these weights, our store has plenty of youngs and olds, but few mids\r\n # Most grocery shoppers come in the evening\r\n # Young people have equal distribution between morning and evening\r\n # etc\r\n age1 = random.randint(18, 28)\r\n age2 = random.randint(28, 50)\r\n age3 = random.randint(50, 85)\r\n weighted_ages = [(age1, 10), (age2, 2), (age3, 15)]\r\n randomAge = [val for val, cnt in weighted_ages for a in range(cnt)]\r\n\r\n hour1 = random.randint(8, 13)\r\n hour2 = random.randint(13, 18)\r\n hour3 = random.randint(18, 22)\r\n weighted_hours = [(hour1, 10), (hour2, 3), (hour3, 20)]\r\n randomHour = [val for val, cnt in weighted_hours for b in range(cnt)]\r\n\r\n age = 
random.choice(randomAge)\r\n hour = random.choice(randomHour)\r\n gender = random.choice(['M', 'M', 'M', 'M', 'F', 'F', 'F', 'F', 'F', 'F', 'F', 'F'])\r\n\r\n # Base chances, 100 total\r\n gs, sm, hp, ol, nrd, of, sd, nud = 20, 5, 5, 5, 5, 5, 10, 10\r\n\r\n customerID = random.randint(0, self.num_of_customers*2)\r\n while customerID in self.all_customers:\r\n customerID = random.randint(0, self.num_of_customers*2)\r\n\r\n # Weights\r\n if 18 < age < 22:\r\n if gender == 'M':\r\n if 8 <= hour <= 12:\r\n ol, sm, nrd, hp = 2, 2, 35, 20\r\n elif 13 <= hour <= 17:\r\n ol, sm, nrd, hp, gs = 2, 2, 15, 30, 5\r\n elif 18 <= hour <= 22:\r\n ol, sm, gs = 2, 2, 50\r\n\r\n elif gender == 'F':\r\n if 8 <= hour <= 12:\r\n ol, sm, nrd = 5, 35, 15\r\n elif 13 <= hour <= 17:\r\n ol, sm, hp = 5, 30, 30\r\n elif 18 <= hour <= 22:\r\n ol, sm, gs, = 5, 25, 50\r\n\r\n elif gender == 'M' and 22 < age < 29:\r\n if 8 <= hour <= 12:\r\n ol, sm, nrd, hp = 5, 5, 35, 25\r\n elif 13 <= hour <= 17:\r\n ol, sm, nrd, hp = 5, 5, 35, 40\r\n elif 18 <= hour <= 22:\r\n ol, sm, nrd, hp, gs = 5, 5, 20, 20, 50\r\n\r\n elif gender == 'M' and 29 < age < 50:\r\n if 8 <= hour <= 12:\r\n ol, sm, nrd, gs = 5, 5, 40, 30\r\n elif 13 <= hour <= 17:\r\n ol, sm, nrd = 5, 5, 30\r\n elif 18 <= hour <= 22:\r\n ol, sm, gs = 5, 5, 70\r\n\r\n elif gender == 'M' and age > 50:\r\n if 8 <= hour <= 12:\r\n ol, sm, gs, of, hp = 5, 5, 30, 60, 20\r\n elif 13 <= hour <= 17:\r\n ol, sm, gs, of, hp = 5, 5, 15, 70, 20\r\n elif 18 <= hour <= 22:\r\n ol, sm, gs, of, hp = 5, 5, 50, 25, 20\r\n\r\n elif gender == 'F' and 22 < age < 35:\r\n if 8 <= hour <= 12:\r\n ol, sm, hp, gs = 5, 30, 30, 30\r\n elif 13 <= hour <= 17:\r\n ol, sm, hp, gs = 5, 30, 30, 15\r\n elif 18 <= hour <= 22:\r\n ol, sm, hp, gs = 5, 15, 25, 60\r\n\r\n elif gender == 'F' and 35 < age < 55:\r\n if 8 <= hour <= 12:\r\n ol, sm, hp, gs = 5, 5, 5, 40\r\n elif 13 <= hour <= 17:\r\n ol, sm, hp, gs = 25, 5, 5, 25\r\n elif 18 <= hour <= 22:\r\n ol, sm, hp, gs = 30, 5, 5, 40\r\n\r\n elif gender == 'F' and age > 55:\r\n if 8 <= hour <= 12:\r\n ol, sm, of, gs = 20, 5, 15, 30\r\n elif 13 <= hour <= 17:\r\n ol, sm, of, gs = 60, 5, 30, 15\r\n elif 18 <= hour <= 22:\r\n ol, sm, of, gs = 40, 5, 20, 40\r\n\r\n weighted_choices = [('Grocery Shopper', gs), ('Supermodel', sm), ('Hippie', hp), ('Old Lady', ol), ('Nerd', nrd), ('Self Doctor', sd), ('Nudist', nud), ('Old Fart', of)]\r\n randomType = [val for val, cnt in weighted_choices for n in range(cnt)]\r\n\r\n customer = random.choice(randomType)\r\n\r\n if customer == 'Grocery Shopper':\r\n shoppers += 1\r\n num_of_purchases = random.randint(0, 20)\r\n foodChance = 18\r\n medicalChance = 3\r\n electronicsChance = 1\r\n outdoorsChance = 1\r\n clothingChance = 1\r\n beautyChance = 2\r\n self.generatePurchases(num_of_purchases, foodChance, medicalChance, electronicsChance, outdoorsChance, clothingChance, beautyChance, customer)\r\n self.all_customers[customerID] = [age, gender, hour, customer, self.customer_purchases]\r\n\r\n elif customer == 'Supermodel':\r\n models += 1\r\n num_of_purchases = random.randint(0, 20)\r\n foodChance = 0\r\n medicalChance = 5\r\n electronicsChance = 0\r\n outdoorsChance = 0\r\n clothingChance = 10\r\n beautyChance = 13\r\n self.generatePurchases(num_of_purchases, foodChance, medicalChance, electronicsChance, outdoorsChance, clothingChance, beautyChance, customer)\r\n self.all_customers[customerID] = [age, gender, hour, customer, self.customer_purchases]\r\n\r\n elif customer == 'Hippie':\r\n hippie += 1\r\n num_of_purchases = 
random.randint(0, 20)\r\n foodChance = 6\r\n medicalChance = 2\r\n electronicsChance = 1\r\n outdoorsChance = 14\r\n clothingChance = 7\r\n beautyChance = 1\r\n self.generatePurchases(num_of_purchases, foodChance, medicalChance, electronicsChance, outdoorsChance, clothingChance, beautyChance, customer)\r\n self.all_customers[customerID] = [age, gender, hour, customer, self.customer_purchases]\r\n\r\n elif customer == 'Old Lady':\r\n oldl += 1\r\n num_of_purchases = random.randint(0, 20)\r\n foodChance = 6\r\n medicalChance = 8\r\n electronicsChance = 0\r\n outdoorsChance = 0\r\n clothingChance = 3\r\n beautyChance = 10\r\n self.generatePurchases(num_of_purchases, foodChance, medicalChance, electronicsChance, outdoorsChance, clothingChance, beautyChance, customer)\r\n self.all_customers[customerID] = [age, gender, hour, customer, self.customer_purchases]\r\n\r\n elif customer == 'Nerd':\r\n nerd += 1\r\n num_of_purchases = random.randint(0, 20)\r\n foodChance = 4\r\n medicalChance = 3\r\n electronicsChance = 14\r\n outdoorsChance = 0\r\n clothingChance = 2\r\n beautyChance = 1\r\n self.generatePurchases(num_of_purchases, foodChance, medicalChance, electronicsChance, outdoorsChance, clothingChance, beautyChance, customer)\r\n self.all_customers[customerID] = [age, gender, hour, customer, self.customer_purchases]\r\n\r\n elif customer == 'Self Doctor':\r\n doctor += 1\r\n num_of_purchases = random.randint(0, 20)\r\n foodChance = 5\r\n medicalChance = 32\r\n electronicsChance = 4\r\n outdoorsChance = 1\r\n clothingChance = 2\r\n beautyChance = 1\r\n self.generatePurchases(num_of_purchases, foodChance, medicalChance, electronicsChance, outdoorsChance, clothingChance, beautyChance, customer)\r\n self.all_customers[customerID] = [age, gender, hour, customer, self.customer_purchases]\r\n\r\n elif customer == 'Nudist':\r\n nudist += 1\r\n num_of_purchases = random.randint(0, 20)\r\n foodChance = 10\r\n medicalChance = 5\r\n electronicsChance = 0\r\n outdoorsChance = 14\r\n clothingChance = 0\r\n beautyChance = 0\r\n self.generatePurchases(num_of_purchases, foodChance, medicalChance, electronicsChance, outdoorsChance, clothingChance, beautyChance, customer)\r\n self.all_customers[customerID] = [age, gender, hour, customer, self.customer_purchases]\r\n\r\n elif customer == 'Old Fart':\r\n oldf += 1\r\n num_of_purchases = random.randint(0, 20)\r\n foodChance = 10\r\n medicalChance = 18\r\n electronicsChance = 5\r\n outdoorsChance = 3\r\n clothingChance = 3\r\n beautyChance = 0\r\n self.generatePurchases(num_of_purchases, foodChance, medicalChance, electronicsChance, outdoorsChance, clothingChance, beautyChance, customer)\r\n self.all_customers[customerID] = [age, gender, hour, customer, self.customer_purchases]\r\n\r\n itemsBought = (\", \".join(repr(e) for e in self.customer_purchases))\r\n self.c.execute(\"INSERT INTO Customer (CustomerID, Hour, Age, Gender, Items) VALUES (?, ?, ?, ?, ?)\", (customerID, hour, age, gender, itemsBought))\r\n self.conn.commit()\r\n\r\n if self.print_counters:\r\n print(\"\\nShoppers:\", shoppers)\r\n print(\"Models:\", models)\r\n print(\"Old Ladies:\", oldl)\r\n print(\"Old Farts:\", oldf)\r\n print(\"Self doctors:\", doctor)\r\n print(\"Nerds:\", nerd)\r\n print(\"Hippies:\", hippie)\r\n print(\"Nudists:\", nudist)\r\n\r\n if self.print_customers:\r\n print(\"\\nRaw Customer Data: \")\r\n print(self.all_customers)", "def list_active_customers():\n active_customers = 0\n for customer in cm.Customers:\n if customer.status == \"Active \":\n active_customers += 1\n 
return active_customers", "def getCustomer(self):\n return self.base.get(\"customer\", [])", "def remove_existing_customers(self):\n\n for i in range(len(self.customers)):\n if self.customers[i].is_active() == False:\n self.customers[i]= 'out'\n self.customers = [item for item in self.customers if item!='out' ]", "def display_customers_list():\n selected_path = request.url_rule\n selected_customer_list = get_selected_path(selected_path)\n page_number = 0\n if 'page' in request.args:\n page_number = int(request.args.get('page'))\n customers, pagination = generate_pagination(page_number, selected_customer_list)\n total_number_of_records = get_number_of_records(selected_customer_list)\n return render_template(\"clients.html\", domain_name= DOMAIN_NAME, selected_customer_list=selected_customer_list, customers=customers, pagination=pagination, url_path=selected_path, total_number_of_records=total_number_of_records, phone_error=None)", "def list_active_customers():\n init_database()\n return Customer.select().where(Customer.active_status).count()", "def list_active_customers():\n try:\n active_customer_count = 0\n for _ in Customer.select().where(Customer.status == 'Active'):\n active_customer_count += 1\n logger.info(\n f\"Successfully counted active customers {active_customer_count}\"\n )\n return active_customer_count\n except Exception as unknown_error:\n logger.error(f\"Error. Failed to count customers. {unknown_error}\")\n print(\n f'Error. Not able to count number of active customers.'\n ' {unknown_error}'\n )", "def get_num_of_sales_per_customer_ids():\n\n # your code", "def customer_agents(self):\n return self.get(\"customer_agents\")", "def getCustomersInfo(self):\n rval = None\n tries = 0\n while not rval and tries < 5:\n if tries > 0:\n time.sleep(30)\n rval = self._getService(self.ns_customer,\n self.soapCustomer % (self.ns_customer, \"\",\n \"100\", \"Advertiser\"),\n \"GetCustomersInfo\", '', '',\n self.cwsdl, self.chost)\n tries += 1\n return rval" ]
[ "0.5755343", "0.5630526", "0.5468172", "0.5455088", "0.5447088", "0.53655386", "0.5348006", "0.5255881", "0.52160037", "0.5184504", "0.5118855", "0.51016146", "0.50954854", "0.5094486", "0.50568765", "0.50267607", "0.49917355", "0.49625525", "0.49529928", "0.49519303", "0.49320677", "0.49264386", "0.49157014", "0.4894947", "0.48897344", "0.4889139", "0.48599762", "0.48556495", "0.48475817", "0.48341656" ]
0.7302814
0
Returns number of invoices from original dataset.
def get_invoice_count(self):
    return self._df_invoice_original.InvoiceNo.unique().shape[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _compute_no_of_invoices(self):\n for record in self:\n record.invoice_count = len(record.invoice_ids)", "def get_invl_count(self):\n return self._df_invoice_original.index.unique().shape[0]", "def invoices(self):\r\n return inv.Invoices(self)", "def get_customer_count(self):\n return self._df_invoice_original.CustomerID.unique().shape[0]", "def expired_invoices_count(self):\n return self.get_expired_invoices().count()", "def getNumOfInvoice(self,id,start,finish):\n self.calls += 1\n invoice = self.getResponse(self.buildParams(id,start,finish))\n if not self.isNumeric(invoice):\n middle = self.diveDates(start,finish)\n plusMiddle = middle + timedelta(days = 1)\n middle = self.removeHours(middle)\n plusMiddle = self.removeHours(plusMiddle)\n invoice = self.getNumOfInvoice(id,start,middle)+\\\n self.getNumOfInvoice(id,plusMiddle,finish)\n return invoice", "def invoices(self):\r\n return inv.AccountInvoices(self)", "def countAndGetCallInvoice(self,id,start,finish):\n self.calls = 0\n return self.getNumOfInvoice(id,start,finish)", "def get_num_of_sales_per_customer_ids():\n\n # your code", "def invoices_in_route(self):\n from invoicing.models import Invoice\n invoices = Invoice.objects.filter(\n route=self.number, print_date__range=(date.today() - timedelta(6), date.today()),\n canceled=False).count()\n return invoices", "def test_total_invoices(self):\n sale = SaleFactory(total_value=1000)\n InvoiceFactory(sale=sale, total_value=50)\n InvoiceFactory(sale=sale, total_value=500)\n self.assertEqual(sale.total_invoices, 550)", "def invoices(self):\r\n return Invoices(self)", "def test_total_invoices_in_cero(self):\n sale = SaleFactory(total_value=100)\n self.assertEqual(sale.total_invoices, 0)", "def _compute_count(self):\n for orders in self:\n orders.count = self.env['account.move'].search_count(\n [('invoice_origin', '=', self.name)])", "def getNoOfRows(self):\n return _patchExtractor.patchExtractor_getNoOfRows(self)", "def num_items(self):\n num_items = 0\n for line in self.lines.all():\n num_items += line.quantity\n return num_items", "def getNumRows(self) -> int:\n ...", "def getInvoice(self):\n return self.base.get(\"invoice\", [])", "def get_entity_contracts_count():\n url = 'http://www.base.gov.pt/base2/rest/contratos?adjudicatariaid=%d' \\\n '&sort(-id)' % entity.base_id\n\n response = requests.get(url, headers={'Range': 'items=0-24'})\n\n results_range = response.headers['content-range']\n _, count = results_range.split('/')\n\n return int(count)", "def row_count(data):\n return int(arcpy.GetCount_management(data).getOutput(0))", "def get_num_rows(self, data, omit_metric=False):\n if omit_metric:\n num_rows = int((len(data.keys())-1)/4)\n else:\n num_rows = int(len(data.keys())/4)\n if len(data.keys())%4 != 0:\n num_rows += 1\n return num_rows", "def invoices(self,org_id=None,invoice_id=''):\n if org_id is None:\n org_id = self.org_id\n return self.get('{}/orgs/{}/invoices/{}'.format(ApiVersion.A1.value,org_id,invoice_id))", "def get_invoices(self, limit=50, closed=False, get_all=False):\n mask = \"mask[invoiceTotalAmount, itemCount]\"\n _filter = {\n 'invoices': {\n 'createDate': {\n 'operation': 'orderBy',\n 'options': [{\n 'name': 'sort',\n 'value': ['DESC']\n }]\n },\n 'statusCode': {'operation': 'OPEN'},\n }\n }\n if closed:\n del _filter['invoices']['statusCode']\n\n return self.client.call('Account', 'getInvoices', mask=mask, filter=_filter, iter=get_all, limit=limit)", "def get_total_item_size(dataset):\n total_items = 0\n for element in dataset:\n total_items += 1\n 
return total_items", "def invoices(self):\n if self.__invoices_manager is None:\n self.__invoices_manager = TaxRetunsManager(\"/invoices\", self._client)\n return self.__invoices_manager", "def count_elements_in_dataset(dataset):\n return dataset.count()", "def get_invoices(self, since, until):\n return self._request('getInvoices', data={\n 'date_from': since.strftime('%d/%m/%Y'),\n 'date_to': until.strftime('%d/%m/%Y')\n })", "def count(self):\n return self.data_container.count", "def get_data_count(self, collection):\n # Use 'data_count' attribute when available. It is created in the\n # BaseCollectionViewSet class.\n return (\n collection.data_count\n if hasattr(collection, \"data_count\")\n else collection.data.count()\n )", "def data_count(self):\n return(len(self.data))" ]
[ "0.7744239", "0.73818606", "0.63354856", "0.6231307", "0.62079424", "0.6154787", "0.6135494", "0.6101816", "0.606694", "0.6054052", "0.60302943", "0.5938585", "0.5850497", "0.58089167", "0.57880515", "0.56934583", "0.568253", "0.56357646", "0.55606055", "0.55459076", "0.54968005", "0.5485106", "0.5460834", "0.54239196", "0.5421888", "0.54191077", "0.5399579", "0.5378061", "0.5360827", "0.53525543" ]
0.7667302
1
Returns number of customers from original dataset.
def get_customer_count(self):
    return self._df_invoice_original.CustomerID.unique().shape[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_num_of_sales_per_customer_ids():\n\n # your code", "def get_all_customers_count():\n data = user_obj.get_all_customers(\"1\")\n return data", "def customer_acccounting(customer_orders):", "def pdelements_num_customers(self) -> int:\n return self.dss_obj.PDElementsI(ctypes.c_int32(4), ctypes.c_int32(0))", "def pdelements_total_customers(self) -> int:\n return self.dss_obj.PDElementsI(ctypes.c_int32(5), ctypes.c_int32(0))", "def get_customer_orders_count(customerId):\n data = user_obj.get_customer_orders(customerId,\"1\")\n return data", "def bus_total_customers(self) -> int:\n return self.dss_obj.BUSI(4, 0)", "def get_total_trans(all_customers_data, trans_column):\n return all_customers_data.select(trans_column).distinct().count()", "def list_active_customers():\n try:\n active_customer_count = 0\n for _ in Customer.select().where(Customer.status == 'Active'):\n active_customer_count += 1\n logger.info(\n f\"Successfully counted active customers {active_customer_count}\"\n )\n return active_customer_count\n except Exception as unknown_error:\n logger.error(f\"Error. Failed to count customers. {unknown_error}\")\n print(\n f'Error. Not able to count number of active customers.'\n ' {unknown_error}'\n )", "def list_active_customers():\n try:\n active_customer_count = 0\n for _ in Customer.select().where(Customer.status == 'Active'):\n active_customer_count += 1\n return active_customer_count\n except Exception as unknown_error:\n print(f'Error. Not able to count number of active customers. {unknown_error}')", "def get_invoice_count(self):\n return self._df_invoice_original.InvoiceNo.unique().shape[0]", "def list_active_customers():\n db_customers = Customers.select()\n LOGGER.debug(\"Calculating number of active customers\")\n # Technically used this in Lesson 03, but it is a comprehension. 
Another method added below.\n number_active = sum([int(x.status) for x in db_customers])\n LOGGER.info(\"There are %d active customers\", number_active)\n\n return number_active", "def bus_interruptions_total_customers(self) -> float:\n return self.dss_obj.BUSF(9, 0)", "def bottom_twenty_customers(data_frame):\n data_frame.loc[:, 'total_payment'] = (data_frame['unit_rental_price']\n * data_frame['quantity_rented']\n * data_frame['rental_period_months'])\n data_set = data_frame.groupby(['customer_id']).agg({'total_payment': 'sum'})\n data_set = data_set.nsmallest(20, 'total_payment')\n return data_set", "def cluster_obs_count(self):\n return(self.merged_data.groupby(\n 'labels').count().transpose().iloc[0, :])", "def count_elements_in_dataset(dataset):\n return dataset.count()", "def get_invl_count(self):\n return self._df_invoice_original.index.unique().shape[0]", "def top_ten_customers(data_frame):\n data_frame.loc[:, 'total_payment'] = (data_frame['unit_rental_price']\n * data_frame['quantity_rented']\n * data_frame['rental_period_months'])\n data_set = data_frame.groupby(['customer_id']).agg({'total_payment': 'sum'})\n data_set = data_set.nlargest(10, 'total_payment')\n return data_set", "def data_center_count(self) -> int:\n return pulumi.get(self, \"data_center_count\")", "def list_active_customers():\n active_customers = 0\n for customer in cm.Customers:\n if customer.status == \"Active \":\n active_customers += 1\n return active_customers", "def all_client_number():\n\n url = CMX_URL + '/api/location/v2/clients/count'\n header = {'content-type': 'application/json', 'accept': 'application/json'}\n response = requests.get(url, headers=header, auth=CMX_AUTH, verify=False)\n response_json = response.json()\n clients_number = response_json['count']\n return clients_number", "def count_target_class_data(data, target_class):\n count = 0\n for row in data:\n if row[0] == target_class:\n count += 1\n\n return count", "def data_count(self):\r\n\r\n shp = self.df.shape\r\n row_count = shp[0]\r\n return row_count", "def get_num_countries():\n num_countries = np.zeros(shape=(len(annual_files), 1))\n \n for year in annual_files:\n df = get_runners_data(year)\n country_count = df['countryCode'].value_counts()\n num_countries[annual_files.index(\n year)] = len(country_count.index)\n return num_countries", "def get_data_count(self, collection):\n # Use 'data_count' attribute when available. It is created in the\n # BaseCollectionViewSet class.\n return (\n collection.data_count\n if hasattr(collection, \"data_count\")\n else collection.data.count()\n )", "def list_active_customers():\n with database.transaction():\n query = (Customer\n .select(fn.COUNT(Customer.status).alias('count'))\n .where(Customer.status == 'Active'))\n LOGGER.info(query)\n\n customer_count = [item.count for item in query]\n LOGGER.info('Number of active customers: %s', customer_count[0])\n\n return customer_count[0]", "def list_active_customers():\n count_query = (Customer\n .select(Customer, fn.COUNT(Customer.name)\n .alias('cust_count'))\n .where(Customer.status == 'active'))\n for count in count_query:\n return count.cust_count", "def carn_count(self):\n return len(self.carnivores)", "def coauthor_count(self):\n return self._json.get('coauthor-count', '0')", "def get_count(cls):\n total = 0\n for counter in SimpleCounterShard.objects.all():\n total += counter.count\n return total" ]
[ "0.7528541", "0.741451", "0.7031075", "0.6915427", "0.68836576", "0.6863681", "0.6667784", "0.616603", "0.61287034", "0.6101881", "0.59332806", "0.5910718", "0.5855194", "0.58472735", "0.58410436", "0.5774803", "0.57345396", "0.57238173", "0.572342", "0.5717885", "0.56658965", "0.56646013", "0.56542027", "0.56331044", "0.55945414", "0.5580822", "0.5579558", "0.55575264", "0.55567914", "0.5548149" ]
0.8029719
0
Returns number of invoice lines (number of rows) from original dataset.
def get_invl_count(self):
    return self._df_invoice_original.index.unique().shape[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_invoice_count(self):\n return self._df_invoice_original.InvoiceNo.unique().shape[0]", "def getNumRows(self) -> int:\n ...", "def _compute_no_of_invoices(self):\n for record in self:\n record.invoice_count = len(record.invoice_ids)", "def getNoOfRows(self):\n return _patchExtractor.patchExtractor_getNoOfRows(self)", "def num_items(self):\n num_items = 0\n for line in self.lines.all():\n num_items += line.quantity\n return num_items", "def row_count(data):\n return int(arcpy.GetCount_management(data).getOutput(0))", "def __len__(self):\n nlines = self.get_endline() - self.get_startline() + 1\n if nlines < 0:\n nlines = 0\n return nlines", "def getNumRows(self):\n return self.__rows", "def get_table_nb_lines(self, table):\n sql = \"SELECT COUNT(*) FROM \" + table + \";\"\n cur = self._connection.cursor()\n cur.execute(sql)\n res = cur.fetchall()\n cur.close()\n return res[0][0]", "def get_line_count(blob):\n return len(blob.split('\\n'))", "def getRowCount(self) -> int:\n ...", "def n_lines(self):\n try: \n return self._n_lines\n except AttributeError:\n self._n_lines = len(self.lines())\n return self._n_lines", "def len(self, table):\n return self.get_table_nb_lines(table)", "def get_num_rows(self, data, omit_metric=False):\n if omit_metric:\n num_rows = int((len(data.keys())-1)/4)\n else:\n num_rows = int(len(data.keys())/4)\n if len(data.keys())%4 != 0:\n num_rows += 1\n return num_rows", "def get_customer_count(self):\n return self._df_invoice_original.CustomerID.unique().shape[0]", "def get_total_line_counts(self):\n return get_total_line_counts(self.files.all())", "def getNbRows(self):\n return self.data.shape[1]", "def num_lines(self, snapshot: Bug, filepath: str) -> int:\n return len(self._line_offsets(snapshot, filepath))", "def data_count(self):\r\n\r\n shp = self.df.shape\r\n row_count = shp[0]\r\n return row_count", "def cpp_getInRowCount(self, patchNo, rowsInPatch):\n return _patchExtractor.patchExtractor_cpp_getInRowCount(self, patchNo, rowsInPatch)", "def get_row_number(self):\n return int(len(self.data_items)/12)", "def get_rows(self) -> int:\r\n return 1 + self.display.get_rows() + 1", "def NumberOfRows(self):\n return _table.DSTable_NumberOfRows(self)", "def data_len(self):\n Nrows_data = 0\n with self._compression_safe_file_opener(self.input_fname, \"r\") as f:\n for i, l in enumerate(f):\n if (l[0 : len(self.header_char)] != self.header_char) and (l != \"\\n\"):\n Nrows_data += 1\n return Nrows_data", "def _get_line_no_(obj, line):\n \n iNo = 0\n for item in obj.order_line:\n iNo += 1\n if (item.id == line.id):\n break\n \n return iNo", "def countLines(file_name, start, end):\r\n\r\n with open(file_name, \"r\") as file:\r\n counter_lines = 0\r\n\r\n for line in islice(file, start, end):\r\n counter_lines += 1\r\n\r\n return counter_lines", "def linecounter(x):\n return linecount(x) + longlines(x)", "def get_number_lines(running_reward_file, running_loss_file, action_count_file):\n if Path(running_reward_file).exists():\n data = np.loadtxt(running_reward_file).reshape(-1,2)\n return data.shape[0]\n if Path(running_loss_file).exists():\n data = np.loadtxt(running_loss_file).reshape(-1,2)\n return data.shape[0]\n if Path(action_count_file).exists():\n data = np.loadtxt(action_count_file).reshape(-1,2)\n return data.shape[0]\n raise NameError(\"No files to count lines\")", "def linecount(x):\n return sum(1 for char in x if char == \"\\n\")", "def num_data_lines(filepath):\n\tif not file_exists(filepath):\n\t\treturn -1\n\tcount = 0\n\twith open(filepath, 'r') as 
f:\n\t\twhile read_floats(f):\n\t\t\tcount += 1\n\tf.close()\n\treturn count" ]
[ "0.7554764", "0.69679993", "0.69573605", "0.6792377", "0.6672587", "0.6513209", "0.649585", "0.6461261", "0.63839567", "0.63666666", "0.62937206", "0.62934756", "0.62633395", "0.6255323", "0.62531394", "0.6229491", "0.62243587", "0.62149376", "0.6199522", "0.61816734", "0.6143403", "0.6119764", "0.6068898", "0.6065976", "0.60635984", "0.6019333", "0.60095745", "0.6004078", "0.598484", "0.59634006" ]
0.7293856
1
Returns a JSON structure issued from the dataframe content given as parameter.
def json_df_builder(self, df, marketID, RFM=None):
    #-------------------------------------------------------------------------
    # Extract from dataframe content to be returned
    #-------------------------------------------------------------------------
    str_customerID = str(df.CustomerID.unique()[0])
    invoice_count = len(df.InvoiceNo.unique())
    item_count = df.Quantity.sum()
    invl_count = df.shape[0]

    ser_incomes = df.UnitPrice * df.Quantity
    incomes = ser_incomes.sum()
    str_incomes = "{0:1.2F}".format(incomes)

    mean_unit_price = incomes / item_count
    str_mean_unit_price = "{0:1.2F}".format(mean_unit_price)

    serInvoiceDate = df.InvoiceDate
    str_old_date = serInvoiceDate.map(str).min()
    str_new_date = serInvoiceDate.map(str).max()

    #-------------------------------------------------------------------------
    # Build JSON structure from content
    #-------------------------------------------------------------------------
    json_result = '{\n'
    json_result += '\t "_results":[\n'
    json_result += "{\n"
    json_result += "\t\t"+" \"customerID\":"+str_customerID+"\n"
    json_result += "\t\t"+",\"marketID\":"+str(marketID)+"\n"
    json_result += "\t\t"+",\"invoice_count\":"+str(invoice_count)+"\n"
    json_result += "\t\t"+",\"item_count\":"+str(item_count)+"\n"
    json_result += "\t\t"+",\"invl_count\":"+str(invl_count)+"\n"
    json_result += "\t\t"+",\"mean_unit_price\":"+str_mean_unit_price+"\n"
    json_result += "\t\t"+",\"incomes\":"+str_incomes+"\n"
    json_result += "\t\t"+",\"old_date\":"+str_old_date+"\n"
    json_result += "\t\t"+",\"new_date\":"+str_new_date+"\n"
    if RFM is not None:
        json_result += "\t\t"+",\"RFM\":"+RFM+"\n"
    else:
        pass
    json_result += "}\n"
    json_result += '\n\t]\n}'
    return json_result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_json(self):\n\t\treturn self._dataframe.reset_index().to_json(orient=\"records\")", "def Mydata():\n\n stmt = db.session.query(Appsdata).statement\n df = pd.read_sql_query(stmt, db.session.bind)\n \n return jsonify(df.to_dict())", "def get_data(dataframe,index=None):\n dflen = len(dataframe)\n if index==None or index <0 or index >= dflen:\n index = randint(0,dflen)\n return dataframe.iloc[index].to_json()", "def json_temp_ges(df):\n\n json_str = (\n df.groupby(\n [\n \"sensor_id\",\n \"measure_name\",\n \"run_id\",\n \"ventilation_rate\",\n \"num_dehumidifiers\",\n \"lighting_shift\",\n \"scenario_type\",\n ],\n as_index=True,\n )\n .apply(\n lambda x: x[\n [\n \"prediction_value\",\n \"prediction_index\",\n \"run_id\",\n \"time\",\n \"timestamp\",\n ]\n ].to_dict(orient=\"records\")\n )\n .reset_index()\n .rename(columns={0: \"Values\"})\n .to_json(orient=\"records\")\n )\n return json_str", "def receive_json_ids(dataframe, jsondata, just_headers = False):\n\n dict_data = ast.literal_eval(jsondata)\n jsondict = {1: 'Kingdom', 2: 'Phylum', 3: 'Class', 4: 'Order', 5: 'Family', 6: 'Genus', 7: 'Species', 8 : 'Strain'}\n # this checks how long the jsondata is and from this it selects the correct Letter out of the jsondict #\n suffix = jsondict[len(dict_data)]\n\n # This selects the data which has the same name as the recieved jsondata\n fw_subset = dataframe[(dataframe[\"fw_\"+ suffix] == dict_data[-1])] \n rv_subset = dataframe[(dataframe[\"rv_\"+suffix] == dict_data[-1])]\n\n # This is only used so that the columns can be easily renamed in something more generic so the append will merge the correct columns\n columns_rename = pd.DataFrame(columns=[\"bitscore\", \"identity\", \"length\"])\n\n # Get the specified data\n fw_sideDf = fw_subset[[\"fw_bit\", \"fw_id\", \"fw_coverage_length\"]]\n rv_sideDf = rv_subset[[\"rv_bit\", \"rv_id\", \"rv_coverage_length\"]]\n\n # Get headers\n fw_headers = fw_subset.index.values.tolist()\n rv_headers = rv_subset.index.values.tolist()\n\n if just_headers:\n return fw_headers, rv_headers\n \n # Rename the columns\n fw_sideDf.columns = columns_rename.columns\n rv_sideDf.columns = columns_rename.columns\n # Combine the two dataframes in one since they have the same column names it will merge completly\n sideDf = fw_sideDf.append(rv_sideDf)\n # Count and group the different entries also convert them into a json\n count_id = sideDf.round(0).groupby(['identity']).size().to_json(orient='table')\n count_bit = sideDf.round(0).groupby(['bitscore']).size().to_json(orient='table')\n count_length = sideDf.round(0).groupby(['length']).size().to_json(orient='table')\n fw_seqs = fw_subset[\"fw_seq\"].tolist()\n rv_seqs = rv_subset[\"rv_seq\"].tolist()\n\n # Get taxonomy id's\n tax_ids = set([*fw_subset.fw_accession.tolist(), *rv_subset.rv_accession.tolist()])\n tax_len = len(tax_ids)\n if tax_len == 0:\n tax_id = \"None\"\n elif tax_len == 1:\n tax_id = list(tax_ids)[0]\n else:\n tax_id = \"More\"\n\n response = {\n \"count_id\":count_id,\n \"count_bit\": count_bit,\n \"count_length\": count_length,\n \"node_name\": dict_data[-1],\n \"tax_id\": str(tax_id),\n \"fw_headers\": fw_headers,\n \"rv_headers\": rv_headers,\n \"fw_seqs\": fw_seqs,\n \"rv_seqs\": rv_seqs\n }\n return jsonify(response)", "def _data_frame(content):\n response = loads(content)\n key = [x for x in response.keys() if x in c.response_data][0]\n frame = DataFrame(response[key])\n final_frame = _convert(frame)\n return final_frame", "def write_jason(df):\n\n\t# set Country as index of 
dataframe\n\tdf = df.set_index('Country')\n\n\t# write datafram to jason file \n\tdf = df.to_json('eda.json', orient='index')", "async def full_report():\n return DF.to_dict(orient=\"records\")", "def convert_to_json(dataframe):\n dataframe = dataframe.set_index('YYYYMMDD').to_json('schiphol_windstoten.json', orient = 'index')", "def get_data(self)->pd.DataFrame:\n pass", "def _dict(content):\n response = _data_frame(content).to_dict(orient='records')\n return response", "def __parse_json(df):\n\t\tcol_names = ['genres', 'production_companies', 'production_countries', 'cast', 'crew', 'spoken_languages',\n\t\t\t\t\t 'Keywords']\n\t\tvalue_names = ['name', 'name', 'iso_3166_1', 'name', 'name', 'name', 'name']\n\t\tfor col_name, value_name in zip(col_names, value_names):\n\t\t\t# df[col_name] = df[col_name].fillna(\"{}\")\n\t\t\tdf[col_name] = df[col_name].apply(literal_eval_error_handling)\n\t\t\tdf[col_name] = df[col_name].apply(lambda x: [i[value_name] for i in x])\n\t\treturn df", "def toDataFrame(self):\r\n if self.output_format in ('json', 'jsonExtended'):\r\n return json.dumps(self.result)\r\n \r\n elif self.output_format in ('tab2', 'extendedTab2'):\r\n return StringIO('\\t'.join(self.headers) + self.result)", "def pandas_to_njson(df):\n \n #df['secao'] = df['secao'].astype(int)\n \n records = df.to_dict(orient='records')\n json_list = [json.dumps(add_process_date(record), ensure_ascii=False) for record in records]\n njson = '\\n'.join(json_list)\n \n return njson", "def names():\n\n df = pd.read_sql_query(f\"SELECT * FROM olympics_raw\", con = engine)\n print(df.head())\n \n\n # return jsonify(all_olympians)\n return jsonify(df.to_dict(orient='records'))", "def display_raw_data(df):\n raw_data_lenght=df.shape[0]\n #loop through from 0 to number of rows in steps of 5\n for i in range(0,raw_data_lenght,5):\n response=input('\\n Do you want examin a perticular user data? 
Type \\'yes \\'or \\'no \\'\\n')\n if response.lower()!='yes':\n break\n \n data=df.iloc[i: i+5].to_json(orient='records',lines=True).split('\\n')\n for row in data:\n passed=json.loads(row)\n j_row=json.dumps(passed,indent=3)\n print(j_row)", "def create_data_frame(input_filepath):\n df = pd.read_json(input_filepath)\n logger = logging.getLogger(__name__)\n logger.info('Imported dataframe:')\n logger.info(df.info())\n logger.info(df.describe())\n logger.info(df.head())\n return df", "def to_dataframe(self):\n return df_util.to_dataframe(requests.get(self.__url).json())", "def json2dataframe(data):\n # Load and parse the available streamflow data\n siteId = data['value']['timeSeries'][0]['sourceInfo']['siteCode'][0]['value']\n df = pd.DataFrame(data['value']['timeSeries'][0]['values'][0]['value'])\n df = df.set_index(df['dateTime'], drop=True)\n df['value'] = df['value'].astype('float32')\n df.index = pd.to_datetime(df.index)\n last_available_date = df.index[-1].strftime(\"%Y-%m-%d\")\n return df, siteId, last_available_date", "def converttojson(edge_df):\n\tedge_df_str = edge_df.copy()\n\tfor idx, col in enumerate(edge_df.columns):\n\t\tfirst_row_element = edge_df.iloc[0, idx]\n\t\tif isinstance(first_row_element, list) or isinstance(first_row_element, dict):\n\t\t\tedge_df_str[col] = edge_df[col].apply(json.dumps)\n\t\t\tprint('Field \"{}\" of class {} converted to json string'.format(col, type(first_row_element)))\n\t\t#else:\n\t\t#\tprint(col,type(edge_df[col][0]))\n\treturn edge_df_str", "def json(self) -> CellJson:\n\n return {\"id\": self.id, \"content\": self.content, \"data\": self.data}", "def insert_df_json(conn, table_name: str, df: pd.DataFrame):\n insert_json(conn=conn, table_name=table_name, data=df.reset_index().to_json(orient='records', lines=True))", "def createDataframe(httpData):\n jsonData = json.loads(httpData)\n return pd.json_normalize(jsonData['data'])", "def generate_df(js_dict, naming, value=\"value\"):\n\n values = []\n dimensions, dim_names = get_dimensions(js_dict, naming)\n values = get_values(js_dict, value=value)\n output = pd.DataFrame([category + [values[i]]\n for i, category in\n enumerate(get_df_row(dimensions, naming))])\n output.columns = dim_names + [value]\n output.index = range(0, len(values))\n return output", "def input_fn(request_body, request_content_type):\n if request_content_type == \"application/json\":\n json_load = json.loads(request_body)\n data=get_dataframe_from_dict(json_load)\n csv_data=data.to_csv(index=False,header=None)\n data=csv_data.replace(\"\\n\",\"\")\n s = StringIO(data)\n data = pd.read_csv(s, header=None)\n\n return data\n else:\n # Handle other content-types here or raise an Exception\n # if the content type is not supported.\n pass", "def json(path):\n try:\n # TODO: Check a better way to handle this Spark.instance.spark. Very verbose.\n df = Spark.instance.spark.read.json(path)\n except IOError as error:\n logging.error(error)\n raise\n return df", "def dataframe(self):\n\t\treturn self._dataframe", "def df():\n fs.df()", "def to_df(self) -> pd.DataFrame:\n data = []\n for action in self.actions:\n data.append(action.to_df())\n df = pd.read_json(json.dumps(data), orient=\"list\")\n return df[self.fields]", "def save_json(df):\n dict = {}\n for row in df.iterrows():\n dict[row[1]['Country']] = {'Region' : row[1]['Region'],\n 'Pop. Density (per sq. mi.)' : row[1]['Pop. Density (per sq. 
mi.)'],\n 'Infant mortality (per 1000 births)' : row[1]['Infant mortality (per 1000 births)'],\n 'GDP ($ per capita) dollars' : row[1]['GDP ($ per capita) dollars']\n }\n\n with open('data.json', 'w', encoding='utf8') as outfile:\n data = json.dumps(dict, indent=4, sort_keys=False, separators=(',', ': '), ensure_ascii=False)\n outfile.write(data)" ]
[ "0.64720726", "0.63688964", "0.62659526", "0.62423277", "0.61731887", "0.61728024", "0.6165461", "0.610696", "0.608971", "0.608501", "0.60734797", "0.6067765", "0.60032827", "0.5975856", "0.59611905", "0.5942278", "0.5940992", "0.59368914", "0.5936283", "0.5933788", "0.5910125", "0.58526474", "0.57914597", "0.578663", "0.57680786", "0.57611877", "0.5734831", "0.5733008", "0.5713259", "0.5712131" ]
0.6871927
0
This function is used for the validation process. It returns a list of stockCode items and a list of quantities for each item.
def get_order_lists(self, n_items, n_quantities):
    arr_stock_code = self._df_invoice_original.StockCode.unique()
    arr_stock_code = np.random.choice(arr_stock_code, n_items)
    list_stockCode = list(arr_stock_code)
    list_quantities = np.ones(arr_stock_code.shape[0])
    list_quantities *= n_quantities
    return list_stockCode, list_quantities
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_items(self):\n\n items = []\n\n params = self.request.query_params\n\n if 'items[]' in params:\n items = params.getlist('items[]', [])\n elif 'item' in params:\n items = [params.get('item', None)]\n\n if type(items) not in [list, tuple]:\n items = [items]\n\n valid_ids = []\n\n for item in items:\n try:\n valid_ids.append(int(item))\n except (ValueError):\n pass\n\n # List of StockItems which match provided values\n valid_items = StockItem.objects.filter(pk__in=valid_ids)\n\n return valid_items", "def parse_quantities(quantities):\n codes = []; names = []\n\n for q in quantities:\n c, n = parse_quantity(q)\n codes.append(c)\n names.append(n)\n\n return codes, names", "def test_find_stock_items(self):\n pass", "def quantities_available(quantities):\n available = []\n for q in quantities:\n available.append(quantity_available(q))\n return available", "def getUnitPriceList(self, list_stockCode):\n df = self._df_invoice_original\n\n list_unitPrice = list()\n \n for stockCode in list_stockCode:\n unitPrice = df[df.StockCode==stockCode].UnitPrice.unique()[0]\n list_unitPrice.append(unitPrice)\n return list_unitPrice", "def get_items(self):\n return [item for item in self.items if item.quantity > 0]", "def fill_item_list(self):\n return_list = []\n with Transaction().start(DBNAME, 1):\n self.productlist = self.Product.search([('description', '=', 'Stock'), ('type', '=', 'goods')])\n for i in self.productlist:\n return_list.append(i.template.name)\n return return_list", "def get_item_variants(self, item_id, item_name, start):\n\n item_url = f\"https://www.supremenewyork.com/shop/{item_id}.json\"\n\n item_variants = rq.get(item_url, headers=self.headers, proxies=self.proxy).json()\n\n for stylename in item_variants[\"styles\"]:\n for itemsize in stylename[\"sizes\"]:\n item = [item_name, stylename[\"name\"], itemsize['name'], item_variants[\"description\"], 'https:' + stylename[\"image_url\"], item_url.split('.json')[0]]\n if itemsize[\"stock_level\"] != 0:\n # Checks if it already exists in our instock\n if self.checker(item):\n pass\n else:\n # Add to instock dict\n self.instock.append(item)\n \n # Send a notification to the discord webhook with the in-stock product\n if start == 0:\n print('Sending new Notification')\n self.discord_webhook(item)\n logging.info(msg='Successfully sent Discord notification')\n\n else:\n if self.checker(item):\n self.instock.remove(item)", "def load_stock(self):\n lines = []\n with Transaction().start(DBNAME, 1):\n stock_lines = self.Inventory.search([('state', '=', 'done'), ('location', '=', self.location.id)])\n if stock_lines:\n for i in stock_lines:\n batch = i.batch_number\n for j in i.lines:\n if j.quantity <= 0:\n continue\n dictionary = {}\n dictionary['code'] = j.product.code\n dictionary['item'] = j.product.template.name\n dictionary[\n 'category'] = j.product.template.category.name if j.product.template.category else None\n dictionary['quantity'] = Decimal(j.quantity).quantize(Decimal('0.11')).to_eng()\n dictionary['batch_number'] = batch\n dictionary['supplier'] = j.supplier.name if j.supplier else None\n dictionary['expiry_date'] = j.expiry_date.strftime('%d-%m-%Y') if j.expiry_date else None\n lines.append(dictionary)\n return lines", "def clean(self):\n cleaned_data = super().clean()\n variant = cleaned_data.get('variant')\n quantity = cleaned_data.get('quantity')\n if variant and quantity is not None:\n try:\n variant.check_quantity(quantity)\n except InsufficientStock as e:\n error = forms.ValidationError(\n pgettext_lazy(\n 'Add item 
form error',\n 'Could not add item. '\n 'Only %(remaining)d remaining in stock.' %\n {'remaining': e.item.quantity_available}))\n self.add_error('quantity', error)\n return cleaned_data", "def getItemList(self):\r\n raise AbstractError\r\n return []", "def validate(self, attrs):\n exception_body = []\n for orderline in attrs.get('orderlines', []):\n product = orderline['product']\n\n # If orderline has less units than available, all good.\n if orderline['units'] <= product.units:\n continue\n\n # else error is accumulated\n if product.units > 0:\n exception_body.append({product.name: 'Only {0} units available.'.format(str(product.units))})\n else:\n exception_body.append({product.name: 'Out of stock'})\n\n # If any orderline has problem, reject order.\n if exception_body:\n raise exceptions.PermissionDenied({'errors': exception_body})\n\n return attrs", "def test_CalculateStockItemOrders(self):\n symbol = \"XXXX\"\n\n # Create ActiveStockItem\n activeStockItem = ActiveStockItem(symbol=symbol)\n quantity = 2\n buyStepSize = 1\n activeStockItem.SellStepSize = 2\n activeStockItem.SellStepType = SellDeltaType.FIXED\n activeStockItem.StartPrice = 20.55\n activeStockItem.QuantityMultiplier = 1\n activeStockItem.MaxActiveBuy = 2\n priceCoordinates:List[PriceCoordinate] = []\n priceCoordinates.append(PriceCoordinate(startPrice=0,quantity=quantity, \n buyDeltaType=BuyDeltaType.FIXED, fixedBuyDelta=buyStepSize))\n activeStockItem.PriceCoordinates = priceCoordinates\n\n # Create PortfolioPosition\n portfolioPosition = PortfolioPosition(symbol=symbol)\n portfolioPosition.Quantity = 9\n \n expectedLimitOrders:List[OrderInfo] = [\n OrderInfo(Settings.NewOrderId, symbol, 22.55, 2, False, True),\n OrderInfo(Settings.NewOrderId, symbol, 21.55, 2, False, True),\n OrderInfo(Settings.NewOrderId, symbol, 20.55, 2, False, True),\n OrderInfo(Settings.NewOrderId, symbol, 19.55, 2, False, True),\n OrderInfo(Settings.NewOrderId, symbol, 18.55, 1, True, True),\n OrderInfo(Settings.NewOrderId, symbol, 16.55, 1, True, False),\n OrderInfo(Settings.NewOrderId, symbol, 15.55, 2, False, False)\n ]\n\n possibleLimitOrders:List[OrderInfo] = self.manageOrdersHelpers.GeneratePossibleLimitOrders(activeStockItem, portfolioPosition.Quantity)\n\n self.assertSequenceEqual(expectedLimitOrders, possibleLimitOrders)\n\n placeOrders, cancelOrders = self.moneyMaker.CalculateStockItemOrders(activeStockItem, [], portfolioPosition)\n\n print(placeOrders)\n\n print(cancelOrders)\n\n for activeStockItem in ActiveStockItems:\n print(activeStockItem.Symbol)", "def stock_availability():\n\tdef update_reserved_qty(bin_data, updates):\n\t\tfor k, v in updates.items():\n\t\t\tif k in bin_data:\n\t\t\t\told_reserved = bin_data[k][\"reserved\"]\n\t\t\t\tnew_reserved = old_reserved + v\n\t\t\t\tbin_data[k][\"reserved\"] = new_reserved\n\t\treturn bin_data\n\n\ttry:\n\t\tstock_for_so = []\n\t\tquery = \"\"\"\n\t\t\tselect so.name, so.customer, soi.item_code, (soi.qty - soi.delivered_qty) as qty\n\t\t\tfrom `tabSales Order` so left join `tabSales Order Item` soi\n\t\t\ton so.name = soi.parent\n\t\t\twhere so.status not in ('Closed', 'Stopped') and so.docstatus = 1\n\t\t\tgroup by so.name, soi.item_code order by so.creation\n\t\t\"\"\"\n\t\tso_data = frappe.db.sql(query, as_dict=True)\n\n\t\t# formatting: sales_data => {\"sales_order\": [{\"item_code\": \"qty\"}]}\n\t\tsales_data = {}\n\t\tfor so in so_data:\n\t\t\tif so.get(\"name\") not in sales_data:\n\t\t\t\tsales_data[so.name] = [{so.item_code: so.qty}]\n\t\t\telse:\n\t\t\t\texisting = 
sales_data[so.name]\n\t\t\t\texisting.append({so.item_code:so.qty})\n\t\t\t\tsales_data[so.name] = existing\n\n\t\t# available stock\n\t\tbin_data = frappe.db.sql(\"\"\"select item_code, sum(actual_qty) as actual_qty\n\t\t\tfrom `tabBin` group by item_code\"\"\")\n\n\t\t# {\"item_code\": {\"bin_qty\", \"reserved\"}}\n\t\tbin_qty = { b[0]:{\"qty\": b[1], \"reserved\": 0} for b in bin_data if b[1] > 0}\n\n\t\t# check sales order wise availability\n\t\tfor so, items in sales_data.items():\n\t\t\tif not frappe.db.get_value(\"Sales Order\", so, \"stock_availability_mail\"):\n\t\t\t\titem_qty = {}\n\t\t\t\tis_stock_available = True\n\t\t\t\tfor item in items:\n\t\t\t\t\titem_code, qty = item.keys()[0], item.values()[0]\n\t\t\t\t\tif item_code in bin_qty:\n\t\t\t\t\t\tif qty <= bin_qty[item_code][\"qty\"] - bin_qty[item_code][\"reserved\"]:\n\t\t\t\t\t\t\titem_qty[item_code] = qty\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tis_stock_available = False\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\telse:\n\t\t\t\t\t\tis_stock_available = False\n\t\t\t\t\t\tbreak\n\t\t\t\tif is_stock_available:\n\t\t\t\t\t# update_bit_qty_reserved\n\t\t\t\t\tbin_qty = update_reserved_qty(bin_qty, item_qty)\n\t\t\t\t\tstock_for_so.append(so)\n\t\tif len(stock_for_so):\n\t\t\tstock_availability_mail(stock_for_so)\n\texcept Exception as e:\n\t\tfrappe.log_error(message=frappe.get_traceback(), title=\"Stock availability Scheduler failed\")", "def _get_stock_item_ids(cls, *skus):\n return linnapi.inventory.get_stock_item_ids_by_sku(*skus)", "def getStockCodeList(self, list_description=None):\n list_stockCode = list()\n df = self._df_invoice_original\n \n if list_description is None:\n list_stockCode = list(df.StockCode.unique())\n else :\n for description in list_description:\n stockCode = df[df.Description==description].StockCode.unique()[0]\n list_stockCode.append(stockCode)\n return list_stockCode", "def stocks(self):\n return self.quantity - self.reserved", "def test_gather_success(self):\n gathered_items = self.Quant._gather(self.apple, self.test_stock_location_01)\n # Check the number of apple quants returned is correct\n self.assertEqual(len(gathered_items), 3)\n # Check that the products are all of expected type\n self.assertEqual(gathered_items.product_id, self.apple)\n\n # Unfold the returned quants\n _q1, second_quant, _q2 = gathered_items\n # Check when quant_ids is set in the context\n gathered_items_subset = self.Quant.with_context(quant_ids=[second_quant.id])._gather(\n self.apple, self.test_stock_location_01\n )\n self.assertEqual(len(gathered_items_subset), 1)\n self.assertEqual(gathered_items_subset.product_id, self.apple)\n self.assertEqual(gathered_items_subset, second_quant)", "def _validate_qty(values: dict):\n\n if not (quantity := values.get('quantity')):\n raise ValueError(\"Quantity attribute is required.\")\n\n if not (symbol := values.get('symbol')):\n raise ValueError(\"Symbol attribute is required.\")\n\n filter = symbol.filters.lot_size_filter\n # if ONE :=1 and not filter.min_qty <= quantity <= filter.max_qty:\n # ValueError(\"The quantity is not in valid range.\")\n\n if filter.step_size and not is_valid_significant_digits(\n quantity,\n symbol.qty_decimal_precision\n ):\n raise ValueError(\"The quantity precision is not valid.\")\n\n return values", "def test_shopping_cart_has_items(self):\n list_items = self.get_list_of_items()\n\n self.assertTrue(len(self.expected_contents) == len(list_items))\n\n for expected_item, list_item in zip(\n self.expected_contents, list_items):\n item_dict = 
self.get_item_dict(list_item)\n for key in expected_item:\n try:\n list_value = item_dict[key].text\n except AttributeError:\n list_value = item_dict[key]\n self.assertEqual(str(expected_item[key]), list_value)\n self.assertEqual(\n str(self.client.session['cart_cost']),\n self.browser.find_element_by_id('food-cost').text\n )", "def compute_items(self):\n rule = self.rule\n # self.items is the sub-objects, as a list\n if rule.is_terminal():\n self.the_items = [rule]\n elif rule.is_symbol_name():\n self.the_items = [rule]\n elif rule.is_empty():\n self.the_items = []\n elif isinstance(rule, Seq):\n self.the_items = [i for i in rule]\n else:\n raise RuntimeError(\"invalid item object: {}\".format(str(rule)))\n return self.the_items", "def clean_items(self):\n items = self.cleaned_data['items']\n if len(items) < 1:\n v_err('no_items')\n return items", "def _generate_native_quantity_list(self):\n\n return set(self._schema).union(self._native_filter_quantities)", "async def _get_stock_data(self, stocks: list):\n\t\tapi_url = 'https://sandbox.tradier.com/v1/markets/quotes'\n\t\tstocks = ','.join(stocks)\n\t\tif not stocks:\n\t\t\treturn []\n\t\ttoken = await self.bot.get_shared_api_tokens('stocks')\n\t\ttoken = token.get('key', None)\n\t\tif not token:\n\t\t\traise ValueError(\n\t\t\t\t'You need to set an API key!\\n'\n\t\t\t\t'Follow this guide for instructions on how to get one:\\n'\n\t\t\t\t'<https://github.com/Flame442/FlameCogs/blob/master/stocks/setup.md>'\n\t\t\t)\n\t\tparams = {'symbols': stocks}\n\t\theaders = {'Authorization': f'Bearer {token}', 'Accept': 'application/json'}\n\t\tasync with aiohttp.ClientSession() as session:\n\t\t\tasync with session.get(api_url, params=params, headers=headers) as r:\n\t\t\t\ttry:\n\t\t\t\t\tr = await r.json()\n\t\t\t\texcept aiohttp.client_exceptions.ContentTypeError:\n\t\t\t\t\t#This might happen when being rate limited, but IDK for sure...\n\t\t\t\t\traise ValueError('Could not get stock data. 
The API key entered is most likely not valid.')\n\t\tr = r['quotes']\n\t\tif 'quote' not in r:\n\t\t\treturn []\n\t\tr = r['quote']\n\t\tif not isinstance(r, list):\n\t\t\tr = [r]\n\t\tstock = {\n\t\t\tx['symbol']: {\n\t\t\t\t'price': max(1, int(x['last'] * 100)),\n\t\t\t\t#New API does not give this info.\n\t\t\t\t'total_count': None, #int(x['marketCap'] / x['last']) if x['marketCap'] else None\n\t\t\t} for x in r if 'last' in x and x['last'] is not None\n\t\t}\n\t\treturn stock", "def calc_multi_special(self):\r\n\r\n for special_type in self.specials_data_list:\r\n for num in range(special_type.get('num_to_apply', 0)):\r\n discounted_amount = self.basket_item['product_price'] - (self.basket_item['product_price'] *\r\n (1 - special_type['special_discount_rate']))\r\n\r\n try:\r\n self.processed_basket_item_list[num]['specials'].append({'special_code': special_type['special_code'],\r\n 'special_discount': '{0:.2f}'.format(discounted_amount)})\r\n except IndexError:\r\n self.processed_basket_item_list.append({'product_code': self.basket_item['product_code'],\r\n 'product_price': self.basket_item['product_price'],\r\n 'specials': [{'special_code': special_type['special_code'],\r\n 'special_discount': '{0:.2f}'.format(discounted_amount)}]})\r\n\r\n for item in self.processed_basket_item_list:\r\n yield item", "def get_data_of_stocks(self):\n\n indexes_to_remove = []\n # Request data for each stock\n for index, stock in enumerate(self.stock_list):\n stock.get_soups()\n stock.find_data()\n stock.print_report()\n self.print_progress(index)", "def getQuantitys(self, quantityNames):\n selectedQuantities = []\n for quantityName in quantityNames:\n foundQuantities = [q for q in self.quantityList if q.name == quantityName]\n if len(foundQuantities) > 0:\n selectedQuantities.append(foundQuantities[0])\n return selectedQuantities", "def portfolio_checkinput(stock_ticker_list):\n if not isinstance(stock_ticker_list, list):\n raise InvalidTickerlist\n return 0", "def parts_demand(request):\n critical_part = []\n quantity = None\n bom_name = None\n if request.method == 'POST':\n form = PartsDemandForm(request.POST)\n if form.is_valid():\n bom_name = form.cleaned_data['bom']\n quantity = int(form.cleaned_data['quantity'])\n warehouse = form.cleaned_data['warehouse']\n warehouse_obj = Warehouse.objects.get(warehouse_name=warehouse)\n stock = calculate_stock(warehouse_obj)\n parts = get_bom_parts(bom_name)\n print(stock)\n for part in parts:\n part_qty = float(part['Qty'])\n part_name = part['PartName']\n part_number = part['PartNumber']\n if stock.get(part_name):\n av_stock = stock.get(part_name)['total_usable_stock']\n # print(av_stock, quantity, part_qty, quantity * part_qty)\n else:\n av_stock = 0\n critical = int(av_stock) - int(quantity * part_qty)\n if critical <= 0:\n test = {\n \"critical_qty\": critical,\n \"part_number\": part_number,\n \"part_name\": part_name\n }\n critical_part.append(test)\n else:\n form = PartsDemandForm()\n context = {\n 'form': form,\n 'critical_part': critical_part,\n 'quantity': quantity,\n 'bom': bom_name,\n }\n\n return render(request, 'inventory/parts_demand.html', context)", "def __init__(self):\n # note: We could have implemented the list as a dictionary, with\n # the barcode as the key, however if the barcode for the item\n # changes we might have problems.\n self.stocklist = [] # a list of stock items" ]
[ "0.7183774", "0.62451553", "0.6027354", "0.5956186", "0.57162726", "0.5646305", "0.56151676", "0.55878735", "0.55574286", "0.54344946", "0.5425899", "0.54236686", "0.5393303", "0.539217", "0.5386628", "0.53802747", "0.5373666", "0.5363041", "0.53269744", "0.52845365", "0.52759457", "0.5270009", "0.5245161", "0.5220751", "0.5185271", "0.51629645", "0.5150221", "0.514785", "0.51308143", "0.51299" ]
0.6709346
1
Sentence generator for an entire corpus directory.
def sentences_for_dir(path='./',separate=True,gzipped=True): for filename in cowfiles(path): for metadata, data in sentence_generator(filename,separate,gzipped): yield metadata, data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_data_sentences(dirname):\n sentence_list = []\n for fname in os.listdir(dirname):\n with open(os.path.join(dirname, fname)) as file:\n #sentence_list.append(gensim.models.word2vec.LineSentence(file))\n sentence_list.append(file)\n return sentence_list", "def sents(self, fileids=None, categories=None):\n for paragraph in self.paras(fileids, categories):\n for sentence in sent_tokenize(paragraph, language='russian'):\n yield sentence", "def process_docs(directory, vocab):\n for filename in listdir(directory):\n if not filename.endswith('.txt'):\n continue\n path = directory + '/' + filename\n add_doc_to_vocab(path, vocab)", "def generate_words(text='', train_path=None, case_sensitive=True, epochs=20, classifier=nlup.BinaryAveragedPerceptron, **kwargs):\n if train_path:\n generate_sentences.detector = Detector(slurp(train_path), epochs=epochs, nocase=not case_sensitive)\n # generate_sentences.detector = SentenceDetector(text=text, nocase=not case_sensitive, epochs=epochs, classifier=classifier)\n return iter(generate_sentences.detector.segments(text))", "def iter_documents(top_directory):\n for root, dirs, files in os.walk(top_directory):\n for file in filter(lambda file: file.endswith('.txt'), files):\n document = open(os.path.join(root, file)).read() # read the entire document, as one big string\n yield utils.tokenize(document, lower=True) # or whatever tokenization suits you", "def genSent(self, toTree, num):\n sentences = []\n\n\n while num > 0:\n words = []\n root = self.weightedRandomChoice(\"ROOT\")\n for w in root:\n subSentence = self.findSubSentence(toTree, w)\n if toTree:\n words.append((w, subSentence))\n else:\n words.extend(subSentence)\n if toTree:\n sentence = (\"ROOT\", tuple(words))\n else:\n sentence = \" \".join(words)\n\n if sentence not in sentences: # prevent storing duplicated sentence\n sentences.append(sentence)\n num -= 1\n\n # print all the generated sentences\n if toTree:\n for sentence in sentences:\n self.printTree(sentence, 0, 0)\n print \"\"\n # CFG.treeToSentence(sentence)\n # print \"\"\n else:\n for sentence in sentences:\n print sentence", "def generate_corpus(self, text):\n if isinstance(text, str):\n sentences = self.sentence_split(text)\n else:\n sentences = []\n for line in text:\n sentences += self.sentence_split(line)\n passing = filter(self.test_sentence_input, sentences)\n runs = map(self.word_split, passing)\n return runs", "def import_spontaneous_speech_corpus(corpus_name, directory, **kwargs):\n\n dialect = kwargs.pop('dialect', 'textgrid')\n stop_check = kwargs.pop('stop_check', None)\n call_back = kwargs.pop('call_back', None)\n speaker_source = kwargs.pop('speaker_source', None)\n delimiter = kwargs.pop('delimiter', None)\n\n corpus = SpontaneousSpeechCorpus(corpus_name,directory)\n\n words = []\n phones = []\n textgrids = []\n wavs = []\n if call_back is not None:\n call_back('Finding files...')\n call_back(0,1)\n cur = 0\n for root, subdirs, files in os.walk(directory):\n if stop_check is not None and stop_check():\n return\n for f in files:\n if dialect == 'textgrid' and f.lower().endswith('.textgrid'):\n textgrids.append(os.path.join(root,f))\n elif dialect == 'buckeye' and f.endswith('.words'):\n words.append(os.path.join(root,f))\n elif dialect == 'buckeye' and f.endswith('.phones'):\n phones.append(os.path.join(root,f))\n elif dialect == 'timit' and f.endswith('.wrd'):\n words.append(os.path.join(root,f))\n elif dialect == 'timit' and f.endswith('.phn'):\n phones.append(os.path.join(root,f))\n elif f.endswith('.wav'):\n 
wavs.append(os.path.join(root,f))\n if dialect == 'textgrid':\n word_tier_name = kwargs.pop('word_tier_name', None)\n phone_tier_name = kwargs.pop('phone_tier_name', None)\n dialogs = align_textgrid_info(textgrids, wavs, speaker_source, stop_check, call_back)\n else:\n dialogs = align_dialog_info(words, phones, wavs, speaker_source, stop_check, call_back)\n if call_back is not None:\n call_back('Processing discourses...')\n call_back(0,len(dialogs))\n cur = 0\n\n for d, v in dialogs.items():\n if stop_check is not None and stop_check():\n return\n if call_back is not None:\n cur += 1\n call_back(cur)\n discourse_info = {'name':d}\n if dialect == 'textgrid':\n if 'textgrid' not in v:\n continue\n data = textgrids_to_data(v['textgrid'], word_tier_name,\n phone_tier_name,\n v['speaker'], delimiter)\n else:\n if 'words' not in v:\n continue\n if 'phones' not in v:\n continue\n data = files_to_data(v['words'], v['phones'], dialect)\n discourse_info['speaker'] = Speaker(v['speaker'])\n\n if 'wav' in v:\n discourse_info['wav_path'] = v['wav']\n corpus.add_discourse(data, discourse_info,delimiter=delimiter)\n return corpus", "def process_docs(directory, vocab):\n for file_name in listdir(directory):\n file_path = directory + '/' + file_name\n add_doc_to_vocab(file_path, vocab)", "def word_runner(self):\n with open(self.filename) as doc:\n text = doc.readlines()\n for line in text:\n for word in line.split():\n yield word", "def get_corpus():\n all_text = []\n\n for _, _, files in os.walk(DATA_DIRECTORY):\n for f in files:\n with open(os.path.join(DATA_DIRECTORY, f), 'r') as article:\n # Quotation marks rarely come out as pairs in finished chains.\n # So we remove them before adding the article text:\n all_text.append(re.sub(r'[„“]', '', article.read()))\n\n return markovify.Text(\"\".join(all_text), state_size=2)", "def generate_sentences(text='', train_path=None, case_sensitive=True, epochs=20, classifier=nlup.BinaryAveragedPerceptron, **kwargs):\n if train_path:\n generate_sentences.detector = Detector(slurp(train_path), epochs=epochs, nocase=not case_sensitive)\n # generate_sentences.detector = SentenceDetector(text=text, nocase=not case_sensitive, epochs=epochs, classifier=classifier)\n return iter(generate_sentences.detector.segments(text))", "def make_sentences(self):\n\n if self.document == None:\n return\n\n sent = sent_tokenize(self.document) # contains raw sentences\n\n\n # Create parameters for NER and Dependency Parsing a\n # and pass it to the sentence objcet\n\n # set config file\n config = CP.RawConfigParser()\n config = config\n config.read('config.py')\n\n # Server for dependency parsing\n\n server = ServerProxy(JsonRpc20(),TransportTcpIp(addr=(\"127.0.0.1\", 8080), timeout=200.0))\n\n # Parameters for Named entitye recognition\n\n # get the classifier and tagger location from config file\n tagger = config.get('NER','tagger') # gets the path of the stanford tagger\n classifier = config.get('NER','classifier') # gets the path of the stanford classifier\n st = StanfordNERTagger(classifier,tagger)\n for i in range(len(sent)):\n s = Sentence(sent[i],i,server, st, 'test')\n self.sentences.append(s)", "def build_corpus(self):\n print(\"Inside the build_corpus >>>>>\")\n documentsCount = 0\n documents = self.documents\n\t\t\n with open(self.documents_path) as file:\n for documents in file.readlines():\n documents = documents.rstrip('}\\n ').strip('0\\t').strip('1\\t').split(' ')\n documentsCount = documentsCount +1\n self.documents.append(documents)\n\t\t\t\n self.number_of_documents = 
documentsCount", "def process_corpus(self):\n sentences = []\n sentence = []\n with open(str(self.file), encoding=self.encoding) as f:\n\n line = f.readline()\n\n while line:\n\n if line.startswith(\"#\"):\n line = f.readline()\n continue\n\n if line.strip().replace(\"\", \"\") == \"\":\n if len(sentence) > 0:\n self.infer_space_after(sentence)\n if self.tagging_scheme is not None:\n self.convert_tag_scheme(\n sentence, target_scheme=\"iobes\"\n )\n\n sentences.append(sentence)\n sentence = []\n\n else:\n fields = re.split(r\"\\s+\", line)\n token = fields[0] # text column\n token_tags = {\n v: fields[k]\n for k, v in self.columns.items()\n if v != \"text\"\n }\n sentence.append({\"name\": token, \"tags\": token_tags})\n\n line = f.readline()\n\n return sentences", "def process_docs_2(directory, vocab):\n lines = []\n for filename in listdir(directory):\n if not filename.endswith('.txt'):\n continue\n path = directory + '/' + filename\n line = doc_to_line(path, vocab)\n lines.append(line)\n return lines", "def _doc2vec_doc_stream(paths, n, tokenizer=word_tokenize, sentences=True):\n i = 0\n p = Progress()\n for path in paths:\n with open(path, 'r') as f:\n for line in f:\n i += 1\n p.print_progress(i/n)\n\n # We do minimal pre-processing here so the model can learn\n # punctuation\n line = line.lower()\n\n if sentences:\n for sent in sent_tokenize(line):\n tokens = tokenizer(sent)\n yield LabeledSentence(tokens, ['SENT_{}'.format(i)])\n else:\n tokens = tokenizer(line)\n yield LabeledSentence(tokens, ['SENT_{}'.format(i)])", "def generate(self, handle, occurrence_threshold=None): # silently creates other pieces of data\n print colors.yellow(\"generating corpus for {}...\\n\".format(handle))\n if occurrence_threshold: # if one was given, set it\n self.threshold = occurrence_threshold\n self.handle = handle\n self.path = \"bot_files/{0}/{0}\".format(handle)\n self.process_tweets()\n self.generate_vocab()\n self.generate_corpus()", "def sents(self):\n\n text = str()\n for file in os.listdir(self.path):\n # checks if the given path contains a text file and opens it\n if file.endswith(\".txt\"):\n with open(self.path + \"/\" + file) as connection:\n text += connection.read()\n\n # tokenizes the text to sentences and tokenizes the tokenized sentences to words\n sentences_list = nltk.sent_tokenize(text)\n word_list = [nltk.word_tokenize(sent) for sent in sentences_list]\n\n return word_list", "def sent_to_words(self, sentences):\n\n for sentence in sentences:\n yield(gensim.utils.simple_preprocess(str(sentence)))", "def load_sentences(path, lower, zeros=True):\n sentences = []\n sentence = []\n for line in codecs.open(path, 'r', 'utf8'):\n line = zero_digits(line.rstrip()) if zeros else line.rstrip()\n if not line:\n if len(sentence) > 0:\n if 'DOCSTART' not in sentence[0][0]:\n sentences.append(sentence)\n sentence = []\n else:\n word = line.split()\n assert len(word) >= 2\n sentence.append(word)\n if len(sentence) > 0:\n if 'DOCSTART' not in sentence[0][0]:\n sentences.append(sentence)\n return sentences", "def peoples_speech(\n corpus_dir: Pathlike,\n output_dir: Pathlike,\n):\n prepare_peoples_speech(\n corpus_dir,\n output_dir=output_dir,\n )", "def generate_corpus():\n data = load_data()\n questions = [s.split(' ', 1)[1].lower() for s in data]\n return questions", "def word_iterator(folder):\n for filename in glob.glob(os.path.join(folder, \"*.txt\")):\n with codecs.open(filename, \"r\", \"utf8\") as file:\n for line in file.readlines():\n for word in 
WORD_SPLIT_PATTERN.split(line.strip()):\n if word == \"\":\n continue\n yield slugify.slugify(word.lower())", "def generate_sentence():\n markov_chain = makeMarkovDict(\"text.txt\")\n\n # Pick a random word to begin with.\n first_word = random.choice(markov_chain.keys()) # Illegall\n\n # print first_word\n # random_choice = random.randint(0, len(markov_chain.keys()))\n # index = 0\n # first_word = \"\"\n # for word in markov_chain:\n # print word\n # if index == random_choice:\n # first_word = word\n # break\n # index += 1\n\n # Based on that word, call function to chose the next word.\n # print markov_chain[first_word]\n # print word_selection(markov_chain[first_word])\n\n lenght_of_sentence = 10\n sentence = [first_word] # First word already in there\n for i in range(lenght_of_sentence):\n sentence.append(word_selection(markov_chain[sentence[i]]))\n # Sentence after loop: ['fish', 'red', 'fish', 'two', 'fish', 'red', 'fish', 'red', 'fish', 'two', 'fish']\n\n # Cap with letter and add period at the end.\n final_sentece = \" \".join(sentence) + \".\"\n return final_sentece.capitalize()", "def read_corpus(dir):\n corpus = {}\n file_names = glob.glob(f\"{dir}/*\")\n for file_name in file_names:\n name = os.path.splitext(os.path.basename(file_name))[0]\n text = \" \".join(open(file_name, \"rt\").readlines())\n text = text.replace(\"\\n \\n\", \" \")\n text = text.replace(\"\\n\", \"\")\n text = text.replace(\" \", \" \")\n corpus[os.path.splitext(name)[0]] = text\n return corpus", "def generate_docs(root_dir, session):\n ...", "def load_sentences(path, zeros):\n sentences = []\n sentence = []\n for line in codecs.open(path, 'r', 'utf8'):\n line = zero_digits(line.rstrip()) if zeros else line.rstrip()\n if not line:\n if len(sentence) > 0:\n if 'DOCSTART' not in sentence[0][0]:\n sentences.append(sentence)\n sentence = []\n else:\n word = line.split()\n sentence.append(word)\n if len(sentence) > 0:\n if 'DOCSTART' not in sentence[0][0]:\n sentences.append(sentence)\n return sentences", "def write_file(tweets):\n with open((folderlink + \"markov_sentences.txt\"), \"w\") as text_file:\n for tweet in tweets:\n text_file.write (tweet + '\\n')\n with file ((folderlink + \"markov_sentences.txt\"), 'r') as f:\n text = f.read()\n text_model = markovify.NewlineText(text)\n print \"model successful \\n\\n\\n\\n\"\n for i in range(5):\n print(text_model.make_short_sentence(140, tries=100))\n text_file.close()", "def write_doc(self, file=sys.stdout, tm=False):\n for sentence in self.sentences:\n if tm:\n print(\"<tu><tuv><seg>\", file=file)\n print(\"{}\".format(sentence.raw), file=file)\n if tm:\n print(\"</seg></tuv><tuv><seg>\", file=file)\n print(\"{}\".format(sentence.translation), file=file)\n if tm:\n print(\"</seg></tuv></tu>\", file=file)" ]
[ "0.6356309", "0.6221751", "0.6179384", "0.595983", "0.5889285", "0.5882137", "0.5873492", "0.58633006", "0.5855387", "0.5792811", "0.578709", "0.5780858", "0.5774546", "0.5762869", "0.5740365", "0.5727936", "0.5726981", "0.5708242", "0.56983536", "0.56931585", "0.569029", "0.56899077", "0.5688693", "0.568819", "0.56764174", "0.56647307", "0.56582177", "0.5655392", "0.5640177", "0.56345856" ]
0.71198934
0
Build each tree in the 'forest' of trees. After each iteration, evaluate the tree and reweight the input sample such that incorrect events are weighted up and correct events are weighted down
def build(self): # weights to apply to training samples, updated on each # iteration of the boosting algo, normalised to 1 sigWeights = np.ones(self.nSig, dtype=float) bkgWeights = np.ones(self.nBkg, dtype=float) reweight = 1.0/(np.sum(sigWeights)+np.sum(bkgWeights)) sigWeights *= reweight bkgWeights *= reweight # Weight of each tree, strong classifers have higher weight self.treeWeights = np.zeros(self.ntrees, dtype=float) for i in xrange(self.ntrees): # build new tree newTree = Tree() newTree.load(self.sigData,self.bkgData,weights=(sigWeights,bkgWeights)) newTree.build() self.dTrees.append(newTree) # evaluate trees # keep track of each event err = 0.0 sigWrong = np.zeros(self.nSig) bkgWrong = np.zeros(self.nBkg) for j in range(self.nSig): if newTree.classify(np.array((self.sigData[j,])))<0: sigWrong[i]=1 err+=sigWeights[j] for j in range(self.nBkg): if newTree.classify(np.array((self.bkgData[j,])))>0: bkgWrong[i]=1 err+=bkgWeights[j] alpha = self.beta*math.log((1.0-err)/err) print err,alpha corFactor = math.exp(-alpha) wrongFactor = math.exp(alpha) if (err<1e-20 or err >= 0.5): print "SOEMTHING WRONG!!" self.treeWeights[i] = alpha # reweight training samples for j in range(self.nSig): if sigWrong[j]: sigWeights[j]*=wrongFactor else : sigWeights[j]*=corFactor for j in range(self.nBkg): if bkgWrong[j]: bkgWeights[j]*=wrongFactor else : bkgWeights[j]*=corFactor # normalise weights reweight = 1.0/(np.sum(sigWeights)+np.sum(bkgWeights)) sigWeights *= reweight bkgWeights *= reweight
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _build_trees(tree, forest, X, Y, sample_weight, tree_idx, n_trees,\n n_samples_bootstrap=None):\n # Initialize the number of samples input data\n n_samples = X.shape[0]\n\n # If the samples are drawn with replacement, then,\n # weight the sample weights by the number of times\n # that each sample appears on the indexes\n if forest.bootstrap:\n # Check the sample weights, initializing them to an\n # uniform distribution if they are not provided and,\n # if provided, copying them to properly weight the\n # samples according to the bootstrap indexes\n if sample_weight is None:\n curr_sample_weight = np.ones(n_samples, dtype=np.float64)\n else:\n curr_sample_weight = np.array(sample_weight, dtype=np.float64)\n # Obtain the sample weights\n # from to the bootstrap indexes\n indexes = _generate_sample_indexes(tree.random_state, n_samples,\n n_samples_bootstrap)\n sample_counts = np.bincount(indexes, minlength=n_samples)\n curr_sample_weight *= sample_counts\n # Fit the estimator using the sample weight\n # obtained from the bootstrap indexes\n tree.fit(X, Y, curr_sample_weight)\n # Otherwise, directly use the sample\n # weight provided in the fit method\n else:\n tree.fit(X, Y, sample_weight)\n\n # Return the built tree\n return tree", "def __build_iteration(self) -> None:\n trees = [t for t in self.__trees.keys()]\n for tree in trees:\n heads = []\n branches = self.__trees[tree]\n for i in range(len(branches) - 1, -1, -1):\n if self.__trees.get(tree) and np.random.rand(1)[0] < self.__rate:\n heads += self.__branch_out(branches.pop(i), tree)\n self.__trees[self.__mappings[tree]] += heads\n\n # NB: this can cause errors when seeds spawn near the edge\n if len(self.__trees[self.__mappings[tree]]) == 0:\n logging.info(\"deleting tree with id {}\".format(tree))\n del self.__trees[self.__mappings[tree]]", "def _forest_nodes(self):\n\n self.arbor._grow_tree(self)\n root = self.root\n for link in root._links:\n yield self.arbor._generate_tree_node(self.root, link)", "def __build_tree__(self, features, classes, depth=0):\n\n # TODO: finish this.\n root = None\n if (len(set(classes)) <= 1) and (len(classes) != 0) :\n return DecisionNode(None,None,None,classes[0])\n elif (len(classes) == 0):\n return DecisionNode(None,None,None,2)\n elif depth == self.depth_limit:\n return DecisionNode(None,None,None,max(set(classes), key=list(classes).count))\n else:\n# if depth == 0:\n features = np.array(features)\n classes = np.array(classes).reshape(-1,1)\n feat_shape = features.shape\n sample_list = range(feat_shape[0])\n gains = np.zeros((feat_shape[1]))\n indices = np.zeros((feat_shape[1]))\n for i in range(feat_shape[1]):\n attribute = features[:,i]\n for j in range(20):\n split_indx = int(np.random.choice(sample_list, replace=False))\n idx_above = np.where(attribute > attribute[split_indx])[0]\n idx_below = np.where(attribute < attribute[split_indx])[0]\n classes_below = classes[idx_below,:].reshape(1,-1)[0]\n classes_above = classes[idx_above,:].reshape(1,-1)[0]\n gain = gini_gain(list(classes.reshape(1,-1)[0]),[list(classes_below),list(classes_above)])\n if gain > gains[i]:\n gains[i] = gain\n indices[i] = split_indx\n indx = np.argmax(gains)\n split_indx = int(indices[indx])\n attribute = features[:,indx]\n idx_above = np.where(attribute > attribute[split_indx])[0]\n idx_below = np.where(attribute < attribute[split_indx])[0] \n features_below = features[idx_below,:]\n features_above = features[idx_above,:]\n classes_below = classes[idx_below,:].reshape(1,-1)[0]\n classes_above = 
classes[idx_above,:].reshape(1,-1)[0]\n if (len(classes_below) != 0) and (len(classes_above) != 0):\n root = DecisionNode(None,None,lambda feat:feat[indx] > features[split_indx,indx])\n root.left = self.__build_tree__(features_above, classes_above, depth+1)\n root.right = self.__build_tree__(features_below, classes_below, depth+1)\n return root\n elif (len(classes_below) == 0) and (len(classes_above) != 0):\n return DecisionNode(None,None,None,max(set(classes_above), key=list(classes_above).count))\n elif (len(classes_above) == 0) and (len(classes_below) !=0):\n return DecisionNode(None,None,None,max(set(classes_below), key=list(classes_below).count))\n else:\n return DecisionNode(None,None,None,2)", "def populate_synthetic_tree(self):\r\n logging.debug('populating synthetic tree...')\r\n a_data = self.realData\r\n ndata = a_data.shape[1]\r\n for i in range(ndata):\r\n ptx = a_data[0, i]\r\n pty = a_data[1, i]\r\n leaf = self.root.find_subnode(ptx, pty)\r\n leaf.n_count += 1\r\n\r\n # traverse the tree and update leaf counts\r\n stack = deque()\r\n stack.append(self.root)\r\n while len(stack) > 0:\r\n cur_node = stack.popleft()\r\n if cur_node.n_isLeaf is True: # leaf\r\n cur_node.n_count += self.differ.getNoise(1, 0.5 * self.param.Eps)\r\n else:\r\n stack.append(cur_node.nw)\r\n stack.append(cur_node.ne)\r\n stack.append(cur_node.sw)\r\n stack.append(cur_node.se)", "def build_random_forest(X_train, y_train):", "def build_tree(self, w):\n w_abs = np.abs(w)\n if sum(w_abs) != 1.:\n w_abs = w_abs / sum(w_abs)\n self.w = w_abs\n self.tree = np.zeros(w.shape)\n self._build_node(w_abs, 1)\n self.w_apx = extract_distribution(self.tree)\n\n n_levels = np.ceil(np.log2(len(w)))\n self.lfsr = []\n for n in range(int(n_levels)):\n seed = np.random.randint(1, int(2**(self.lfsr_nbits-n)-1))\n self.lfsr.append(LFSR(self.lfsr_nbits-n, seed))", "def buildTree(rows, maxDepth = None, scoref=entropy, depth = 0):\n #A base condition for the recursion. 
Check if this branch of a split has no data\n if len(rows)==0:\n return decisionNode( )\n newDepth = depth + 1 #Calculate the depth of the next split.\n #Check if the depth at the next split is greater than a maximum specified depth\n if (maxDepth == 0 or maxDepth) and (newDepth > maxDepth): \n return decisionNode(results=__uniqueCounts(rows)) #If so, stop splitting.\n current_score=scoref(rows) #Calculate the current value of the score function.\n # Set up some variables to track the best criteria\n best_gain=0.0 #Initialize a value for the best gain from all possible splits\n best_criteria=None #Initialize a variable for the best column to split on\n best_sets=None #Initialize a variable for the best split's true and false data.\n\n #Count the number of columns in the row, minus the results column \n column_count=len(rows[0])-1\n for col in range(0,column_count): #Iterate over all the columns of the data\n #Generate the list of different values in this column\n column_values={} #Initialize a dictionary to store the column values\n for row in rows: \n #Iterate over each row, adding a key in the dict for each observed value\n column_values[row[col]]=1\n # Divide the dataset on each value in this column.\n for value in column_values.keys( ):\n (set1,set2)=__divideset(rows,col,value)\n #Calculate the fraction of data in the true branch\n p=float(len(set1))/len(rows) \n #Calculate the gain on the chosen score function using this split.\n gain=current_score-p*scoref(set1)-(1-p)*scoref(set2) \n #Check if this split provides a better gain than the best previous split\n if gain>best_gain and len(set1)>0 and len(set2)>0:\n best_gain=gain\n best_criteria=(col,value)\n best_sets=(set1,set2)\n # Recursively create the subbranches\n if best_gain>0:\n trueBranch=buildTree(best_sets[0], maxDepth = maxDepth, depth = newDepth)\n falseBranch=buildTree(best_sets[1], maxDepth = maxDepth, depth = newDepth)\n return decisionNode(col=best_criteria[0],value=best_criteria[1],\n tb=trueBranch,fb=falseBranch)\n else:\n return decisionNode(results=__uniqueCounts(rows))", "def __init__(self, dims, treeCount, incAdd = 1, testDims = 3, dimCount = 4, rotCount = 32):\n # Support structures...\n self.cats = dict() # Dictionary from cat to internal indexing number.\n self.treeCount = treeCount\n self.incAdd = incAdd\n \n # Setup the classification forest...\n self.classify = DF()\n self.classify.setInc(True)\n self.classify.setGoal(Classification(None, 1))\n self.classify.setGen(LinearClassifyGen(0, 1, testDims, dimCount, rotCount))\n \n self.classifyData = MatrixGrow()\n self.classifyTrain = self.treeCount\n \n # Setup the density estimation forest...\n self.density = DF()\n self.density.setInc(True)\n self.density.setGoal(DensityGaussian(dims))\n self.density.setGen(LinearMedianGen(0, testDims, dimCount, rotCount))\n self.density.getPruner().setMinTrain(48)\n \n self.densityData = MatrixGrow()\n self.densityTrain = self.treeCount", "def prep_tree_data(self, number: int):\n filename = \"data-before-normalization-{}-out-of-7.csv\".format(number)\n path = str(DATA_PATH.joinpath(\"data-splitted\", filename))\n df = pandas.read_csv(path)\n\n df.drop(df.columns[0], axis=1, inplace=True)\n assessments = [x for x in df.columns.values if x.split(\"_\")[0] == \"assessment\"]\n df['average_score'] = df[assessments].mean(skipna=True, axis=1)\n for assessment in assessments: # somehow he doesn't want to fillna in a batch?\n df[assessment].fillna(df['average_score'], inplace=True)\n clicks = [x for x in df.columns.values if 
x.split(\"_\")[0] == \"vle\"]\n df['vle_click_average'] = df[clicks].mean(skipna=True, axis=1)\n for click in clicks: # somehow he doesn't want to fillna in a batch?\n df[click].fillna(df['vle_click_average'], inplace=True)\n df.dropna()\n\n self.change_oh_cat(\"gender\", df)\n self.change_oh_cat(\"highest_education\", df)\n self.change_oh_cat(\"imd_band\", df)\n self.change_oh_cat(\"age_band\", df)\n self.change_oh_cat(\"disability\", df)\n result_order = {'final_result__Fail': 0, 'final_result__Withdrawn': 2,\n 'final_result__Pass': 1, 'final_result__Distinction': 3}\n self.change_oh_cat(\"final_result\", df, result_order)\n df[\"final_result\"].replace(2, 0, inplace=True)\n df[\"final_result\"].replace(3, 1, inplace=True)\n\n target = df[\"final_result\"]\n df.drop([\"final_result\"], axis=1, inplace=True)\n\n x_train, x_test, y_train, y_test = train_test_split(df, target, test_size=0.1,\n random_state=32, shuffle=True,\n stratify=target)\n\n return x_train, x_test, y_train, y_test", "def sample(tree, i, alpha=0.5, beta=0.5, only_tree=True):\n # for n in tree.nodes():\n # lab = tuple(n)\n # if len(n) == 1:\n # lab = \"(\" + str(list(n)[0]) + \")\"\n # tree.node[n] = {\"color\": \"black\", \"label\": lab}\n # print tree.nodes()\n\n if only_tree is True:\n tree_new = tree # Alter the input tree\n else:\n #tree_new = tree.subgraph(tree.nodes()) # nx < 2.0\n tree_new = tree.copy() # nx < 2.0\n\n #print(nocopy)\n #old_G = trilearn.graph.junction_tree.get_graph(tree)\n #(subtree, old_separators, probtree) = glib.random_subtree(tree, alpha, beta)\n\n # plotGraph(subtree, directory+\"subtree_\"+str(i)+\".eps\")\n # for n in subtree.nodes():\n # tree_old.node[n] = {\"color\": \"blue\", \"label\": tuple(n)}\n # if n in tree.nodes():\n # tree.node[n] = {\"color\": \"blue\", \"label\": tuple(n)}\n\n # plotGraph(tree_old.subgraph(tree_old.nodes()),\n # directory + \"tree(\" + str(i-1) + \")p.eps\")\n\n (_, subtree_nodes, subtree_edges, subtree_adjlist,\n old_separators, prob_subtree) = ss.random_subtree(tree, alpha, beta, i)\n\n (old_cliques,\n new_cliques,\n new_separators,\n P,\n neig) = sample_cond_on_subtree_nodes(i, tree_new, subtree_nodes, subtree_edges, subtree_adjlist)\n\n if only_tree is True:\n return tree_new\n #conn_nodes = set()\n #for clique in new_cliques:\n # conn_nodes |= clique\n\n # for n in tree.nodes():\n # lab = tuple(n)\n # if len(n) == 1:\n # lab = \"(\"+str(list(n)[0])+\")\"\n # if n in new_cliques:\n # tree.node[n] = {\"color\": \"red\", \"label\": lab}\n # plotGraph(tree.subgraph(tree.nodes()), directory+\"tree(\"+str(i)+\").eps\")\n\n #G = trilearn.graph.junction_tree.get_graph(tree)\n # G.node[i] = {\"color\": \"red\"}\n # for n in old_G:\n # if n in conn_nodes:\n # old_G.node[n] = {\"color\": \"blue\"}\n # G.node[n] = {\"color\": \"blue\"}\n\n # plotGraph(G, directory+\"G\"+str(i)+\".eps\")\n # plotGraph(old_G, directory+\"G\"+str(i-1)+\"p.eps\")\n\n # Proposal kernel\n K_st = None\n if len(subtree_nodes) == 1:\n # There might be two possible subtrees so\n # we calculate the probabilities for these explicitly\n K_st = pdf(tree, tree_new, alpha, beta, i)\n else:\n K_st = prob_subtree\n for c in P:\n K_st *= P[c] * neig[c]\n return tree_new, K_st, old_cliques, old_separators, new_cliques, new_separators", "def main():\n\n ''' Reading the training data file '''\n original_training_data = pd.read_csv(\"DT_Data_CakeVsMuffin_v012_TRAIN.csv\")\n\n ''' Storing the final decision tree '''\n final_tree = decision_tree(original_training_data,0)\n\n ''' Printing the final decision 
tree '''\n print(\"This is the resulting decision tree: \\n\")\n print(final_tree)\n\n ''' Iterating through the dictionary by using the key values '''\n for key in final_tree.keys():\n ''' Parent = Flour <= 5.1636'''\n parent = key\n ''' left_child = [{'Oils <= 3.1265': [{'Flour <= 2.7291': [{'Proteins <= 2.6527': ['Muffin', 'CupCake']}, 'Muffin']}, 'CupCake']}'''\n left_child = final_tree[parent][0]\n ''' right_child = {'Oils <= 7.7793': ['Muffin', {'Flour <= 8.2225': ['CupCake', 'Muffin']}]}]'''\n right_child = final_tree[parent][1]\n\n ''' Writing a file which generates code for classification '''\n file = open('HW06_Parchand_Nihal_Classifier.py','w+')\n file.write(\"'''Importing libraries''' \"\n \"\\n\\nimport pandas as pd \\n\\ndef main():\"\n \"\\n\\tdata_df = pd.read_csv('DT_Data_CakeVsMuffin_v012_TEST.csv')\"\n \"\\n\\tresult = []\"\n \"\\n\\tfor row in range(0,len(data_df)):\"\n \"\\n\\t\\tFlour = data_df.loc[row][0]\"\n \"\\n\\t\\tSugar = data_df.loc[row][1]\"\n \"\\n\\t\\tOils = data_df.loc[row][2]\"\n \"\\n\\t\\tProteins = data_df.loc[row][3]\"\n \"\\n\\t\\tif {}:\\n\".format(parent))\n\n ''' Iterating through the left_tree '''\n for key in left_child.keys():\n file.write(\"\\t\\t\\tif {}:\\n\".format(key))\n\n ''' Iterating through the inner left_tree '''\n for inner_key in left_child[key][0].keys():\n file.write(\"\\t\\t\\t\\tif {}:\\n\".format(inner_key))\n\n for inner_inner_key in ((left_child[key][0])[inner_key])[0]:\n file.write(\"\\t\\t\\t\\t\\tif {}:\\n\".format(inner_inner_key))\n file.write(\"\\t\\t\\t\\t\\t\\tresult.append(0)\\n\")\n file.write(\"\\t\\t\\t\\t\\telse:\\n\".format(inner_inner_key))\n file.write(\"\\t\\t\\t\\t\\t\\tresult.append(1)\\n\")\n\n file.write(\"\\t\\t\\t\\telse:\\n\")\n file.write(\"\\t\\t\\t\\t\\tresult.append(0)\\n\")\n file.write(\"\\t\\t\\telse:\\n\")\n file.write(\"\\t\\t\\t\\tresult.append(1)\\n\")\n file.write(\"\\t\\telse:\\n\")\n\n ''' Iterating through the right_tree '''\n for key in right_child.keys():\n file.write(\"\\t\\t\\tif {}:\\n\".format(key))\n file.write(\"\\t\\t\\t\\tresult.append(0)\\n\")\n for inner_key in right_child[key][1].keys():\n file.write(\"\\t\\t\\telif {}:\\n\".format(inner_key))\n file.write(\"\\t\\t\\t\\tresult.append(1)\\n\")\n file.write(\"\\t\\t\\telse:\\n\")\n file.write(\"\\t\\t\\t\\tresult.append(0)\\n\\n\")\n\n ''' Writing the results of classifier to a csv file '''\n file.write(\n \"\\twith open('HW06_Parchand_Nihal_MyClassifications.csv', 'w+') as file2:\\n\"\n \"\\t\\tfor value in result:\\n\"\n \"\\t\\t\\tfile2.write(str(value))\\n\"\n \"\\t\\t\\tfile2.write('\\\\n')\\n\\n\"\n \"main()\")", "def get_featured_tree(self):\n\n for t in self.tree.get_terminals():\n t.sample_series = self.feature_table[t.name]\n self.feature_tree = self.recursion_tree(self.tree.root)\n for clade in self.feature_tree.find_clades(order='level'):\n clade.depth = 1+len(self.feature_tree.get_path(clade))\n \n #i = 0\n #for clade in self.feature_tree.find_clades(order='level'):\n # clade.ID_num = i \n #clade.abu = np.mean(clade.sample_series.values)\n #clade.domain_otu = clade.sample_series.idxmax()", "def fill(self):\n # Fail fast if num_classes or num_features isn't set.\n _ = getattr(self, 'num_classes')\n _ = getattr(self, 'num_features')\n\n self.training_library_base_dir = getattr(\n self, 'training_library_base_dir', '')\n self.inference_library_base_dir = getattr(\n self, 'inference_library_base_dir', '')\n\n self.bagged_num_features = int(self.feature_bagging_fraction *\n self.num_features)\n\n self.bagged_features 
= None\n if self.feature_bagging_fraction < 1.0:\n self.bagged_features = [random.sample(\n range(self.num_features),\n self.bagged_num_features) for _ in range(self.num_trees)]\n\n self.regression = getattr(self, 'regression', False)\n\n # Num_outputs is the actual number of outputs (a single prediction for\n # classification, a N-dimenensional point for regression).\n self.num_outputs = self.num_classes if self.regression else 1\n\n # Add an extra column to classes for storing counts, which is needed for\n # regression and avoids having to recompute sums for classification.\n self.num_output_columns = self.num_classes + 1\n\n # Allow each tree to be unbalanced by up to a factor of 2.\n self.max_depth = (self.max_depth or\n int(2 * math.ceil(math.log(self.max_nodes, 2))))\n\n # The Random Forest literature recommends sqrt(# features) for\n # classification problems, and p/3 for regression problems.\n # TODO(thomaswc): Consider capping this for large number of features.\n self.num_splits_to_consider = (\n self.num_splits_to_consider or\n max(10, int(math.ceil(math.sqrt(self.num_features)))))\n\n # max_fertile_nodes doesn't effect performance, only training speed.\n # We therefore set it primarily based upon space considerations.\n # Each fertile node takes up num_splits_to_consider times as much\n # as space as a non-fertile node. We want the fertile nodes to in\n # total only take up as much space as the non-fertile nodes, so\n num_fertile = int(math.ceil(self.max_nodes / self.num_splits_to_consider))\n # But always use at least 1000 accumulate slots.\n num_fertile = max(num_fertile, 1000)\n self.max_fertile_nodes = self.max_fertile_nodes or num_fertile\n # But it also never needs to be larger than the number of leaves,\n # which is max_nodes / 2.\n self.max_fertile_nodes = min(self.max_fertile_nodes,\n int(math.ceil(self.max_nodes / 2.0)))\n\n # We have num_splits_to_consider slots to fill, and we want to spend\n # approximately split_after_samples samples initializing them.\n num_split_initializiations_per_input = max(1, int(math.floor(\n self.num_splits_to_consider / self.split_after_samples)))\n self.split_initializations_per_input = getattr(\n self, 'split_initializations_per_input',\n num_split_initializiations_per_input)\n\n # If base_random_seed is 0, the current time will be used to seed the\n # random number generators for each tree. 
If non-zero, the i-th tree\n # will be seeded with base_random_seed + i.\n self.base_random_seed = getattr(self, 'base_random_seed', 0)\n\n return self", "def prepare_data_for_g(self):\n\n paths = []\n for i in self.root_nodes:\n if np.random.rand() < config.update_ratio:\n sample, paths_from_i = self.sample(i, self.trees[i], config.n_sample_gen, for_d=False)\n if paths_from_i is not None:\n paths.extend(paths_from_i)\n # for each root, we generate 20 samples, each sample is equal to one path from root to that sample\n # So, we will get maximum (num_root x 20) paths\n # path is a list with length = (N x num_sample), with num_sample = 20\n # paths =[[path_root1_to_sample1],[path_root1_to_sample2],....,[path_root1_to_sample20],\n # [path_root2_to_sample1],[path_root2_to_sample2],....,[path_root2_to sample20]\n # .\n # .\n # [path_rootN_to_sample1],[path_rootN_to_sample2],....,[path_rootN_to_sample20]]\n # get_node_pairs_from_path\n\n node_pairs = list(map(self.get_node_pairs_from_path, paths))\n # node_pairs = [[node pairs for path_root1_to_sample1],[node pairs for path_root1_to_sample2],....,[node pairs for path_root1_to_sample20],\n # [node_pairs for path_root2_to_sample1],[node pairs for path_root2_to_sample2],....,[node pairs for path_root2_to sample20],\n # .\n # .\n # [node pairs for path_rootN_to_sample1],[node pairs for path_rootN_to_sample2],....,[node pairs for path_rootN_to_sample20]]\n\n node_1 = []\n node_2 = []\n for i in range(len(node_pairs)):\n for pair in node_pairs[i]:\n node_1.append(pair[0])\n node_2.append(pair[1])\n # reward = self.sess.run(self.discriminator.reward,\n # feed_dict={self.discriminator.node_id: np.array(node_1),\n # self.discriminator.node_neighbor_id: np.array(node_2)})\n reward = self.discriminator.forward(node_1, node_2)\n return node_1, node_2, reward", "def grow_tree(self):\n\n decision_node = self.root\n internal_env = copy.copy(self.env)\n\n while (not decision_node.is_final) and decision_node.visits > 1:\n\n a = self.select(decision_node)\n\n new_random_node = decision_node.next_random_node(a, self._hash_action)\n\n (new_decision_node, r) = self.select_outcome(internal_env, new_random_node)\n\n new_decision_node = self.update_decision_node(new_decision_node, new_random_node, self._hash_space)\n\n new_decision_node.reward = r\n new_random_node.reward = r\n\n decision_node = new_decision_node\n\n decision_node.visits += 1\n cumulative_reward = self.evaluate(internal_env)\n\n while not decision_node.is_root:\n random_node = decision_node.father\n cumulative_reward += random_node.reward\n random_node.cumulative_reward += cumulative_reward\n random_node.visits += 1\n decision_node = random_node.father\n decision_node.visits += 1", "def create_trees(self, importance_values: List[int]) -> None:\n target_names = [\"Fail\", \"Pass\"]\n trees = defaultdict(list)\n for importance in importance_values:\n for i in range(7):\n print(f'making tree for week {i + 1} with importance {importance}')\n x_train, x_test, y_train, y_test = self.prep_tree_data(i + 1)\n tree = TreeClassifier(x_train, x_test, y_train, y_test, target_names, importance)\n tree.run_model()\n trees[importance].append(tree)\n\n self.trees = trees", "def grow_forest( n, records ):\n dataset = Dataset( records )\n record_number = dataset.size\n\n dts = []\n for i in xrange(n):\n print \"Training\", i\n # pick randomly as many records as the number in the dataset.\n picked_records = []\n for j in xrange( record_number ):\n ind_picked = randint(0, record_number-1)\n picked_records.append( dataset[ 
ind_picked ] )\n picked_records = Dataset( picked_records )\n # train a tree with these records and add it to the forest\n tree = train(picked_records)\n dts.append( tree )\n return dts", "def get_forest(self, verbose):\n _antecessors = []\n for key, cluster in self.clusters.items():\n if cluster.leaf_cluster is True:\n _antecessors.append(cluster.antecessor)\n _antecessors = remdup_preserve_order(_antecessors)\n _antecessors = sorted(_antecessors, key=get_cluster_idx, reverse=True)\n\n _tree_idx = 0\n\n print('Generating forest...')\n print('')\n count= 0.0\n if verbose:\n progress_bar = progress_bar = AnimatedProgressBar(end=len(_antecessors), width=50, \\\n fill='=', blank='.')\n for antecessor in _antecessors:\n if verbose and (count % 1 == 0):\n progress_bar + 1\n progress_bar.show_progress()\n tree = Tree(antecessor, idx = _tree_idx, acorns=self)\n self.forest[_tree_idx] = tree\n _tree_idx += 1\n\n if verbose:\n progress_bar.progress = 100 # Done\n progress_bar.show_progress()\n print('')\n print('')\n\n return", "def train(self):\n max_tuple = self.max_gain()\n # If that gain is 0 then every node should be a pure leaf (hopefully) and you can stop\n while max_tuple.gain != 0:\n max_tuple.node.split(max_tuple.attribute)\n max_tuple = self.max_gain()", "def fit(self, features, classes):\n\n self.root = self.__build_tree__(features, classes)", "def train(self):\n logger.info('TreeTrainer.train')\n set_random_seed(self._config['seed'])\n init_epochs = self._config['learning_init_epochs']\n full_epochs = self._config['learning_full_epochs']\n sample_tree_rate = self._config['learning_sample_tree_rate']\n num_rows = self._num_rows\n\n # Initialize using subsample annealing.\n assert len(self._added_rows) == 0\n schedule = make_annealing_schedule(num_rows, init_epochs,\n sample_tree_rate)\n for action, row_id in schedule:\n if action == 'add_row':\n self.add_row(row_id)\n elif action == 'remove_row':\n self.remove_row(row_id)\n elif action == 'sample_tree':\n edges, edge_logits = self.sample_tree()\n self.set_edges(edges)\n else:\n raise ValueError(action)\n\n # Run full gibbs scans.\n assert len(self._added_rows) == num_rows\n for step in range(full_epochs):\n edges, edge_logits = self.sample_tree()\n self.set_edges(edges)\n for row_id in range(num_rows):\n self.remove_row(row_id)\n self.add_row(row_id)\n\n # Compute optimal tree.\n assert len(self._added_rows) == num_rows\n edges, edge_logits = self.estimate_tree()\n if self._config['learning_estimate_tree']:\n self.set_edges(edges)\n\n self._tree.gc()\n\n return {\n 'config': self._config,\n 'tree': self._tree,\n 'edge_logits': edge_logits,\n }", "def generate(self, approx_n: int) -> Tuple[np.ndarray, float]:\n # number of samples per tree\n n_per_tree = approx_n // self._rf.n_estimators\n n = self._rf.n_estimators * n_per_tree # actual number of samples\n\n # default values (some features won't be set by the below algorithm)\n stds = np.sqrt(self._scaler.var_)\n X = np.random.normal(size=(n, self._dim)) * stds + self._scaler.mean_\n\n # generate n_per_tree samples from each tree\n for i_tree, estimator in enumerate(self._rf.estimators_):\n tree = estimator.tree_\n for i in range(n_per_tree):\n row_index = i_tree * n_per_tree + i\n node_index = 0\n right_bound = np.ones(self._dim) * np.inf\n left_bound = -right_bound\n\n # randomly pick one path in the tree\n while node_index != TREE_LEAF and \\\n tree.children_left[node_index] != tree.children_right[node_index]:\n threshold = tree.threshold[node_index]\n feature_i = 
tree.feature[node_index]\n\n # probability of branching left or right\n left_prob = self._counting_trees[i_tree].left_probability(node_index)\n\n # we pick a value close to the threshold...\n shift = 0.05 * np.abs(np.random.normal()) * stds[feature_i]\n if random.random() <= left_prob:\n value = threshold - shift\n else:\n value = threshold + shift\n # ... but still within the known bounds\n value = min(right_bound[feature_i], max(left_bound[feature_i], value))\n # alternatively, we could keep the value already set, but I believe\n # the chosen method restricts the value to be even closer to the\n # decision boundary\n X[row_index, feature_i] = value\n\n # branching\n if value <= threshold:\n node_index = tree.children_left[node_index]\n right_bound[feature_i] = min(right_bound[feature_i], threshold)\n else:\n node_index = tree.children_right[node_index]\n left_bound[feature_i] = max(left_bound[feature_i], threshold)\n\n return X, self._total_samples / X.shape[0]", "def __init__(self, X_init: np.ndarray, Y_init: np.ndarray, num_trees: int = 30,\n do_bootstrapping: bool = True, n_points_per_tree: int = 0, seed: int = None) -> None:\n super().__init__()\n\n # Set random number generator for the random forest\n if seed is None:\n seed = np.random.randint(10000)\n self.reg_rng = reg.default_random_engine(seed)\n\n self.n_points_per_tree = n_points_per_tree\n\n self.rf = reg.binary_rss_forest()\n self.rf.options.num_trees = num_trees\n\n self.rf.options.do_bootstrapping = do_bootstrapping\n\n self.rf.options.num_data_points_per_tree = n_points_per_tree\n\n self._X = X_init\n self._Y = Y_init\n\n if self.n_points_per_tree == 0:\n self.rf.options.num_data_points_per_tree = X_init.shape[0]\n\n data = reg.default_data_container(self._X.shape[1])\n\n for row_X, row_y in zip(X_init, Y_init):\n data.add_data_point(row_X, row_y)\n\n self.rf.fit(data, self.reg_rng)", "def random_forest(path_m1a, path_non_m1a, repetitions, splits, trees, outfile):\n\n # Path to the output file comprised of a 1:1 ratio of m1A and non-m1A\n m1a_list = fill_list(path_m1a)\n non_m1a_list = fill_list(path_non_m1a)\n\n predictor_number = []\n for predic in predictors_in_use:\n predictor_number.append(predic)\n\n predictor_string = []\n for j in range(len(predictors_in_use)):\n if predictors_in_use[j] != 'pre_base':\n predictor_string.append(predictors_in_use[j])\n if pre_base:\n predictor_string.extend(['A', 'C', 'G', 'T'])\n predictor_number.extend(['A', 'C', 'G', 'T'])\n mean_feature_importance = [0] * (len(predictor_number) - 1)\n else:\n mean_feature_importance = [0] * len(predictor_number)\n\n # List for mean scores\n mean_sensitivity, mean_specificity, mean_ppv, mean_npv, mean_roc_auc, mean_mcc = [], [], [], [], [], []\n\n outfile.write('AUC' + '\\t' + 'Sensitivity' + '\\t' + 'Specificity' + '\\t' + 'PPV' + '\\t' + 'NPV' + '\\t' +\n 'MCC' + '\\t')\n\n predictors_in_use.append('mod_type')\n\n for j in range(repetitions):\n random.shuffle(m1a_list)\n random.shuffle(non_m1a_list)\n\n # Write equal numbers of m1As and non-m1As into a file\n temp_list = []\n for i in range(len(m1a_list)):\n temp_list.append(m1a_list[i].strip().split())\n temp_list.append(non_m1a_list[i].strip().split())\n\n # Build data pandas frame using all columns from the input file\n df = pd.DataFrame.from_records(temp_list, columns=predictor_features)\n # Remove columns that are not used\n for column in df.columns:\n if column not in predictors_in_use:\n df.drop(column, 1, inplace=True)\n\n # Change the modification type to numerical value\n 
df['mod_type'] = df['mod_type'].map({temp_list[0][-1]: 1, temp_list[1][-1]: 0})\n\n # Get categorical values (pre_base). This function creates 4 more columns in the pandas data frame (A, C, G, T).\n # Column 'pre_base' will be removed\n if pre_base:\n one_hot = pd.get_dummies(df['pre_base'])\n df.drop('pre_base', 1, inplace=True)\n df = df.join(one_hot)\n\n df_clean = df.dropna()\n df_clean.describe()\n\n # Use all values except for 'mod_type' as predictors\n predictors = df_clean[predictor_string]\n predictors = predictors.as_matrix()\n\n targets = df_clean.mod_type\n\n skf = StratifiedKFold(n_splits=splits, shuffle=True, random_state=None)\n forest = RandomForestClassifier(n_estimators=trees, criterion='gini', max_depth=None, max_features='sqrt',\n n_jobs=-1, warm_start=True, oob_score=True, random_state=None)\n\n splits_mean_roc, splits_sensitivity, splits_specificity, splits_ppv, splits_npv, splits_mcc = 0, 0, 0, 0, 0, 0\n\n\tif pre_base:\n temp_feature_importance = [0] * (len(predictor_number) - 1)\n else:\n temp_feature_importance = [0] * len(predictor_number)\n\t\n\t# Random forest training + testing\n for train, test in skf.split(predictors, targets):\n x_train, x_test = predictors[train], predictors[test]\n y_train, y_test = targets[train], targets[test]\n\n forest.fit(x_train, y_train)\n test_prediction = forest.predict(x_test)\n\n false_pos, true_pos, _ = roc_curve(y_test, test_prediction)\n roc_auc = auc(false_pos, true_pos)\n splits_mean_roc = splits_mean_roc + roc_auc * 100\n for k in range(len(forest.feature_importances_)):\n temp_feature_importance[k] = temp_feature_importance[k] + forest.feature_importances_[k]\n\n false_pos, true_pos, _ = roc_curve(y_test, test_prediction)\n\n # Build confusion matrix and calculate relevant values for statistical analysis\n cm = pd.crosstab(y_test, test_prediction, rownames=['Actual Class'], colnames=['Predicted Class'])\n TN = cm[0][0]\n FP = cm[0][1]\n FN = cm[1][0]\n TP = cm[1][1]\n sensitivity = (TP / (TP + FN)) * 100\n specificity = (TN / (FP + TN)) * 100\n ppv = (TP / (TP + FP)) * 100\n npv = (TN / (TN + FN)) * 100\n mcc = ((TP * TN - FP * FN) / (sqrt((TP + FP)*(TP + FN)*(TN + FP)*(TN + FN)))) * 100\n\n splits_sensitivity = splits_sensitivity + sensitivity\n splits_specificity = splits_specificity + specificity\n splits_ppv = splits_ppv + ppv\n splits_npv = splits_npv + npv\n splits_mcc = splits_mcc + mcc\n\n # Calculate the averages of n splits\n mean_sensitivity.append(splits_sensitivity / skf.n_splits)\n mean_specificity.append(splits_specificity / skf.n_splits)\n mean_ppv.append(splits_ppv / skf.n_splits)\n mean_npv.append(splits_npv / skf.n_splits)\n mean_mcc.append(splits_mcc / skf.n_splits)\n mean_roc_auc.append(splits_mean_roc / skf.n_splits)\n for l in range(len(temp_feature_importance)):\n mean_feature_importance[l] = mean_feature_importance[l] + temp_feature_importance[l] / skf.n_splits\n\n # Calculate the overall averages of x repetitions\n print('Sensitivity: ', sum(mean_sensitivity) / repetitions)\n print('specificity: ', sum(mean_specificity) / repetitions)\n print('Positive predicted value (PPV): ', sum(mean_ppv) / repetitions)\n print('Negative predicted value (NPV): ', sum(mean_npv) / repetitions)\n print('MCC: ', sum(mean_mcc) / repetitions)\n print('AUC: ', sum(mean_roc_auc) / repetitions)\n\n outfile.write(str((sum(mean_sensitivity) / repetitions)) + '\\t' + str((sum(mean_specificity) / repetitions)) +\n '\\t' + str((sum(mean_ppv) / repetitions)) + '\\t' + str((sum(mean_npv) / repetitions)) + '\\t' +\n 
str((sum(mean_mcc) / repetitions)) + '\\t' + str((sum(mean_roc_auc) / repetitions)) + '\\t')\n for j in range(len(mean_feature_importance)):\n outfile.write(str(mean_feature_importance[j] / repetitions) + '\\t')\n outfile.write('\\n')\n \n\n with open(sys.argv[4], 'wb') as f:\n\tpickle.dump(forest, f)", "def reproduce(self):\n\n def compute_seeds(fitness):\n \"\"\" Computes the number of seeds given a fitness value. \"\"\"\n\n seeds = (fitness-min_fitness) / (max_fitness-min_fitness) * \\\n (self.max_seeds-self.min_seeds) + self.min_seeds\n\n return round(seeds)\n\n # evaluates max and min fitness for current year\n max_fitness = max(tree[0] for tree in self.population)\n min_fitness = min(tree[0] for tree in self.population)\n\n # computes the number of seeds produced per tree\n for tree in self.population:\n tree[1].seeds = int(compute_seeds(tree[0]))", "def _iter_build_most_significant_tree(ktree, stree, node):\n sch = find_significant_children(ktree, node)\n if sch is not None:\n small, big = sch\n stree.parents[small] = node\n stree.parents[big] = node\n stree.children[node] = [small, big]\n stree.population[node] = ktree.population[node]\n stree.descriptor[node] = ktree.descriptor[node]\n stree.weights[node] = ktree.weights[node]\n stree.slides[node] = ktree.slides[node]\n _iter_build_most_significant_tree(ktree, stree, small)\n _iter_build_most_significant_tree(ktree, stree, big)", "def decision_tree(original_training_data,call_depth):\n\n ''' Checking the stopping criterion. If yes then it returns the majority class (Muffin or CupCake) '''\n if check_if_stopping_criterion_is_met(original_training_data.values) or call_depth > 10:\n majority = classification(original_training_data)\n return majority\n\n else:\n ''' Each time we split the data and go deeper, we increment the depth of the tree '''\n call_depth += 1\n\n ''' Finding the best attribute, best threshold to split data, best minimum entropy '''\n best_split_index, best_attribute, best_threshold, best_minimum_entropy = find_best_attribute_threshold_entropy(original_training_data)\n original_training_data_values = original_training_data.values\n\n best_split_values = original_training_data_values[:,best_split_index]\n\n less_than_threshold = original_training_data[best_split_values <= best_threshold]\n more_than_threshold = original_training_data[best_split_values > best_threshold]\n\n ''' Initializing a variable called as condition which stores the format of the key for the resulting decision tree dictionary '''\n condition = original_training_data.columns[best_split_index] + \" <= \" + str(best_threshold)\n\n ''' Initializing a dictionary where key is condition and value is a list. 
This is the basic data structure in which the\n resulting decision tree is stored '''\n sub_tree = {condition: []}\n\n ''' Calling the decision tree recursively '''\n left_tree = decision_tree(less_than_threshold, call_depth)\n right_tree = decision_tree(more_than_threshold, call_depth)\n\n ''' For removing edge cases where on either split, the resulting decision tree gives the same result '''\n if left_tree == right_tree:\n sub_tree = left_tree\n else:\n ''' Appending the smaller trees in the final decision tree '''\n sub_tree[condition].append(left_tree)\n sub_tree[condition].append(right_tree)\n\n return sub_tree", "def _initialize_trees(self):", "def brute_tree(XTRAIN,istopTRAIN,XTEST,istopTEST):\n \n ntrain=XTRAIN.shape[0]\n ntest=XTEST.shape[0]\n \n if np.sum(istopTRAIN)==0:\n return 0,[]\n\n cost0=np.zeros(Ngammas*Nreps)\n cost1=np.zeros(Ngammas*Nreps)\n cost0test=np.zeros(Ngammas*Nreps)\n cost1test=np.zeros(Ngammas*Nreps)\n \n precisionTRAIN=np.zeros(Ngammas*Nreps)\n precisionTEST=np.zeros(Ngammas*Nreps)\n recallTEST=np.zeros(Ngammas*Nreps)\n rate=np.zeros(Ngammas*Nreps)\n \n for iii in range(Ngammas):\n \n gamma=GAMMA[iii]\n \n for jjj in range(Nreps):\n \n \"\"\" train a tree using training data with random splitting \"\"\"\n \n tree_hyperparameters['class_weight']={0:1,1:gamma}\n clf=tree.DecisionTreeClassifier(**tree_hyperparameters)\n clf.fit(XTRAIN,istopTRAIN)\n \n \"\"\"\" record costs and precision on validation data \"\"\"\n \n pTRAIN=clf.predict(XTRAIN)\n precisionTRAIN[iii*Nreps+jjj]=np.divide(sum(1 for i in range(ntrain) if pTRAIN[i] == 1 and istopTRAIN[i]==1),sum(pTRAIN))\n cost0[iii*Nreps+jjj]=sum(1 for i in range(ntrain) if pTRAIN[i] == 1 and istopTRAIN[i]==0)\n cost1[iii*Nreps+jjj]=sum(1 for i in range(ntrain) if pTRAIN[i] == 0 and istopTRAIN[i]==1)\n \n \"\"\" record precision on test data \"\"\"\n \n pTEST=clf.predict(XTEST)\n precisionTEST[iii*Nreps+jjj]=np.divide(sum(1 for i in range(ntest) if pTEST[i] == 1 and istopTEST[i]==1),sum(pTEST))\n recallTEST[iii*Nreps+jjj]=sum(1 for i in range(ntest) if pTEST[i] == 1 and istopTEST[i]==1)/sum(istopTEST)\n cost0test[iii*Nreps+jjj]=sum(1 for i in range(ntest) if pTEST[i] == 1 and istopTEST[i]==0)\n cost1test[iii*Nreps+jjj]=sum(1 for i in range(ntest) if pTEST[i] == 0 and istopTEST[i]==1)\n \n \"\"\" record positive rate on full data \"\"\"\n \n rate[iii*Nreps+jjj]=(sum(pTRAIN)+sum(pTEST))/(ntrain+ntest)\n \n \"\"\" Compute Pareto front for validation data \"\"\"\n \n Pareto = Lower_Convex_Hull(np.concatenate((cost0.reshape(-1,1),cost1.reshape(-1,1)),1))\n \n \"\"\" make some nice plots for whoever is watching \"\"\"\n \n plt.figure(figsize=(10,5))\n plt.subplot(121)\n plt.plot(cost0,cost1,'.')\n plt.plot(cost0[Pareto],cost1[Pareto],'d')\n plt.xlabel('errors on class zero training data')\n plt.ylabel('errors on class one training data')\n\n plt.subplot(122)\n plt.plot(cost0test,cost1test,'.')\n plt.plot(cost0test[Pareto],cost1test[Pareto],'d')\n plt.xlabel('errors on class zero test data')\n plt.ylabel('errors on class one test data')\n plt.show()\n \n plt.figure(figsize=(15,5))\n plt.subplot(131)\n plt.semilogy(precisionTRAIN,rate,'.')\n plt.semilogy(precisionTRAIN[Pareto],rate[Pareto],'d')\n plt.xlabel('precision on training data')\n plt.ylabel('positive rate')\n\n plt.subplot(132) \n plt.semilogy(precisionTEST,rate,'.')\n plt.semilogy(precisionTEST[Pareto],rate[Pareto],'d')\n plt.xlabel('precision on test data')\n plt.ylabel('positive rate')\n\n plt.subplot(133) \n plt.plot(precisionTEST,recallTEST,'.')\n 
plt.plot(precisionTEST[Pareto],recallTEST[Pareto],'d')\n plt.xlabel('precision on test data')\n plt.ylabel('recall on test data')\n plt.show() \n \n return {'cost0':cost0,'cost1':cost1,'cost0test':cost0test,'cost1test':cost1test,'precisionTRAIN':precisionTRAIN,'precisionTEST':precisionTEST,'recallTEST':recallTEST,'rate':rate,'Pareto':Pareto}" ]
[ "0.7096626", "0.6913265", "0.6533237", "0.6523216", "0.63476014", "0.6344354", "0.61861014", "0.61825013", "0.6182119", "0.61536306", "0.6152425", "0.61442596", "0.608408", "0.60710186", "0.60495037", "0.6023182", "0.60046387", "0.59995925", "0.5979982", "0.59393513", "0.58764184", "0.5813951", "0.58050126", "0.58028626", "0.57565653", "0.57516474", "0.5750774", "0.5747398", "0.574207", "0.5739161" ]
0.75269973
0
classify a given event. Iterates over each tree in the forest and then returns the weighted average of the results
def classify(self, event): results = np.zeros(self.ntrees, dtype=float) for i,dt in enumerate(self.dTrees): results[i] = self.treeWeights[i]*dt.classify(event) return np.sum(results)*(1.0/np.sum(self.treeWeights))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _classify(tree, x):\n # YOUR CODE HERE\n # begin answer\n feature_name=list(tree.keys())[0] #first element\n secondDict=tree[feature_name] \n key=x.loc[feature_name] #extract value from x\n for key_val in secondDict:\n feature_val=key_val[0]\n valueOfKey=secondDict[(feature_val, key>=feature_val)]\n if isinstance(valueOfKey,dict):\n label=_classify(valueOfKey,x)\n else:\n label=valueOfKey\n return label\n # end answer", "def classProbs(observation, tree, classes):\n res = classify(observation, tree) #res = results\n total = sum(res.values())\n probs = []\n for c in classes:\n if c in res.keys():\n probs.append(float(res[c])/total)\n else:\n probs.append(0)\n return probs", "def classify(observation,tree):\n if tree.results!=None:\n return tree.results\n else:\n v=observation[tree.col]\n branch=None\n if isinstance(v, int) or isinstance(v, float):\n if v>=tree.value:\n branch=tree.tb\n else: \n branch=tree.fb\n else:\n if v==tree.value: \n branch=tree.tb\n \n else: \n branch=tree.fb\n return classify(observation,branch)", "def predict(tree, dataSet):\n\n\tcount = 0 #used for tracking how many times we've correctly classified our data\n\tfor index in range(len(dataSet)):\n\t\tdataPoint = dataSet[index]\n\t\tprint \"Current dataPoint: \", dataPoint.retrieve('id').getValue()\n\t\tnode = 0\n\t\tfor i in tree.fields[tree.nType].keys():\n\t\t\tif NodeType.ROOT == tree.getNodeType(i):\n\t\t\t\tnode = i #basically an index\n\t\t\t\tprint \"root node: \", node\n\t\t\t\tbreak\n\t\t\t#keep going down the tree until no children exist, then get output classification\n\n\t\tprint \"node type\", tree.getNodeType(node)\n\n\t\twhile tree.getNodeType(node) != NodeType.LEAF:\n\t\t\tsplitVal = tree.getSplitValue(node)\n\t\t\tprint \"tree split value: \", splitVal\n\t\t\tsplitAttribute = tree.getSplitAtribute(node)\n\t\t\tprint \"tree split attribute: \", splitAttribute\n\t\t\tval = dataPoint.retrieve(splitAttribute).getValue()\n\t\t\tif val == None:\t\t\n\t\t\t\tval = np.median(retrieveDataFromColumn(dataSet, splitAttribute))\n\n\t\t\tprint \"data point value for split attribute: \", val\n\t\t\tif FeatureType.CONTINUOUS == tree.getSplitType(node): \n\t\t\t\tif val >= splitVal:\n\t\t\t\t\tnode = tree.getChild0(node)\n\t\t\t\t\tprint \"node type\", tree.getNodeType(node)\n\t\t\t\t\tprint \"greater than\", \"going to next node\", node\n\t\t\t\telse:\n\t\t\t\t\tnode = tree.getChild1(node)\n\t\t\t\t\tprint \"lesser than\", \"going to next node\", node\n\t\t\t\t\tprint \"node type\", tree.getNodeType(node)\n\t\t\telif FeatureType.DISCRETE == tree.getSplitType(node):\n\t\t\t\tif val != splitVal:\n\t\t\t\t\tnode = tree.getChild0(node)\n\t\t\t\t\tprint \"not equal\", \" going to next node\", node\n\t\t\t\t\tprint \"node type\", tree.getNodeType(node)\n\t\t\t\telse:\n\t\t\t\t\tnode = tree.getChild1(node)\n\t\t\t\t\tprint \"equal\", \"goint to next node\", node\n\t\t\t\t\tprint \"node type\", tree.getNodeType(node)\n\t\tleafClass = tree.getMajorityClassification(node)\n\t\tprint \"leaf classification: \", leafClass\n\t\tleafAttribute = tree.getSplitAtribute(node)\n\t\tprint \"leaf attribute: \", leafAttribute\n\t\t\n\t\t# Need to fill the last column (which is the same column as leafAttribute) with the \n\t\t# value of the leaf (i.e. 
classify as winner or not)\n\t\tdataPoint.retrieve(leafAttribute).addValue(leafClass)\n\t\tprint \"prediction is: \", dataPoint.retrieve(leafAttribute).getValue()\n\n\tcreateFileCSV(dataSet)\n\treturn dataSet", "def predict(self,x):\n preds = [tree.predict(x) for tree in self.forest]\n if self.classify:\n cls_counts = [0] * self.param['numClasses']\n for p in preds:\n cls_counts[p] += 1\n return argmax(cls_counts)\n else:\n return sum(preds) / (len(preds)*1.0)", "def classify(series, tree):\n feature = tree[0]\n subtree = tree[1]\n\n answer = series[feature]\n response = subtree[answer]\n\n if type(response) != list: #base case\n return subtree[answer]\n else:\n return classify(series, response) #recursive case", "def classifyAll(self,tree,data):\n\n\t\tresults = []\n\t\tfor i in range(len(data)):\n\t\t\tresults.append(self.classify(tree,data[i]))\n\t\treturn results", "def classify(self, tree, datapoint):\n\n\t\tif type(tree) == type(\"string\"):\n\t\t\treturn tree\n\t\telse:\n\t\t\ta = list(tree.keys())[0]\n\t\t\tfor i in range(len(self.featureNames)):\n\t\t\t\tif self.featureNames[i]==a:\n\t\t\t\t\tbreak\n\t\t\t\n\t\t\ttry:\n\t\t\t\tt = tree[a][datapoint[i]]\n\t\t\t\treturn self.classify(t,datapoint)\n\t\t\texcept:\n\t\t\t\treturn None", "def classify(tree, input):\n\n #if this is a leaf node, return its value\n if tree in [True, False]:\n return tree\n\n #otherwise this tree consists of an attribute to split on\n #and a dict whose keys are values of that attribute\n #and whose values are subtrees to consider next\n attribute, subtree_dict = tree\n\n subtree_key = input.get(attribute) #None if input is missing\n\n if subtree_key not in subtree_dict: #if no subtree for key, use None\n subtree_key = None\n\n subtree = subtree_dict[subtree_key] # choose the appropriate subtree\n return classify(subtree, input) # and use it to classify the input", "def classify(observations, tree, dataMissing=False):\n\n def classifyWithoutMissingData(observations, tree):\n if tree.results != None: # leaf\n return tree.results\n else:\n v = observations[tree.col]\n branch = None\n #if isinstance(v, int) or isinstance(v, float):\n #if v >= tree.value: branch = tree.trueBranch\n #else: branch = tree.falseBranch\n #else:\n if v == tree.value: branch = tree.trueBranch\n else: branch = tree.falseBranch\n return classifyWithoutMissingData(observations, branch)\n\n\n def classifyWithMissingData(observations, tree):\n if tree.results != None: # leaf\n return tree.results\n else:\n v = observations[tree.col]\n if v == None:\n tr = classifyWithMissingData(observations, tree.trueBranch)\n fr = classifyWithMissingData(observations, tree.falseBranch)\n tcount = sum(tr.values())\n fcount = sum(fr.values())\n tw = float(tcount)/(tcount + fcount)\n fw = float(fcount)/(tcount + fcount)\n result = collections.defaultdict(int) # Problem description: http://blog.ludovf.net/python-collections-defaultdict/\n for k, v in tr.items(): result[k] += v*tw\n for k, v in fr.items(): result[k] += v*fw\n return dict(result)\n else:\n branch = None\n #if isinstance(v, int) or isinstance(v, float):\n # if v >= tree.value: branch = tree.trueBranch\n # else: branch = tree.falseBranch\n #else:\n if v == tree.value: branch = tree.trueBranch\n else: branch = tree.falseBranch\n return classifyWithMissingData(observations, branch)\n\n # function body\n if dataMissing:\n return classifyWithMissingData(observations, tree)\n else:\n return classifyWithoutMissingData(observations, tree)", "def traverse_tree(self, example):\n current_node = self.root\n 
while not current_node.is_leaf:\n feature_value = example[self.get_feature_index(current_node.feature)]\n current_node = current_node.children[feature_value]\n\n return current_node.pred", "def decision_tree_prediction(example, root, attributes):\n # If reached a leaf node, return the label\n if isinstance(root, str):\n return root\n\n # Attribute that was split on\n attribute = root.attribute\n # Column of the attribute that was split on\n i = get_index(attribute, attributes)\n testValue = example[i]\n # Check every child to see what path the example must take in the decision tree\n for child in root.children:\n if isinstance(child.branch, int):\n if int(testValue) <= child.branch:\n return decision_tree_prediction(example, child.nextTree, attributes)\n elif isinstance(child.branch, float):\n if int(testValue) > child.branch:\n return decision_tree_prediction(example, child.nextTree, attributes)\n# -----------------------------------------------Naive Bayes-------------------------------------------------\n # Naive bayes\n elif child.branch == \"Naive\":\n yes_probability = child.histogram[0]\n no_probability = child.histogram[2]\n i = 0\n for feature in example:\n if feature == \"yes\" or feature == \"no\":\n continue\n if i == 0 or i == 2 or i == 4 or i == 10 or i == 11 or i == 12:\n j = 0\n # Its a float so check\n for key in child.histogram[1][attribute_index[i]]:\n if float(feature) <= float(key) and j == 0:\n yes_probability = yes_probability * child.histogram[1][attribute_index[i]][key]\n elif j == 1:\n yes_probability = yes_probability * child.histogram[1][attribute_index[i]][key]\n j += 1\n for key in child.histogram[1][attribute_index[i]]:\n if float(feature) <= float(key) and j == 0:\n no_probability = no_probability * child.histogram[1][attribute_index[i]][key]\n elif j == 1:\n no_probability = no_probability * child.histogram[1][attribute_index[i]][key]\n j += 1\n else:\n yes_probability = yes_probability * child.histogram[1][attribute_index[i]][feature]\n no_probability = no_probability * child.histogram[3][attribute_index[i]][feature]\n i += 1\n if yes_probability > no_probability:\n return \"yes\"\n elif no_probability >= yes_probability:\n return \"no\"\n# -----------------------------------------------End Naive Bayes-------------------------------------------------\n else:\n if child.branch == testValue:\n return decision_tree_prediction(example, child.nextTree, attributes)", "def classify(self, features):\n node = self.tree\n answer = node.right_label + node.left_label\n while len(answer)>1:\n if node.model.classify(features)==+1:\n answer=node.left_label\n node=node.left\n else:\n answer=node.right_label\n node=node.right \n return answer[0]", "def _predict(self, treenode, X):\n if treenode.is_leaf:\n return treenode.leaf_score\n elif pd.isnull(X[1][treenode.feature]):\n if treenode.nan_direction == 0:\n return self._predict(treenode.left_child, X)\n else:\n return self._predict(treenode.right_child, X)\n elif X[1][treenode.feature] < treenode.threshold:\n return self._predict(treenode.left_child, X)\n else:\n return self._predict(treenode.right_child, X)", "def classify(self, document, tree):\n if type(tree) is ClassTreeNode:\n return tree.c\n else:\n if tree.word in document.bag_of_words:\n return self.classify(document, tree.children[0])\n else:\n return self.classify(document, tree.children[1])", "def _classify(self, sample):\n # This function is used so that we can reduce each row with respect \n # to the sample.\n def calc_dist(vector):\n return 
distance_utils.euclidean(vector, sample)\n\n distances = self.training_set.reduce_rows(calc_dist)\n \n votes = self._tally_votes(self.training_set.get_labels(), distances)\n \n return collection_utils.get_key_with_highest_value(votes)", "def predStat(self,x,f):\n return f([tree.predict(x) for tree in self.forest])", "def classify(self, instance):\n numerator = 0\n denominator = 0\n for training_instance in self.training_data:\n h_value = self._h_function(instance, training_instance[0])\n numerator = numerator + h_value*training_instance[1]\n denominator = denominator + h_value\n return numerator/denominator", "def find_shrunken_averages(tuple_input):\n #The categorical level.\n level = tuple_input[0]\n # The labels list (y varaibale) from a map function.\n labels = tuple_input[1]\n # The total number of level occurances in the frame (ie count)\n level_n = len(labels)\n level_mean = sum(labels) / level_n\n\n # Determine if there enough occurances of a level. If NOT return overall_mean\n if level_n >= threshold:\n return(level,level_mean)\n else:\n return(level, ((1 - lambda_) * level_mean) +\\\n (lambda_ * overall_mean) )", "def classify(self, ep):\n # just here for defining the interface; work is done in subclasses\n pass", "def classify(cls, i):\r\n sums = [0,0]\r\n sums[int(WekaClassifier_0.classify(i))] += 1.2134644010075073\r\n sums[int(WekaClassifier_1.classify(i))] += 0.57177685574344\r\n sums[int(WekaClassifier_2.classify(i))] += 0.40154496884580815\r\n sums[int(WekaClassifier_3.classify(i))] += 0.35999934750119333\r\n sums[int(WekaClassifier_4.classify(i))] += 0.36937329276984643\r\n sums[int(WekaClassifier_5.classify(i))] += 0.16351990613377496\r\n sums[int(WekaClassifier_6.classify(i))] += 0.1396078832952814\r\n sums[int(WekaClassifier_7.classify(i))] += 0.15882943193304253\r\n sums[int(WekaClassifier_8.classify(i))] += 0.1284505298097081\r\n sums[int(WekaClassifier_9.classify(i))] += 0.09903161346969916\r\n sums[int(WekaClassifier_10.classify(i))] += 0.19672733155497407\r\n sums[int(WekaClassifier_11.classify(i))] += 0.17672847093616786\r\n sums[int(WekaClassifier_12.classify(i))] += 0.18729151620386228\r\n sums[int(WekaClassifier_13.classify(i))] += 0.24810462685136855\r\n sums[int(WekaClassifier_14.classify(i))] += 0.23706555932983922\r\n sums[int(WekaClassifier_15.classify(i))] += 0.14276017880034322\r\n sums[int(WekaClassifier_16.classify(i))] += 0.2655207144416779\r\n sums[int(WekaClassifier_17.classify(i))] += 0.24759035974335297\r\n sums[int(WekaClassifier_18.classify(i))] += 0.14255881855351965\r\n sums[int(WekaClassifier_19.classify(i))] += 0.1181101393342422 \r\n return float(sums[0] - sums[1])", "def predict_from_all_children ( self, node: TreeSplits ):\n # Collect the children\n children_values = BaseTree.collect_children ( node )\n # Aggregate the leaf values\n return self.agg_function ( children_values )\n # End predict_from_all_children", "def _predict(self, inputs):\n node = self.tree_\n while node.left:\n if inputs[node.feature_index] < node.split:\n node = node.left\n else:\n node = node.right\n return node.predicted_class", "def main():\n\n ''' Reading the training data file '''\n original_training_data = pd.read_csv(\"DT_Data_CakeVsMuffin_v012_TRAIN.csv\")\n\n ''' Storing the final decision tree '''\n final_tree = decision_tree(original_training_data,0)\n\n ''' Printing the final decision tree '''\n print(\"This is the resulting decision tree: \\n\")\n print(final_tree)\n\n ''' Iterating through the dictionary by using the key values '''\n for key in 
final_tree.keys():\n ''' Parent = Flour <= 5.1636'''\n parent = key\n ''' left_child = [{'Oils <= 3.1265': [{'Flour <= 2.7291': [{'Proteins <= 2.6527': ['Muffin', 'CupCake']}, 'Muffin']}, 'CupCake']}'''\n left_child = final_tree[parent][0]\n ''' right_child = {'Oils <= 7.7793': ['Muffin', {'Flour <= 8.2225': ['CupCake', 'Muffin']}]}]'''\n right_child = final_tree[parent][1]\n\n ''' Writing a file which generates code for classification '''\n file = open('HW06_Parchand_Nihal_Classifier.py','w+')\n file.write(\"'''Importing libraries''' \"\n \"\\n\\nimport pandas as pd \\n\\ndef main():\"\n \"\\n\\tdata_df = pd.read_csv('DT_Data_CakeVsMuffin_v012_TEST.csv')\"\n \"\\n\\tresult = []\"\n \"\\n\\tfor row in range(0,len(data_df)):\"\n \"\\n\\t\\tFlour = data_df.loc[row][0]\"\n \"\\n\\t\\tSugar = data_df.loc[row][1]\"\n \"\\n\\t\\tOils = data_df.loc[row][2]\"\n \"\\n\\t\\tProteins = data_df.loc[row][3]\"\n \"\\n\\t\\tif {}:\\n\".format(parent))\n\n ''' Iterating through the left_tree '''\n for key in left_child.keys():\n file.write(\"\\t\\t\\tif {}:\\n\".format(key))\n\n ''' Iterating through the inner left_tree '''\n for inner_key in left_child[key][0].keys():\n file.write(\"\\t\\t\\t\\tif {}:\\n\".format(inner_key))\n\n for inner_inner_key in ((left_child[key][0])[inner_key])[0]:\n file.write(\"\\t\\t\\t\\t\\tif {}:\\n\".format(inner_inner_key))\n file.write(\"\\t\\t\\t\\t\\t\\tresult.append(0)\\n\")\n file.write(\"\\t\\t\\t\\t\\telse:\\n\".format(inner_inner_key))\n file.write(\"\\t\\t\\t\\t\\t\\tresult.append(1)\\n\")\n\n file.write(\"\\t\\t\\t\\telse:\\n\")\n file.write(\"\\t\\t\\t\\t\\tresult.append(0)\\n\")\n file.write(\"\\t\\t\\telse:\\n\")\n file.write(\"\\t\\t\\t\\tresult.append(1)\\n\")\n file.write(\"\\t\\telse:\\n\")\n\n ''' Iterating through the right_tree '''\n for key in right_child.keys():\n file.write(\"\\t\\t\\tif {}:\\n\".format(key))\n file.write(\"\\t\\t\\t\\tresult.append(0)\\n\")\n for inner_key in right_child[key][1].keys():\n file.write(\"\\t\\t\\telif {}:\\n\".format(inner_key))\n file.write(\"\\t\\t\\t\\tresult.append(1)\\n\")\n file.write(\"\\t\\t\\telse:\\n\")\n file.write(\"\\t\\t\\t\\tresult.append(0)\\n\\n\")\n\n ''' Writing the results of classifier to a csv file '''\n file.write(\n \"\\twith open('HW06_Parchand_Nihal_MyClassifications.csv', 'w+') as file2:\\n\"\n \"\\t\\tfor value in result:\\n\"\n \"\\t\\t\\tfile2.write(str(value))\\n\"\n \"\\t\\t\\tfile2.write('\\\\n')\\n\\n\"\n \"main()\")", "def predict_one(tree, sample):\n if tree['leaf']:\n return tree['class']\n\n else:\n if sample[tree['feature']] <= tree['split']:\n return predict_one(tree['left'], sample)\n else:\n return predict_one(tree['right'], sample)", "def build(self):\n # weights to apply to training samples, updated on each\n # iteration of the boosting algo, normalised to 1\n sigWeights = np.ones(self.nSig, dtype=float)\n bkgWeights = np.ones(self.nBkg, dtype=float)\n reweight = 1.0/(np.sum(sigWeights)+np.sum(bkgWeights))\n sigWeights *= reweight\n bkgWeights *= reweight \n\n # Weight of each tree, strong classifers have higher weight\n self.treeWeights = np.zeros(self.ntrees, dtype=float)\n\n for i in xrange(self.ntrees):\n\n # build new tree\n newTree = Tree()\n newTree.load(self.sigData,self.bkgData,weights=(sigWeights,bkgWeights))\n newTree.build()\n self.dTrees.append(newTree) \n\n # evaluate trees\n # keep track of each event\n err = 0.0\n sigWrong = np.zeros(self.nSig)\n bkgWrong = np.zeros(self.nBkg)\n\n for j in range(self.nSig):\n if 
newTree.classify(np.array((self.sigData[j,])))<0:\n sigWrong[i]=1\n err+=sigWeights[j]\n\n for j in range(self.nBkg):\n if newTree.classify(np.array((self.bkgData[j,])))>0:\n bkgWrong[i]=1\n err+=bkgWeights[j]\n\n alpha = self.beta*math.log((1.0-err)/err)\n print err,alpha\n corFactor = math.exp(-alpha)\n wrongFactor = math.exp(alpha)\n\n if (err<1e-20 or err >= 0.5):\n print \"SOEMTHING WRONG!!\"\n\n self.treeWeights[i] = alpha\n\n # reweight training samples\n for j in range(self.nSig):\n if sigWrong[j]:\n sigWeights[j]*=wrongFactor\n else :\n sigWeights[j]*=corFactor\n\n for j in range(self.nBkg):\n if bkgWrong[j]:\n bkgWeights[j]*=wrongFactor\n else :\n bkgWeights[j]*=corFactor\n\n # normalise weights\n reweight = 1.0/(np.sum(sigWeights)+np.sum(bkgWeights))\n sigWeights *= reweight\n bkgWeights *= reweight", "def classify(data_point, tree):\r\n current = tree\r\n while(current.is_leaf == False): #while we're not at a leaf\r\n q = tree.issue\r\n v = data_point.dat_votes[ord(q) - 97]\r\n if(current is None): pass\r\n current = current.get_classification(v)\r\n #we should now be at a Leaf\r\n if(current is None): print(\"FATAL\")\r\n c =current.get_classification(\"\")\r\n # print(\"classified: \" + str(data_point) + \" as \" + str(c))\r\n return c", "def classify(self, row, node):\n\n # Base case: we've reached a leaf\n if isinstance(node, Leaf):\n return node.predictions\n\n # Decide whether to follow the true-branch or the false-branch.\n # Compare the feature / value stored in the node,\n # to the example we're considering.\n if node.question.match(row):\n return self.classify(row, node.true_branch)\n else:\n return self.classify(row, node.false_branch)", "def average_impurity(self):\n children = tf.squeeze(tf.slice(self.variables.tree, [0, 0], [-1, 1]),\n squeeze_dims=[1])\n is_leaf = tf.equal(LEAF_NODE, children)\n leaves = tf.to_int32(tf.squeeze(tf.where(is_leaf), squeeze_dims=[1]))\n counts = tf.gather(self.variables.node_sums, leaves)\n impurity = self._weighted_gini(counts)\n return tf.reduce_sum(impurity) / tf.reduce_sum(counts + 1.0)", "def entropy_gain(node,attribute):\n data_subset1 = filter_data(node.data,node.ancestors)\n data_counts = list(Counter(data_subset1['Class']).values())\n base_entropy = entropy(data_counts,base=2)\n num_values = len(data_subset1)\n entropy_sum = 0\n \n for value in [0,1]:\n data_subset2 = filter_data(node.data, node.ancestors + [(attribute,value)])\n subset_counts = list(Counter(data_subset2['Class']).values())\n entropy_sum += (len(data_subset2)/num_values) * entropy(subset_counts,base=2)\n \n return base_entropy - entropy_sum" ]
[ "0.6027639", "0.5959959", "0.5897434", "0.5878511", "0.58665943", "0.58361167", "0.5765075", "0.5708641", "0.5605437", "0.55976415", "0.5585661", "0.55516666", "0.5534588", "0.55194116", "0.5513875", "0.5391394", "0.5380517", "0.5374991", "0.5372819", "0.5360519", "0.535744", "0.53571343", "0.5342949", "0.5313004", "0.5307396", "0.52854604", "0.52740604", "0.52416605", "0.5233034", "0.52207005" ]
0.8286223
0
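For context, the positive document in this record is the scoring step of a boosted decision-tree ensemble: every tree classifies the event, each vote is scaled by that tree's weight, and the total is normalised by the sum of the weights. The sketch below restates that combination rule as a standalone function; it is an illustration only, and the trees, weights and event names are assumptions made for the example rather than identifiers from the dataset.

    import numpy as np

    def weighted_ensemble_score(trees, weights, event):
        # Each tree votes on the event; the votes are scaled by the tree
        # weights and normalised so the score stays on a single-tree scale.
        votes = np.array([w * t.classify(event) for t, w in zip(trees, weights)])
        return votes.sum() / np.sum(weights)

With all weights equal to one this reduces to a plain average of the individual tree outputs, which is the usual sanity check for a weighted vote.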
Node frontiers generator using breadth-first search.
def bfs_nodes_generator(graph, source, reverse=...): ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def breadthfirst(self):\n import os\n cwd = os.getcwd()\n os.chdir('/Users/raj/Documents/algorithms_in_python/linked_lists/')\n from linked_collections import LinkedQueue\n os.chdir(cwd) # change to cwd\n if not self.is_empty():\n lq = LinkedQueue()\n lq.enqueue(self.root())\n while not lq.is_empty():\n p = lq.dequeue()\n yield p\n for c in self.children(p):\n lq.enqueue(c)", "def _breadthfirst(self,root, action=lambda nodes: print(nodes)):\n nodes = []\n breadth = Queue()\n visited = []\n\n breadth.enqueue(root)\n visited.append(root)\n\n while breadth.front:\n front = breadth.dequeue()\n nodes.append(front.value)\n\n for child in self._adjacency_list.get(front.value):\n if not child.start_vertex in visited:\n visited.append(child.start_vertex)\n breadth.enqueue(child.start_vertex) \n\n return nodes", "def breadthfirst(self):\n if not self.is_empty():\n fringe = LinkedQueue() # known positions not yet yielded\n fringe.enqueue(self.root()) # starting with the root\n while not fringe.is_empty():\n p = fringe.dequeue() # remove from front of the queue\n yield p # report this position\n for c in self.children(p):\n fringe.enqueue(c) # add children to back of queue", "def breadth_first_search(self):\r\n queue = [self.root]\r\n while queue:\r\n node = queue.pop()\r\n yield node\r\n queue.extend(node.children)", "def breadth_first(self):\n nodes_to_vist = []\n curr = self._root\n nodes_to_vist.append(curr)\n while len(nodes_to_vist):\n curr = nodes_to_vist[0]\n if curr._lkid:\n nodes_to_vist.append(curr._lkid)\n if curr._rkid:\n nodes_to_vist.append(curr._rkid)\n yield curr._data\n nodes_to_vist.remove(curr)", "def breadth_first(self):\n q = Queue()\n q.enqueue(self)\n while q.size() > 0:\n node = q.dequeue()\n yield node.val\n if node.left:\n q.enqueue(node.left)\n if node.right:\n q.enqueue(node.right)", "def breadth_first_traverse(self) -> Generator:\n assist_queue = deque()\n assist_queue.append(self.root_node)\n while assist_queue:\n current_node = assist_queue.popleft()\n yield current_node\n if current_node.children:\n for child in current_node.children:\n assist_queue.append(child)", "def breadthFirstSearch(problem):\n #import pdb;pdb.set_trace()\n frontier = util.Queue()\n start_node = problem.getStartState()\n if problem.isGoalState(start_node):\n return ['Stop']\n frontier.push((start_node,()))\n explored = set()\n while True:\n if frontier.isEmpty():\n return []\n node = frontier.pop()\n\n explored.add(node[0])\n # exploratory code for SUPER-optimal solution:\n # by saving the path in explored, we assure that we explore the same cell even if\n # two different actions go through it:\n #explored.add(node)\n for successor in problem.getSuccessors(node[0]):\n nextState, action, cost = successor\n if nextState in explored or nextState in [f[0] for f in frontier.list]:\n continue\n actions = node[1]\n next_actions = actions + (action,)\n new_node = (nextState, next_actions)\n if problem.isGoalState(new_node[0]):\n return new_node[1]\n frontier.push(new_node)\n #print frontier.list\n return []", "def breadth_first(self):\n import queue\n keeper = queue.Queue()\n keeper.enqueue(self)\n while(keeper.size() != 0):\n temp = keeper.dequeue()\n if temp.val is not None:\n yield temp.val\n if temp.left is not None:\n keeper.enqueue(temp.left)\n if temp.right is not None:\n keeper.enqueue(temp.right)", "def breadth_first_search(root_node):\n if root_node.goal_test():\n return root_node\n\n frontier = [root_node]\n explored = []\n\n while frontier:\n node = frontier.pop(0)\n 
explored.append(node)\n\n for successor in node.generate_successors():\n if not successor:\n continue\n if not (successor.is_in(frontier) and successor.is_in(explored)):\n if successor.goal_test():\n return successor\n frontier.append(successor)\n return None # No Solution", "def breadth_first_traversal(self, start):\n visited = []\n visited.append(start)\n start_visited = visited\n while True:\n temp = []\n for node_ in start_visited:\n for i in self.neighbors(node_):\n if i not in visited:\n visited.append(i)\n temp.append(i)\n start_visited = temp\n if not temp:\n break\n return visited", "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n class Node:\n def __init__(self, state, parent, action, pathCost):\n self.state = state\n self.parent = parent\n self.action = action\n self.pathCost = pathCost\n\n def solution(self):\n path = list()\n tempNode = self\n while tempNode.state != problem.getStartState():\n path.insert(0, tempNode.action)\n tempNode = tempNode.parent\n return path\n\n\n\n\n def childNode(successor, parent, action, stepCost):\n pathCost = parent.pathCost + stepCost\n child = Node(successor, parent, action, pathCost)\n return child\n\n initialNode = Node(problem.getStartState(), None, None, 0)\n if problem.isGoalState(initialNode.state):\n return initialNode.solution()\n\n frontier = util.Queue() #bfs uses a queue\n frontier.push(initialNode)\n explored = set()\n\n while not frontier.isEmpty() :\n nextNode = frontier.pop() #extract from the start of the queue\n explored.add(nextNode.state)\n for successor, action, stepCost in problem.getSuccessors(nextNode.state):\n child = childNode(successor, nextNode, action, stepCost)\n if child.state not in explored and child not in frontier.list:\n if problem.isGoalState(child.state):\n return child.solution()\n frontier.push(child)\n return []\n util.raiseNotDefined()", "def breadth_first(self, start_node):\n \n # try:\n if start_node not in self._adjacency_list:\n raise KeyError('Nodes are not in the graph')\n\n q = Queue()\n q.enqueue(start_node)\n visited_nodes = {}\n visited_nodes[start_node] = True\n output = []\n\n while len(q):\n cur = q.dequeue()\n output.append(cur)\n neighbors = self._adjacency_list[cur]\n for n in neighbors:\n if n[0] not in visited_nodes:\n q.enqueue(n[0]) \n visited_nodes[n[0]] = True\n return output\n # except Exception as error:\n # return(f'{error}')", "def bft(self, starting_vertex):\n # Create a q and enqueue starting vertex\n qq = Queue()\n qq.enqueue([starting_vertex])\n # Create a set of traversed vertices\n visited = set()\n # eldest = []\n depth_counter = {} \n starter = 0 \n # visited = []\n # While queue is not empty:\n while qq.size() > 0:\n # dequeue/pop the first vertex\n path = qq.dequeue()\n # if not visited\n # print(visited)\n starter += 1\n if path[-1] not in visited:\n # DO THE THING!!!!!!!\n # print(path[-1])\n depth_counter[starter] = path[-1]\n # mark as visited\n visited.add(path[-1])\n # visited.append(path[-1])\n # enqueue all neightbors\n \n if not self.get_neighbors(path[-1]):\n \n if starting_vertex == path[-1]:\n return -1\n else:\n # print(\"eldest ancestor:\",path[-1])\n depth_counter[starter] = path[-1]\n # print(depth_counter)\n # eldest.append(path[-1])\n else:\n # starter += 1\n for next_vert in self.get_neighbors(path[-1]): \n new_path = list(path)\n new_path.append(next_vert)\n qq.enqueue(new_path)\n\n\n return depth_counter[starter]", "def breadthFirstSearchPaths(problem):\n #import pdb;pdb.set_trace()\n frontier = util.Queue()\n start_node = 
problem.getStartState()\n if problem.isGoalState(start_node):\n return ['Stop']\n frontier.push((start_node,()))\n explored = set()\n while True:\n if frontier.isEmpty():\n return []\n node = frontier.pop()\n\n #explored.add(node[0])\n # exploratory code for SUPER-optimal solution:\n # by saving the path in explored, we assure that we explore the same cell even if\n # two different actions go through it:\n explored.add(node)\n for successor in problem.getSuccessors(node[0]):\n nextState, action, cost = successor\n if nextState in explored or nextState in [f[0] for f in frontier.list]:\n continue\n actions = node[1]\n next_actions = actions + (action,)\n new_node = (nextState, next_actions)\n if problem.isGoalState(new_node[0]):\n return new_node[1]\n frontier.push(new_node)\n #print frontier.list\n return []", "def _bfs_nodes(cls, graph, source, size, **kwargs):\n if size < 1:\n return iter(())\n\n return itertools.chain(\n (source,),\n itertools.islice((v for u, v in nx.bfs_edges(graph, source)), size-1)\n )", "def bfs_edges_generator(graph, source, reverse=...):\n ...", "def breadthFirstSearch(problem):\r\n \"*** YOUR CODE HERE ***\"\r\n node = problem.getStartState()\r\n if (problem.isGoalState(node)):\r\n return [] # no need to make any moves of the start state is goal\r\n start = (node, 'NoDirection',0)\r\n\r\n frontier_queue = Queue() # queue for frontier\r\n frontier_queue.push(start) # frontier consists of only the start state\r\n\r\n explored_nodes = set()\r\n explored_track = {start:None} # keep a track of parent, parent of root node is None\r\n\r\n while not frontier_queue.isEmpty():\r\n state = frontier_queue.pop() # pop the top element from the queue \r\n explored_nodes.add(state)\r\n\r\n if problem.isGoalState(state[0]):\r\n return get_track(explored_track, state)\r\n\r\n neighbors_state = problem.getSuccessors(state[0])\r\n for neighbor in neighbors_state: # neighbor will be something like this ((34, 15), 'South', 1)\r\n if neighbor not in frontier_queue.list and neighbor not in explored_nodes:\r\n frontier_queue.push(neighbor)\r\n explored_track[neighbor] = state\r\n\r\n\r\n def get_track(explored_track, state):\r\n from game import Directions\r\n track_history = [state]\r\n track_history_direction = []\r\n leaf = state\r\n while (explored_track[leaf]) != start:\r\n track_history.append(explored_track[leaf])\r\n leaf = explored_track[leaf]\r\n\r\n for j in range (len(track_history),-1,-1):\r\n this_step = track_history[j-1]\r\n this_step = this_step[1]\r\n track_history_direction.append(this_step)\r\n return track_history_direction[:-1]", "def breadth_first_iterate(execution_graph, starting_node, direction,\n through_flows=True, through_retries=True,\n through_tasks=True):\n initial_nodes_iter, connected_to_functors = _extract_connectors(\n execution_graph, starting_node, direction,\n through_flows=through_flows, through_retries=through_retries,\n through_tasks=through_tasks)\n q = collections.deque(initial_nodes_iter)\n while q:\n node = q.popleft()\n node_attrs = execution_graph.nodes[node]\n if not node_attrs.get('noop'):\n yield node\n try:\n node_kind = node_attrs['kind']\n connected_to_functor = connected_to_functors[node_kind]\n except KeyError:\n pass\n else:\n q.extend(connected_to_functor(node))", "def breadth_first_list(graph, current=\"a\"):\n queue = []\n queue.append(current)\n while queue:\n current = queue.pop(0)\n print(current)\n for node in graph.get(current):\n queue.append(node)", "def bfs_iterative(graph,start):\n\tvisited = set()\n\twatched = 
set()\n\tnodes_queue = [start] # List that helps as queue\n\twatched.add(start)\n\t\n\twhile nodes_queue:\n\t\tcurrent_node = nodes_queue.pop(0)\n\n\t\tprint(\"visiting\",current_node)\n\t\tvisited.add(current_node)\n\t\t\n\t\tfor adjacent_node in graph[current_node]:\n\t\t\tif (adjacent_node not in watched) and (adjacent_node not in visited):\n\t\t\t\tnodes_queue.append(adjacent_node)\n\t\t\t\t#path.add(adjacent_node)", "def bfs(self):\r\n Q = [self.head()]\r\n visited = []\r\n while Q != []:\r\n cur = Q[0]\r\n visited.append(cur)\r\n Q = Q[1:]\r\n Q.extend([ch for ch in self.get_deps(cur.get_field('index'))])\r\n for x in reversed(visited):\r\n yield x", "def breadthFirstSearch(problem):\n\n frontier = util.Queue()\n # print 'Create frontier'\n initial_node = node(problem.getStartState(), 0, [], 0)#(state,depth,path_actions,path_cost)\n frontier.push(initial_node)\n # print 'Push ',repr(initial_node.state)\n frontierSet = set([initial_node.state])\n explored = set() #initialize the explored set to be empty\n\n while True:\n if frontier.isEmpty() == True: raise Exception, \"The frontier was emptied\"#if the frontier is empty then return failure\n currNode = frontier.pop()#HERE1\n frontierSet.remove(currNode.state)\n # print 'Remove',repr(currNode.state)\n # print 'State: ' + repr(currNode.state) + '. Depth: ' + repr(currNode.depth) + '. Path Cost: ' + repr(currNode.path_cost) + '. Path Actions: ' + repr(currNode.path_actions) + '.\\n'\n if problem.isGoalState(currNode.state) == True:\n print 'Goal reached!'\n return currNode.path_actions\n explored.add(currNode.state)\n for succ in problem.getSuccessors(currNode.state):\n # print 'Succ: ',repr(succ[0])\n succNode = node(succ[0], currNode.depth + 1, currNode.path_actions + [succ[1],], currNode.path_cost + succ[2])\n if (succNode.state not in explored) and (succNode.state not in frontierSet):\n \"\"\"Aca si hay que verificar si es que ya esta en la frontera porque es formato FIFO. 
Entonces los nodos que estan en la lista\n necesariamente van a ser verificados antes de que se vuelva a insertar otro.\n \"\"\"\n frontier.push(succNode)\n # print 'Push ',repr(succNode.state)\n frontierSet.add(succNode.state)", "def breadth_first_search(initial_state):\n list_of_processed_nodes = []\n num_unprocessed_nodes = 0#\n num_unconsidered_children = 0#\n\n initial_node = Node(state=initial_state)\n node_deque = collections.deque()\n node_deque.append(initial_node)\n goal_state_found = False\n goal_node = None\n\n while len(node_deque) > 0 and not goal_state_found:\n e = node_deque.popleft()\n #pdb.set_trace()\n if e in list_of_processed_nodes:\n num_unprocessed_nodes += 1\n continue\n else:\n list_of_processed_nodes.append(e)\n\n list_of_children_nodes, num_unconsidered_children = generate_children_nodes(\n curr_node=e, list_of_processed_nodes=list_of_processed_nodes,\n running_count_of_children_dups=num_unconsidered_children#\n )\n \n for child_node in list_of_children_nodes:\n #print 'Node {0} with goal status {1}'.format(child_node.index, child_node.state.snake_ate_food)\n if child_node.state.goal_state_reached():\n #print \"Goal state reached with node index {0}\".format(child_node.index)\n goal_state_found = True\n goal_node = child_node\n break\n else:\n #print \"Adding to deque node index {0}\".format(child_node.index)\n node_deque.append(child_node)\n\n if len(node_deque) == 0 and not goal_state_found:\n print '*'*40\n print 'NO SOLUTION PATH FOUND'\n print '*'*40\n sys.exit(0)\n\n #pdb.set_trace()#\n # Summary & results\n #print '{0} nodes processed!'.format(len(list_of_processed_nodes))\n #print '{0} nodes already visited, skipped!'.format(num_unprocessed_nodes)\n #print '{0} node children skipped!'.format(num_unconsidered_children)\n #os.system('say -v \"Victoria\" \"done\"')\n\n return goal_node, list_of_processed_nodes", "def breadthFirstSearch(problem):\r\n\t\"*** YOUR CODE HERE ***\"\r\n\tfrom game import Directions\r\n\t#i = 0\r\n\tfrontera=util.Queue()\r\n\testadoInicial= problem.getStartState()\r\n\tfrontera.push((estadoInicial, [],0))\r\n\tvisitados=[]\r\n\tvisitados.append(estadoInicial)\r\n\r\n\twhile not(frontera.isEmpty()):\r\n\t\t(estado, camino, costo) =frontera.pop()\r\n\t\tif(problem.isGoalState(estado)):\r\n\t\t\tbreak\r\n\r\n\t\tsucesores=problem.getSuccessors(estado)\r\n\t\tfor sucesor in sucesores:\r\n\t\t\t#i = i+1\r\n\t\t\t#print (i)\r\n\t\t\tif sucesor[0] not in visitados:\r\n\t\t\t\tfrontera.push((sucesor[0], camino + [sucesor[1]], costo + sucesor[2]))\r\n\t\t\t\tvisitados.append(sucesor[0])\r\n\tprint ('Cantidad de nodos en memoria: {}').format(len(frontera.list))\r\n\treturn camino", "def breadth_first(graph,start, end):\n queue = []\n queue.append([start])\n while queue:\n path = queue.pop(0)\n node = path[-1]\n if node == end:\n return path\n for adjacent in graph.get(node, []):\n new_path = list(path)\n new_path.append(adjacent)\n queue.append(new_path)", "def topological_nodes_generator(graph, reverse=...):\n ...", "def breadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n startState = problem.getStartState()\n explored = set()\n Frontier = util.Queue()\n Frontier.push([[startState,None,0]])\n while not Frontier.isEmpty():\n StateTriples = Frontier.pop()\n node = StateTriples[-1][0]\n if problem.isGoalState(node):\n solution = []\n for i in StateTriples[1:]:\n solution = solution + [i[1]]\n return solution\n if node not in explored:\n explored.add(node)\n for i in problem.getSuccessors(node):\n Frontier.push(StateTriples+[list(i)])\n 
print(Frontier.isEmpty())\n util.raiseNotDefined()", "def _create_node_iterator(self) -> Iterator[GraphNode]:\n return\n yield", "def bft(self, starting_vertex):\n \"\"\" FIFO is LILO\n Create a queue\n Enqueue starting Vertex\n Create a set top store visited\n \n While the queue is NOT empty: e.g. > 0\n Dequeue the first Vertex\n Check IF NOT visited:\n Mark as visited\n enqueue ALL neighbors found if not already in queue\n \"\"\"\n # FIFO \n q = Queue() # create a queue ( e.g. empty [] )\n q.enqueue(starting_vertex) # Enqeue starting at vertex\n visited = set() # Create a set to store visited\n\n while q.size() > 0: # While the queue is NOT empty:\n # while q: # ERROR: Will add None into v, breaks _get_neighbors\n v = q.dequeue() # dequeue the first vertex\n\n if v not in visited: # Check IF NOT visited: \n print(v)\n visited.add(v) # if NOT visited, add to visited set\n\n for n in self.get_neighbors(v): # loop through all neighbors of v \n # if n not in q.queue: # !!! OPTIMIZATION !!!\n # q.enqueue(n) # enqueue ALL neighbors found (ex. add to end of queue)\n\n q.enqueue(n) # enqueue ALL neighbors found (ex. add to end of queue)" ]
[ "0.71898806", "0.7085778", "0.70385784", "0.6970572", "0.6837096", "0.67863566", "0.6713473", "0.66816336", "0.6657518", "0.6647682", "0.66333216", "0.6629434", "0.6573334", "0.6550337", "0.6511887", "0.65066606", "0.64886534", "0.6471162", "0.64685374", "0.64553446", "0.64019185", "0.6400742", "0.6380698", "0.6369547", "0.6360887", "0.6345833", "0.629878", "0.62940055", "0.62487954", "0.62337357" ]
0.74444324
0
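The positive document in this record is only a stub signature, so a short illustration of what a node-frontier generator computes may help: starting from the source node, it yields the list of nodes discovered at each breadth-first depth. This is a plain-Python sketch of the concept, not the implementation behind the stub signature above, and the dict-of-lists adjacency format is an assumption made for the example.

    def bfs_node_frontiers(adj, source):
        # adj maps each node to an iterable of its neighbours.
        visited = {source}
        frontier = [source]
        while frontier:
            yield frontier                 # all nodes at the current depth
            nxt = []
            for u in frontier:
                for v in adj.get(u, ()):
                    if v not in visited:
                        visited.add(v)
                        nxt.append(v)
            frontier = nxt

    # Example: list(bfs_node_frontiers({0: [1, 2], 1: [3], 2: [3]}, 0))
    # returns [[0], [1, 2], [3]].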
Edges frontiers generator using breadth-first search.
def bfs_edges_generator(graph, source, reverse=...): ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bfs_nodes_generator(graph, source, reverse=...):\n ...", "def breadthfirst(self):\n import os\n cwd = os.getcwd()\n os.chdir('/Users/raj/Documents/algorithms_in_python/linked_lists/')\n from linked_collections import LinkedQueue\n os.chdir(cwd) # change to cwd\n if not self.is_empty():\n lq = LinkedQueue()\n lq.enqueue(self.root())\n while not lq.is_empty():\n p = lq.dequeue()\n yield p\n for c in self.children(p):\n lq.enqueue(c)", "def breadthfirst(self):\n if not self.is_empty():\n fringe = LinkedQueue() # known positions not yet yielded\n fringe.enqueue(self.root()) # starting with the root\n while not fringe.is_empty():\n p = fringe.dequeue() # remove from front of the queue\n yield p # report this position\n for c in self.children(p):\n fringe.enqueue(c) # add children to back of queue", "def bft(self, starting_vertex):\n \"\"\"\n Loop over every vertex in the queue. Print each vertex\n as we come to it. Find all the edges of the current vertex\n and add them to the queue and the cache.\n \"\"\" \n queue = [starting_vertex]\n isQueued = {starting_vertex}\n for vertex in queue:\n print(vertex)\n for edge in self.get_neighbors(vertex):\n if edge not in queue:\n queue.append(edge)\n isQueued.add(edge)", "def bfs(graph, start_vertex):\n\n queue = deque()\n queue.appendleft(start_vertex)\n explored_vertices = [start_vertex]\n\n while len(queue) != 0:\n vertex = queue.pop()\n neighbours = graph.neighbours(vertex)\n for neighbour in neighbours:\n if neighbour not in explored_vertices:\n explored_vertices.append(neighbour)\n queue.appendleft(neighbour)\n\n return explored_vertices", "def dfs_edges_generator(graph, source, reverse=...):\n ...", "def bft(self, starting_vertex):\n # create an empty queue and enqueue the starting vertex ID\n queue = Queue()\n queue.enqueue(starting_vertex)\n # create an emtpy Set to stoe the visited vertices\n visited = set()\n # while the queue is not empty ...\n while queue.size() > 0:\n # dequeue the first vertex\n vert = queue.dequeue()\n # if that vertex has not been visited..\n if vert not in visited:\n # mark it as visited\n visited.add(vert)\n print(vert)\n # then add all of its neighbors to the back of the queue\n for neighbor in self.vertices[vert]: # self.get_neighbors(vert)\n queue.enqueue(neighbor)", "def bft(self, starting_vertex):\n \"\"\" FIFO is LILO\n Create a queue\n Enqueue starting Vertex\n Create a set top store visited\n \n While the queue is NOT empty: e.g. > 0\n Dequeue the first Vertex\n Check IF NOT visited:\n Mark as visited\n enqueue ALL neighbors found if not already in queue\n \"\"\"\n # FIFO \n q = Queue() # create a queue ( e.g. empty [] )\n q.enqueue(starting_vertex) # Enqeue starting at vertex\n visited = set() # Create a set to store visited\n\n while q.size() > 0: # While the queue is NOT empty:\n # while q: # ERROR: Will add None into v, breaks _get_neighbors\n v = q.dequeue() # dequeue the first vertex\n\n if v not in visited: # Check IF NOT visited: \n print(v)\n visited.add(v) # if NOT visited, add to visited set\n\n for n in self.get_neighbors(v): # loop through all neighbors of v \n # if n not in q.queue: # !!! OPTIMIZATION !!!\n # q.enqueue(n) # enqueue ALL neighbors found (ex. add to end of queue)\n\n q.enqueue(n) # enqueue ALL neighbors found (ex. 
add to end of queue)", "def breadth_first_traversal(self, start):\n visited = []\n visited.append(start)\n start_visited = visited\n while True:\n temp = []\n for node_ in start_visited:\n for i in self.neighbors(node_):\n if i not in visited:\n visited.append(i)\n temp.append(i)\n start_visited = temp\n if not temp:\n break\n return visited", "def bfs(self):\r\n Q = [self.head()]\r\n visited = []\r\n while Q != []:\r\n cur = Q[0]\r\n visited.append(cur)\r\n Q = Q[1:]\r\n Q.extend([ch for ch in self.get_deps(cur.get_field('index'))])\r\n for x in reversed(visited):\r\n yield x", "def bft(self, starting_vertex):\n visited = set() # create an empty 'set' to store visisted vertex, set sorts \n\n q = Queue() # create an empty Queue \n q.enqueue(starting_vertex) # set enqueue with the starting vertex\n\n while q.size() > 0: # loop if the size is greater than 0\n v = q.dequeue() # dequeue and store \n\n if v not in visited: # if v has not in the set \n visited.add(v) # add v to the set \n print(v) \n # Then add all of its neighbors to the back of the queue\n for neighbor in self.vertices[v]: # loop through neighbors \n q.enqueue(neighbor) # add each neighbor to the end of the que ", "def _bfs_nodes(cls, graph, source, size, **kwargs):\n if size < 1:\n return iter(())\n\n return itertools.chain(\n (source,),\n itertools.islice((v for u, v in nx.bfs_edges(graph, source)), size-1)\n )", "def bfs_iterative(graph,start):\n\tvisited = set()\n\twatched = set()\n\tnodes_queue = [start] # List that helps as queue\n\twatched.add(start)\n\t\n\twhile nodes_queue:\n\t\tcurrent_node = nodes_queue.pop(0)\n\n\t\tprint(\"visiting\",current_node)\n\t\tvisited.add(current_node)\n\t\t\n\t\tfor adjacent_node in graph[current_node]:\n\t\t\tif (adjacent_node not in watched) and (adjacent_node not in visited):\n\t\t\t\tnodes_queue.append(adjacent_node)\n\t\t\t\t#path.add(adjacent_node)", "def _breadthfirst(self,root, action=lambda nodes: print(nodes)):\n nodes = []\n breadth = Queue()\n visited = []\n\n breadth.enqueue(root)\n visited.append(root)\n\n while breadth.front:\n front = breadth.dequeue()\n nodes.append(front.value)\n\n for child in self._adjacency_list.get(front.value):\n if not child.start_vertex in visited:\n visited.append(child.start_vertex)\n breadth.enqueue(child.start_vertex) \n\n return nodes", "def bft(self, starting_vertex):\n # Create a q and enqueue starting vertex\n qq = Queue()\n qq.enqueue([starting_vertex])\n # Create a set of traversed vertices\n visited = set()\n # eldest = []\n depth_counter = {} \n starter = 0 \n # visited = []\n # While queue is not empty:\n while qq.size() > 0:\n # dequeue/pop the first vertex\n path = qq.dequeue()\n # if not visited\n # print(visited)\n starter += 1\n if path[-1] not in visited:\n # DO THE THING!!!!!!!\n # print(path[-1])\n depth_counter[starter] = path[-1]\n # mark as visited\n visited.add(path[-1])\n # visited.append(path[-1])\n # enqueue all neightbors\n \n if not self.get_neighbors(path[-1]):\n \n if starting_vertex == path[-1]:\n return -1\n else:\n # print(\"eldest ancestor:\",path[-1])\n depth_counter[starter] = path[-1]\n # print(depth_counter)\n # eldest.append(path[-1])\n else:\n # starter += 1\n for next_vert in self.get_neighbors(path[-1]): \n new_path = list(path)\n new_path.append(next_vert)\n qq.enqueue(new_path)\n\n\n return depth_counter[starter]", "def edges(self):\n\t\tleftback = self.center + self.left*self.wr - self.forward*self.hr\n\t\tleftfront = self.center + self.left*self.wr + self.forward*self.hr\n\t\trightfront = 
self.center - self.left*self.wr + self.forward*self.hr\n\t\trightback = self.center - self.left*self.wr - self.forward*self.hr\n\t\tyield (leftback, leftfront)\n\t\tyield (leftfront, rightfront)\n\t\tyield (rightfront, rightback)\n\t\tyield (rightback, leftback)", "def breadth_first(self):\n nodes_to_vist = []\n curr = self._root\n nodes_to_vist.append(curr)\n while len(nodes_to_vist):\n curr = nodes_to_vist[0]\n if curr._lkid:\n nodes_to_vist.append(curr._lkid)\n if curr._rkid:\n nodes_to_vist.append(curr._rkid)\n yield curr._data\n nodes_to_vist.remove(curr)", "def breadth_first(graph,start, end):\n queue = []\n queue.append([start])\n while queue:\n path = queue.pop(0)\n node = path[-1]\n if node == end:\n return path\n for adjacent in graph.get(node, []):\n new_path = list(path)\n new_path.append(adjacent)\n queue.append(new_path)", "def bfs(maze):\n # TODO: Write your code here\n frontier = Queue()\n visited = []\n path = []\n ret = []\n objectives = maze.getObjectives()\n start = State(maze.getStart()[0], maze.getStart()[1], objectives.copy())\n frontier.put(start)\n explored = []\n \n\n while not frontier.empty(): # while frontier queue is not empty\n\n currentState = frontier.get()\n currentCell = currentState.cell()\n objectivesLeft = currentState.objectives()\n\n if objectivesLeft.count(currentCell) != 0:\n\n objectivesLeft.remove(currentCell)\n \n # all objectives found, initialise backtrace and exit loop\n # if len(objectivesLeft) == 0:\n path.append(currentState)\n ret.append(currentCell)\n visited.append(currentState)\n break\n\n # current cell is not objective nor visited\n if visited.count(currentState) == 0:\n explored.append(currentCell)\n neighbors = maze.getNeighbors(currentCell[0], currentCell[1])\n\n for i in neighbors:\n\n neighbor = State(i[0], i[1], objectivesLeft)\n\n # if neighbor is not visited, add it to the frontier\n if visited.count(neighbor) == 0:\n neighbor.setParent(currentState)\n frontier.put(neighbor)\n\n visited.append(currentState)\n\n #backtrace\n while path[0] != start:\n\n currentState = path[0]\n path.insert(0, currentState.parent())\n ret.insert(0, currentState.parent().cell())\n\n return ret", "def BFS(self, start_vertex):\n yield from self._search(start_vertex, kind='BFS')", "def BreadthFirstSearch(graph, source):\r\n \r\n # Dictionary dataInfo will be used to store the information about each vertex. (Ancestors, descendants, distance from source, and color)\r\n dataInfo = {} \r\n \r\n # List queue will be used to store the vertices currently in the queue, these vertices will all be gray.\r\n queue = []\r\n \r\n # Loops through the vertices in the graph, creates a key in the dictionary for each vertice, with default values.\r\n for vertex in graph[\"V\"]:\r\n dataInfo[str(vertex)] = {\"ancestor\": \"\", \"descendants\": [], \"distance\": \"\", \"color\": \"white\"}\r\n \r\n # At key source (variable) in dataInfo dictionary, key ancestor is set to have no value other than \"NA\" (as it is the starting point), and distance to 0 (as it will always be zero as it is the source).\r\n dataInfo[str(source)][\"ancestor\"] = \"NA\"\r\n dataInfo[str(source)][\"distance\"] = 0\r\n\r\n def symmetricVertex(edge, otherVertex):\r\n \r\n \"\"\"\r\n Function symmetricVertex takes arguments edge, a list of an edge from the graph dictionary, and otherVertex, an integer that is the other vertex in the edge with the sourceVertex. 
The function will return the point other than the otherVertex, and will be used to find adjacent vertices relative to the current vertex in the queue. Example: edge ([1, 2]), otherVertex (1), the function will return 2.\r\n \"\"\"\r\n \r\n for num in edge:\r\n if num != otherVertex:\r\n return num\r\n \r\n\r\n def pathFinder(graph, sourceVertex):\r\n \r\n \"\"\"\r\n Function pathFinder takes arguments graph, a dictionary, with the same keys for the edges and the vertices and sourceVertex, an integer. The function will loop through all of the edges in the graph and find adjacent vertices relative to the current sourceVertex. sourceVertex values will be in the queue. The function will edit dictionaries and lists, not return any value.\r\n \"\"\"\r\n \r\n # List removeEdges will be used to store the edges that will be removed from the graph dictionary after the loop ends. Makes the code more efficient, as you don't want to loop through a million vertices every time, now do you?\r\n removeEdges = []\r\n \r\n # Loop through edges in the graph, will be used to find adjacent vertices.\r\n for edge in graph[\"E\"]:\r\n \r\n # If the sourceVertex is in the edge and the edge is not discovered yet, then edit and change values in the main dictionary, dataInfo.\r\n if (sourceVertex in edge) and (dataInfo[str(symmetricVertex(edge, sourceVertex))] != \"gray\"):\r\n otherVertex = symmetricVertex(edge, sourceVertex)\r\n \r\n # Adds variable otherVertex to the descendants of the sourceVertex.\r\n dataInfo[str(sourceVertex)][\"descendants\"].append(otherVertex)\r\n \r\n # Updates key(otherVertex) to correct values. Ancestor is always the sourceVertex, the distance is always the distance of sourceVertex incremented by one, and the color is updated to gray as it is added to the queue.\r\n dataInfo[str(otherVertex)] = {\"ancestor\": sourceVertex, \"descendants\": [], \"distance\": (dataInfo[str(sourceVertex)][\"distance\"] + 1), \"color\": \"gray\"}\r\n \r\n # Edge includes two discovered edges, so it will be removed to stop redundancy. 
It is added to the removeEdges list.\r\n removeEdges.append(edge)\r\n \r\n # Appends the discovered vertex to the queue.\r\n queue.append(otherVertex)\r\n \r\n # After the loop ends, the edges that contain the source vertex have been exhausted, so the color is updated to black.\r\n dataInfo[str(sourceVertex)][\"color\"] = \"black\" \r\n \r\n # If the sourceVertex is in the queue, it is removed, as all of the edges containing it have been exhausted.\r\n if sourceVertex in queue:\r\n queue.remove(sourceVertex)\r\n \r\n # Loop through the edges in the removeEdges list, each edge will be removed.\r\n for edge in removeEdges:\r\n graph[\"E\"].remove(edge)\r\n \r\n # The function pathFinder is called on the graph and the source vertex, which sets up the queue.\r\n pathFinder(graph, source)\r\n \r\n # While the list queue contains values, the pathFinder function is called on the graph, and the queue value at index 0.\r\n while len(queue) != 0:\r\n pathFinder(graph, queue[0])\r\n \r\n # Loop below is for formatting of the data, makes it easier to read.\r\n for key in dataInfo:\r\n print \"Vertex: \" + key + \", Distance: \" + str(dataInfo[key][\"distance\"]) + \", Ancestor: \" + str(dataInfo[key][\"ancestor\"]) + \", Descendants: \" + str(dataInfo[key][\"descendants\"]) + \", Color: \" + str(dataInfo[key][\"color\"]) + \".\" \r\n \r\n # Returns dictionary dataInfo.\r\n return dataInfo", "def breadth_first_search(self):\r\n queue = [self.root]\r\n while queue:\r\n node = queue.pop()\r\n yield node\r\n queue.extend(node.children)", "def breadth_first_iterate(execution_graph, starting_node, direction,\n through_flows=True, through_retries=True,\n through_tasks=True):\n initial_nodes_iter, connected_to_functors = _extract_connectors(\n execution_graph, starting_node, direction,\n through_flows=through_flows, through_retries=through_retries,\n through_tasks=through_tasks)\n q = collections.deque(initial_nodes_iter)\n while q:\n node = q.popleft()\n node_attrs = execution_graph.nodes[node]\n if not node_attrs.get('noop'):\n yield node\n try:\n node_kind = node_attrs['kind']\n connected_to_functor = connected_to_functors[node_kind]\n except KeyError:\n pass\n else:\n q.extend(connected_to_functor(node))", "def breadth_first(self):\n q = Queue()\n q.enqueue(self)\n while q.size() > 0:\n node = q.dequeue()\n yield node.val\n if node.left:\n q.enqueue(node.left)\n if node.right:\n q.enqueue(node.right)", "def bft(self, starting_vertex):\n # create an empty queueueueueueueueueue class\n to_visit = Queue()\n # create an empty set\n visited = set()\n # populating the queueueueueueue with our starting vertex\n to_visit.enqueue(starting_vertex)\n\n # while loop to run while the queueueueueue is not empty\n while to_visit.size() > 0:\n v = to_visit.dequeue()\n # checking to see if the dequeueueued vertex is in our set or not\n if v not in visited:\n # if it is then it gets printed out\n print(v)\n # it then gets added to the visited set\n visited.add(v)\n # now we are checking the neighbors of the vertex and adding them\n # to the queueueueueueue\n for n in self.vertices[v]:\n to_visit.enqueue(n)", "def breadth_first(self):\n import queue\n keeper = queue.Queue()\n keeper.enqueue(self)\n while(keeper.size() != 0):\n temp = keeper.dequeue()\n if temp.val is not None:\n yield temp.val\n if temp.left is not None:\n keeper.enqueue(temp.left)\n if temp.right is not None:\n keeper.enqueue(temp.right)", "def bfs(maze):\n # TODO: Write your code here.\n start = maze.getStart() \n frontier = [] \n path = [] \n dim = 
maze.getDimensions()\n objs = maze.getObjectives()\n rows = dim[0]\n cols = dim[1]\n visited = {} # visited as a dictionary\n for i in range (0, rows):\n for j in range (0, cols):\n visited[(i,j)] = (-1, -1)\n frontier.append(start)\n visited[(i,j)] = (-2, -2)\n while frontier:\n pt = frontier.pop(0)\n if maze.isObjective(pt[0], pt[1]) == True:\n break\n else:\n list_of_neighbors = maze.getNeighbors(pt[0], pt[1])\n for i in list_of_neighbors:\n if visited.get(i) == (-1, -1): \n frontier.append(i)\n visited[i] = pt \n while pt != start:\n path.append(pt)\n pt = visited.get(pt)\n path.append(start)\n path.reverse()\n return path", "def bfs(graph,start):\n #keeps track of nodes to be visited\n queue = []\n #keeps track of nodes already visited\n explored = []\n queue.append(start)\n while queue:\n #remove first node from queue\n curr_node = queue.pop(0)\n #check if node is visited\n if curr_node not in explored:\n explored.append(curr_node)\n adjacent_nodes = graph[curr_node]\n #add adjacent nodes to queue\n for i in adjacent_nodes:\n queue.append(i)\n return explored", "def breadthFirstSearch(problem):\n #import pdb;pdb.set_trace()\n frontier = util.Queue()\n start_node = problem.getStartState()\n if problem.isGoalState(start_node):\n return ['Stop']\n frontier.push((start_node,()))\n explored = set()\n while True:\n if frontier.isEmpty():\n return []\n node = frontier.pop()\n\n explored.add(node[0])\n # exploratory code for SUPER-optimal solution:\n # by saving the path in explored, we assure that we explore the same cell even if\n # two different actions go through it:\n #explored.add(node)\n for successor in problem.getSuccessors(node[0]):\n nextState, action, cost = successor\n if nextState in explored or nextState in [f[0] for f in frontier.list]:\n continue\n actions = node[1]\n next_actions = actions + (action,)\n new_node = (nextState, next_actions)\n if problem.isGoalState(new_node[0]):\n return new_node[1]\n frontier.push(new_node)\n #print frontier.list\n return []", "def bfs(self, starting_vertex, destination_vertex):\n pass # TODO" ]
[ "0.7032013", "0.65871656", "0.6558607", "0.6494155", "0.6486353", "0.64773285", "0.6464884", "0.6462215", "0.64562446", "0.64025325", "0.63937354", "0.6391087", "0.6381573", "0.6365035", "0.6322902", "0.6297517", "0.6248023", "0.623094", "0.6225877", "0.62127507", "0.6212067", "0.62103426", "0.6187688", "0.6183143", "0.61755496", "0.61587507", "0.6153769", "0.61276865", "0.6111982", "0.60608596" ]
0.7154378
0
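Analogously, an edge-frontier generator groups the tree edges by the breadth-first depth at which they discover a new node. The sketch below uses the same assumed dict-of-lists adjacency format and is again an illustration of the idea rather than the implementation behind the signature in the record.

    def bfs_edge_frontiers(adj, source):
        visited = {source}
        frontier = [source]
        while frontier:
            nxt, edges = [], []
            for u in frontier:
                for v in adj.get(u, ()):
                    if v not in visited:
                        visited.add(v)
                        nxt.append(v)
                        edges.append((u, v))   # the edge that discovered v
            if edges:
                yield edges
            frontier = nxt

    # Example: list(bfs_edge_frontiers({0: [1, 2], 1: [3], 2: [3]}, 0))
    # returns [[(0, 1), (0, 2)], [(1, 3)]].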
Node frontiers generator using topological traversal.
def topological_nodes_generator(graph, reverse=...): ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bfs_nodes_generator(graph, source, reverse=...):\n ...", "def _create_node_iterator(self) -> Iterator[GraphNode]:\n return\n yield", "def pre_order(self):\n for node_data in self._pre_order_helper(self._root):\n yield node_data", "def breadth_first_iterate(execution_graph, starting_node, direction,\n through_flows=True, through_retries=True,\n through_tasks=True):\n initial_nodes_iter, connected_to_functors = _extract_connectors(\n execution_graph, starting_node, direction,\n through_flows=through_flows, through_retries=through_retries,\n through_tasks=through_tasks)\n q = collections.deque(initial_nodes_iter)\n while q:\n node = q.popleft()\n node_attrs = execution_graph.nodes[node]\n if not node_attrs.get('noop'):\n yield node\n try:\n node_kind = node_attrs['kind']\n connected_to_functor = connected_to_functors[node_kind]\n except KeyError:\n pass\n else:\n q.extend(connected_to_functor(node))", "def depth_first_iterate(execution_graph, starting_node, direction,\n through_flows=True, through_retries=True,\n through_tasks=True):\n initial_nodes_iter, connected_to_functors = _extract_connectors(\n execution_graph, starting_node, direction,\n through_flows=through_flows, through_retries=through_retries,\n through_tasks=through_tasks)\n stack = list(initial_nodes_iter)\n while stack:\n node = stack.pop()\n node_attrs = execution_graph.nodes[node]\n if not node_attrs.get('noop'):\n yield node\n try:\n node_kind = node_attrs['kind']\n connected_to_functor = connected_to_functors[node_kind]\n except KeyError:\n pass\n else:\n stack.extend(connected_to_functor(node))", "def topological_sort_generator(self):\n from sage.graphs.linearextensions import LinearExtensions\n try:\n return LinearExtensions(self).list()\n except TypeError:\n raise TypeError('Digraph is not acyclic; there is no topological sort (or there was an error in sage/graphs/linearextensions.py).')", "def dfs_edges_generator(graph, source, reverse=...):\n ...", "def nodes_iter(topology):\n return topology.nodes_iter()", "def nodes_iter(self) -> Generator:\n for n in self.graph.nodes(data=True):\n yield n", "def _depth_first_iterate(graph, connected_to_functors, initial_nodes_iter):\n stack = list(initial_nodes_iter)\n while stack:\n node = stack.pop()\n node_attrs = graph.node[node]\n if not node_attrs.get('noop'):\n yield node\n try:\n node_kind = node_attrs['kind']\n connected_to_functor = connected_to_functors[node_kind]\n except KeyError:\n pass\n else:\n stack.extend(connected_to_functor(node))", "def _forest_nodes(self):\n\n self.arbor._grow_tree(self)\n root = self.root\n for link in root._links:\n yield self.arbor._generate_tree_node(self.root, link)", "def __iter__(self):\n # set current node to front node\n current = self.front\n # while current != None\n while current:\n # send out current node's data\n yield current.data\n # move to next node\n current = current.prior", "def predecessors(self, node: Node):\n return iter(self.get_node(node_id) for node_id in node.in_nodes_ids)", "def build_node_chains(self):\n\n self.node_chain_lookup = -np.ones(self.tri.npoints, dtype=np.int)\n self.node_chain_list = []\n\n node_chain_idx = 1\n\n self.node_chain_list.append([]) # placeholder for any isolated base-level nodes\n\n for node1 in self.node_high_to_low: \n if (self.node_chain_lookup[node1] != -1): \n continue\n\n junction, this_chain = self._node_walk_downhill(node1)\n\n if len(this_chain) > 1:\n self.node_chain_list.append(this_chain)\n \n self.node_chain_lookup[this_chain[0:-1]] = node_chain_idx \n if 
self.node_chain_lookup[this_chain[-1]] == -1:\n self.node_chain_lookup[this_chain[-1]] = node_chain_idx\n\n node_chain_idx += 1\n\n else: \n self.node_chain_list[0].append(this_chain[0])\n self.node_chain_lookup[this_chain[0]] = 0\n\n return", "def preorder_iterator(node):\n yield node\n for child in node.children:\n yield from preorder_iterator(child)", "def gen_graph(self):", "def _prog_nodes(self):\n\n self.arbor._grow_tree(self)\n my_node = self\n while my_node is not None:\n yield my_node\n ancestors = list(my_node.ancestors)\n if ancestors:\n my_node = my_node.arbor.selector(ancestors)\n else:\n my_node = None", "def get_predecessors(self, node): \n preds = []\n child_state = self.node_to_state(node)\n for it in self.predecessors:\n parent_node = (node[0] + it[0], node[1] + it[1])\n parent_state = self.node_to_state(parent_node)\n edge = self.interpolate(child_state, parent_state, self.distance_bw_states(child_state, parent_state)/self.path_resolution)\n preds.append([parent_node, edge])\n return preds", "def node_gen(self):\n for n in self.child_list:\n yield from n.node_gen\n yield self", "def gen_nodes(self):\n self.nodes = []\n for i in range(self.num_nodes):\n self.nodes.append(Node(self.fk))", "def iteredges(self):\n for source, targets in self.successors.items():\n for target in targets:\n yield source, target", "def predecessors(self):\n predecessors = []\n for inst in self.inst.uses():\n if inst.op_name != 'OpPhi':\n predecessors.append(inst.basic_block)\n return predecessors", "def _create_rel_iterator(self) -> Iterator[GraphRelationship]:\n for downstream_key in self.downstream_deps:\n relationship = GraphRelationship(\n start_key=self.table_key,\n start_label=TableMetadata.TABLE_NODE_LABEL,\n end_label=TableMetadata.TABLE_NODE_LABEL,\n end_key=downstream_key,\n type=TableLineage.ORIGIN_DEPENDENCY_RELATION_TYPE,\n reverse_type=TableLineage.DEPENDENCY_ORIGIN_RELATION_TYPE,\n attributes={}\n )\n yield relationship", "def generate(self,state0):\n knodes = []\n state = state0.copy()\n for node in self.nodes:\n zrun = node.zrun\n ok,state,F,Q = self.model.propagate(state,zrun)\n if (not ok): \n warning(\"kfilter.generate end due to propagation at \",zrun)\n debug('kfilter.generate nodes ',len(knodes))\n return knodes\n knode = node.generate(state)\n knodes.append(knode)\n state = knode.getstate('true').copy()\n debug('kfilter.generate nodes ',len(knodes))\n return knodes", "def each_step(graph):\n\n steps = graph.topological_sort()\n steps.reverse()\n\n for step in steps:\n deps = graph.downstream(step.name)\n yield (step, deps)", "def __generator(self, inp):\n nodes_input = 1\n for i in range(len(self.arch_G)):\n nodes_output = self.arch_G[i]\n inp = fc_layer(inp, nodes_input, nodes_output, 'G_' + str(i + 1) + '_')\n nodes_input = self.arch_G[i]\n\n return fc_layer(inp, self.arch_G[-1], 1,\n 'G_end_',\n final_layer=True)", "def _anchored_predecessors(self, n):\n\n # loop on all incoming edges\n for t in self.predecessors(n):\n \n # if predecessor is anchored\n # stop looking for (necessarily earlier) predecessors\n if t.anchored:\n yield t\n continue\n \n # if neighbor is not anchored\n # look one level deeper\n for tt in self._anchored_predecessors(t):\n yield tt", "def forward_graph(self):\n raise NotImplementedError", "def bfs_edges_generator(graph, source, reverse=...):\n ...", "def pipeline_dependencies_tasks(g):\n deps = dict()\n for step_name in nx.topological_sort(g):\n deps[step_name] = list(g.predecessors(step_name)) # copy list\n return deps" ]
[ "0.65818083", "0.631488", "0.60523885", "0.60450137", "0.60366327", "0.5984799", "0.59832644", "0.5886222", "0.5862201", "0.5833745", "0.57818055", "0.5777673", "0.57532954", "0.5735733", "0.5725604", "0.5718208", "0.5704789", "0.5663648", "0.5657863", "0.56538856", "0.5643337", "0.56393594", "0.56135976", "0.5611871", "0.5610509", "0.5605352", "0.5598771", "0.55880463", "0.5564255", "0.556156" ]
0.75308436
0
Edge frontiers generator using depth-first search (DFS). Multiple source nodes can be specified to start the DFS traversal. One needs to make sure that each source node belongs to a different connected component, so the frontiers can be easily merged. Otherwise, the behavior is undefined.
def dfs_edges_generator(graph, source, reverse=...): ...
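The document above is only the public signature stub, so the generator body is not part of this record. As a rough plain-Python sketch of the behaviour the query describes (an illustration, not the library implementation behind the stub), the hypothetical dfs_edges_sketch below keeps one DFS stack per source node, advances each traversal by one tree edge per iteration, and yields the merged edge frontier; the adjacency-dict graph format and the reading of reverse as merely flipping the reported edge orientation are assumptions of this sketch.

def dfs_edges_sketch(graph, sources, reverse=False):
    """Yield merged DFS edge frontiers, one list of (u, v) edges per DFS step.

    graph   -- adjacency dict {node: iterable of neighbour nodes} (assumed format)
    sources -- start nodes, ideally one per connected component, as the query requires
    """
    visited = set(sources)
    stacks = [[s] for s in sources]          # one DFS stack per source node
    while any(stacks):
        frontier = []
        for stack in stacks:
            while stack:                     # advance this traversal by one tree edge
                u = stack[-1]
                unvisited = [v for v in graph.get(u, ()) if v not in visited]
                if not unvisited:
                    stack.pop()              # fully explored, backtrack
                    continue
                v = unvisited[0]
                visited.add(v)
                stack.append(v)
                frontier.append((v, u) if reverse else (u, v))
                break
        if frontier:
            yield frontier

# Example: two sources in two different connected components.
g = {0: [1, 2], 1: [3], 2: [], 3: [], 4: [5], 5: []}
for frontier in dfs_edges_sketch(g, sources=[0, 4]):
    print(frontier)
# [(0, 1), (4, 5)]
# [(1, 3)]
# [(0, 2)]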
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bfs_nodes_generator(graph, source, reverse=...):\n ...", "def main():\n n = int(input(\"Enter the number of nodes: \"))\n m = int(input(\"Enter the number of edges: \"))\n \n adjList = [[] for i in range(n)]\n \n print(\"Enter the edges: \")\n for i in range(m):\n x, y = input().split(\" \")\n x = int(x)\n y = int(y)\n adjList[x].append(y)\n adjList[y].append(x)\n \n s = int(input(\"Enter the source: \"))\n \n DFS(adjList, s, n)", "def iterative_dfs(starting_vertex, graph):\n starting_vertex.discovered = True\n starting_vertex.discovery_edge = Graph.Edge(starting_vertex, None, None) # Dummy edge\n walk = starting_vertex\n\n while walk is not None:\n has_to_go_back = True\n for edge in graph.incident_edges(walk):\n opposite = edge.opposite(walk)\n if not opposite.discovered:\n opposite.discovered = True\n opposite.discovery_edge = edge\n walk = opposite\n has_to_go_back = False\n break\n\n if has_to_go_back:\n walk = walk.discovery_edge.opposite(walk)\n\n starting_vertex.discovery_edge = None # Remove dummy edge", "def dft_recursive(self, starting_vertex, visited=None):\n if visited is None: # if visited is None\n visited = set() # create an empty 'set' to store visisted vertex, set sorts \n\n if starting_vertex not in visited: # if starting_vertex has not been visited yet\n print(starting_vertex)\n visited.add(starting_vertex) # add to the set \n\n for neighbor in self.vertices[starting_vertex]: # loop through each neighbor \n self.dft_recursive(neighbor, visited) # call the dft_recursive method on each neighbor ", "def DFS(self, start_vertex):\n yield from self._search(start_vertex, kind='DFS')", "def bfs_edges_generator(graph, source, reverse=...):\n ...", "def DFS(self, start_vertex, verbose=True):\n if start_vertex is None:\n return None\n traversal = []\n visited = set()\n for vertex in self.vertices():\n if vertex not in visited:\n self._DFS(vertex, visited, traversal.append)\n if verbose:\n print('DFS(Graph) =', traversal)\n return traversal", "def dfs_recursive(self, starting_vertex, destination_vertex):\n pass # TODO", "def dfs_recursive(self, starting_vertex, destination_vertex):\n pass # TODO", "def dfs(self, starting_vertex, destination_vertex):\n # create an empty stack \n stack = Stack()\n #push the starting vertex ID as list\n stack.push([starting_vertex])\n # create an empty Set to store the visited vertices\n visited = set()\n # while the stack is not empty ...\n while stack.size() > 0:\n # pop the first vertex\n path = stack.pop()\n vert = path[-1]\n # if that vertex has not been visited ..\n if vert not in visited:\n #check for target\n if vert == destination_vertex:\n return path\n # mark it is visited\n visited.add(vert)\n # then add all of its neighbors to the top of the stack\n for neighbor in self.vertices[vert]: #self.get_neighbors(vert)\n #copy path to avoid pass by reference\n new_path = list(path) # make a copy\n new_path.append(neighbor)\n stack.push(new_path)", "def dfs(self, starting_vertex, destination_vertex):\n pass # TODO", "def dfs(self, starting_vertex, destination_vertex):\n \"\"\" LIFO\n Create a stack\n Create a set to store visited\n PUSH starting vertex into an array (STACK)\n While the STACK is NOT empty \n get((pop) first PATH vertex\n get Vertex from END of PATH\n check if NOT visited\n mark as visited\n check if vertex is destination_vertex\n If TRUE, return path \n PUSH path to ALL of neighbors\n make copy of current path\n add neighbor to path copy\n PUSH path copy\n \"\"\" \n s = Stack() # Create a stack\n s.push([starting_vertex]) # 
PUSH starting vertex into an array (STACK)\n visited = set() # Create a set to store visited\n\n while s.size() > 0: # While the STACK is NOT empty\n path = s.pop() # get(pop) first PATH vertex)\n v = path[-1] # get Vertex from END of PATH \n\n while v not in visited: # check if NOT visited\n visited.add(v) # mark as visited\n\n if v == destination_vertex: # check if vertex is destination_vertex\n return path # If TRUE, return path \n\n for n in self.get_neighbors(v): # PUSH path to ALL of neighbors\n path_c = path[:] # make copy of current path\n # path_c.extend([n]) # add neighbor to path copy\n path_c.append(n) # add neighbor to path copy\n s.push(path_c) # PUSH path copy", "def _bfs_nodes(cls, graph, source, size, **kwargs):\n if size < 1:\n return iter(())\n\n return itertools.chain(\n (source,),\n itertools.islice((v for u, v in nx.bfs_edges(graph, source)), size-1)\n )", "def dfs(self, starting_vertex, destination_vertex):\n # TODO", "def dft(self, starting_vertex):\n \"\"\" LIFO\n Create a stack \n Push starting Vertex\n Create a set to store visited\n While the stack is NOT empty: e.g. > 0\n Pop the last added Vertex\n Check IF NOT visited:\n Mark as visited\n\n\n Push ALL of neighbors\n \"\"\"\n s = Stack() # Create a stack\n s.push(starting_vertex) # Push starting Vertex\n visited = set() # Create a set to store visited\n\n while s.size() > 0: # While the stack is NOT empty: e.g. > 0\n v = s.pop() # Pop the last added Vertex\n\n if v not in visited: # Check IF NOT visited: e.g. > 0\n print(v)\n visited.add(v) # Mark as visited\n\n for n in self.get_neighbors(v): # Check IF NOT visited:\n s.push(n) # Push ALL of neighbors ", "def dft_recursive(self, starting_vertex, visited = None):\n \"\"\"\n Check if Vertex is in visited\n if NOT visited, add to visited set\n Call dft_recursive on every neighbor \n \n\n \"\"\"\n # 1) base case >> where to stop recursion\n # 2) calls itself from within\n # 3) each iteration approaches base case\n\n # 1) base case >> where to stop recursion\n\n # init a set that persists after recursions loops to save visited\n if visited == None:\n visited = set()\n\n if starting_vertex not in visited: # 1) & 3) Check if vertex has NOT been visited\n visited.add(starting_vertex) # if True, add to visited set\n\n print(starting_vertex)\n\n # perform recursion on neighbor\n for n in self.get_neighbors(starting_vertex):\n self.dft_recursive(n, visited) # 2) ", "def dft_recursive(self, starting_vertex, visited=None):\n \n # for vertex in self.get_neighbors(starting_vertex):\n # if vertex not in visited:\n # visited.add(vertex)\n # self.dft_recursive(vertex, visited)\n # return visited\n if visited == None:\n visited = set()\n print(starting_vertex)\n visited.add(starting_vertex)\n for v in self.get_neighbors(starting_vertex):\n if v not in visited:\n self.dft_recursive(v, visited)", "def dft_recursive(self, starting_vertex, visited=None):\n if visited is None:\n visited = set()\n visited.add(starting_vertex)\n print(starting_vertex)\n for neighb_vert in self.vertices[starting_vertex]:\n if neighb_vert not in visited:\n self.dft_recursive(neighb_vert, visited)", "def BreadthFirstSearch(graph, source):\r\n \r\n # Dictionary dataInfo will be used to store the information about each vertex. 
(Ancestors, descendants, distance from source, and color)\r\n dataInfo = {} \r\n \r\n # List queue will be used to store the vertices currently in the queue, these vertices will all be gray.\r\n queue = []\r\n \r\n # Loops through the vertices in the graph, creates a key in the dictionary for each vertice, with default values.\r\n for vertex in graph[\"V\"]:\r\n dataInfo[str(vertex)] = {\"ancestor\": \"\", \"descendants\": [], \"distance\": \"\", \"color\": \"white\"}\r\n \r\n # At key source (variable) in dataInfo dictionary, key ancestor is set to have no value other than \"NA\" (as it is the starting point), and distance to 0 (as it will always be zero as it is the source).\r\n dataInfo[str(source)][\"ancestor\"] = \"NA\"\r\n dataInfo[str(source)][\"distance\"] = 0\r\n\r\n def symmetricVertex(edge, otherVertex):\r\n \r\n \"\"\"\r\n Function symmetricVertex takes arguments edge, a list of an edge from the graph dictionary, and otherVertex, an integer that is the other vertex in the edge with the sourceVertex. The function will return the point other than the otherVertex, and will be used to find adjacent vertices relative to the current vertex in the queue. Example: edge ([1, 2]), otherVertex (1), the function will return 2.\r\n \"\"\"\r\n \r\n for num in edge:\r\n if num != otherVertex:\r\n return num\r\n \r\n\r\n def pathFinder(graph, sourceVertex):\r\n \r\n \"\"\"\r\n Function pathFinder takes arguments graph, a dictionary, with the same keys for the edges and the vertices and sourceVertex, an integer. The function will loop through all of the edges in the graph and find adjacent vertices relative to the current sourceVertex. sourceVertex values will be in the queue. The function will edit dictionaries and lists, not return any value.\r\n \"\"\"\r\n \r\n # List removeEdges will be used to store the edges that will be removed from the graph dictionary after the loop ends. Makes the code more efficient, as you don't want to loop through a million vertices every time, now do you?\r\n removeEdges = []\r\n \r\n # Loop through edges in the graph, will be used to find adjacent vertices.\r\n for edge in graph[\"E\"]:\r\n \r\n # If the sourceVertex is in the edge and the edge is not discovered yet, then edit and change values in the main dictionary, dataInfo.\r\n if (sourceVertex in edge) and (dataInfo[str(symmetricVertex(edge, sourceVertex))] != \"gray\"):\r\n otherVertex = symmetricVertex(edge, sourceVertex)\r\n \r\n # Adds variable otherVertex to the descendants of the sourceVertex.\r\n dataInfo[str(sourceVertex)][\"descendants\"].append(otherVertex)\r\n \r\n # Updates key(otherVertex) to correct values. Ancestor is always the sourceVertex, the distance is always the distance of sourceVertex incremented by one, and the color is updated to gray as it is added to the queue.\r\n dataInfo[str(otherVertex)] = {\"ancestor\": sourceVertex, \"descendants\": [], \"distance\": (dataInfo[str(sourceVertex)][\"distance\"] + 1), \"color\": \"gray\"}\r\n \r\n # Edge includes two discovered edges, so it will be removed to stop redundancy. 
It is added to the removeEdges list.\r\n removeEdges.append(edge)\r\n \r\n # Appends the discovered vertex to the queue.\r\n queue.append(otherVertex)\r\n \r\n # After the loop ends, the edges that contain the source vertex have been exhausted, so the color is updated to black.\r\n dataInfo[str(sourceVertex)][\"color\"] = \"black\" \r\n \r\n # If the sourceVertex is in the queue, it is removed, as all of the edges containing it have been exhausted.\r\n if sourceVertex in queue:\r\n queue.remove(sourceVertex)\r\n \r\n # Loop through the edges in the removeEdges list, each edge will be removed.\r\n for edge in removeEdges:\r\n graph[\"E\"].remove(edge)\r\n \r\n # The function pathFinder is called on the graph and the source vertex, which sets up the queue.\r\n pathFinder(graph, source)\r\n \r\n # While the list queue contains values, the pathFinder function is called on the graph, and the queue value at index 0.\r\n while len(queue) != 0:\r\n pathFinder(graph, queue[0])\r\n \r\n # Loop below is for formatting of the data, makes it easier to read.\r\n for key in dataInfo:\r\n print \"Vertex: \" + key + \", Distance: \" + str(dataInfo[key][\"distance\"]) + \", Ancestor: \" + str(dataInfo[key][\"ancestor\"]) + \", Descendants: \" + str(dataInfo[key][\"descendants\"]) + \", Color: \" + str(dataInfo[key][\"color\"]) + \".\" \r\n \r\n # Returns dictionary dataInfo.\r\n return dataInfo", "def dft(self, starting_vertex):\n # Create a s and push starting vertex\n ss = Stack()\n ss.push([starting_vertex])\n # Create a set of traversed vertices\n visited = []\n eldest = [] \n # While stack is not empty:\n while ss.size() > 0:\n # dequeue/pop the first vertex\n path = ss.pop()\n if path[-1] not in visited:\n # DO THE THING!!!!!!!\n # print(path[-1])\n # mark as visited\n visited.append(path[-1])\n print(visited)\n # enqueue all neightbors\n if not self.get_neighbors(path[-1]):\n if starting_vertex == path[-1]:\n return -1\n else:\n # print(\"eldest ancestor:\",path[-1])\n eldest.append(path[-1])\n\n for next_vert in self.get_neighbors(path[-1]):\n new_path = list(path)\n # print(new_path)\n new_path.append(next_vert)\n ss.push(new_path)\n \n return min(eldest)", "def dft_recursive(self, starting_vertex, visited=None):\n # First, we set our initial condition\n if visited is None:\n # If no nodes have been visited, we create a set to store the nodes we visit\n visited = set()\n\n # Then we add the starting vertex to the visited set\n visited.add(starting_vertex)\n print(starting_vertex)\n\n # Call the function recursively on neighbors not visited\n # Lastly we write a for loop that will recursively call dft_recursive()\n for neighbor in self.vertices[starting_vertex]:\n # For each vertex, we check to see if any of the neighbors have already been visited\n if neighbor not in visited:\n # And if we find a neighbor that has not been visited, we recursively call dft_recursive() and pass it the neighbor and updated visited set\n self.dft_recursive(neighbor, visited)", "def _dfs_cycle_forest(G, root=None):\n # Create a directed graph from the depth-first search tree with\n # root node `root` in which tree edges are directed toward the\n # root and nontree edges are directed away from the root. For\n # each node with an incident nontree edge, this creates a\n # directed cycle starting with the nontree edge and returning to\n # that node.\n #\n # The `parent` node attribute stores the parent of each node in\n # the DFS tree. 
The `nontree` edge attribute indicates whether\n # the edge is a tree edge or a nontree edge.\n #\n # We also store the order of the nodes found in the depth-first\n # search in the `nodes` list.\n H = nx.DiGraph()\n nodes = []\n for u, v, d in nx.dfs_labeled_edges(G, source=root):\n if d == 'forward':\n # `dfs_labeled_edges()` yields (root, root, 'forward')\n # if it is beginning the search on a new connected\n # component.\n if u == v:\n H.add_node(v, parent=None)\n nodes.append(v)\n else:\n H.add_node(v, parent=u)\n H.add_edge(v, u, nontree=False)\n nodes.append(v)\n # `dfs_labeled_edges` considers nontree edges in both\n # orientations, so we need to not add the edge if it its\n # other orientation has been added.\n elif d == 'nontree' and v not in H[u]:\n H.add_edge(v, u, nontree=True)\n else:\n # Do nothing on 'reverse' edges; we only care about\n # forward and nontree edges.\n pass\n return H, nodes", "def depth_first_iterate(execution_graph, starting_node, direction,\n through_flows=True, through_retries=True,\n through_tasks=True):\n initial_nodes_iter, connected_to_functors = _extract_connectors(\n execution_graph, starting_node, direction,\n through_flows=through_flows, through_retries=through_retries,\n through_tasks=through_tasks)\n stack = list(initial_nodes_iter)\n while stack:\n node = stack.pop()\n node_attrs = execution_graph.nodes[node]\n if not node_attrs.get('noop'):\n yield node\n try:\n node_kind = node_attrs['kind']\n connected_to_functor = connected_to_functors[node_kind]\n except KeyError:\n pass\n else:\n stack.extend(connected_to_functor(node))", "def dfs_recursive(self, starting_vertex, destination_vertex, visited=None, path=None):\n if visited is None: # if visited is not empty\n visited = set() # create an empty 'set' to store visisted vertex, set sorts \n\n if path is None: # if the path is empty \n path = [] # create an empty list \n visited.add(starting_vertex) # add the starting_vertex to the set \n path = path + [starting_vertex] # set the path \n\n if starting_vertex == destination_vertex: # if the starting_vertex is equal to the destination_vertex\n return path # return the path \n\n for neighbor in self.vertices[starting_vertex]: # loop through neighbors \n if neighbor not in visited: # if the neighbor has not been visited \n new_path = self.dfs_recursive(neighbor, destination_vertex, visited, path) # create a new path using the dfs_recursive method\n\n if new_path: # if there is a new_path \n return new_path # return the new path \n\n return None # return None ", "def dfs(visited: list, graph: AdjList, node: int):\n if node not in visited:\n visited.append(node)\n for neighbour in graph[node]:\n dfs(visited, graph, neighbour)", "def dfs(starting_vertex):\n s = Stack()\n\n s.push([starting_vertex])\n\n while s.size() > 0:\n p = s.pop()\n l = p[-1]\n\n if l not in new_visited_rooms:\n return p\n neighbors = set(get_neighbors(l))\n \n for n in neighbors:\n new_path = p.copy()\n new_path.append(n)\n s.push(new_path)", "def dfs(self, starting_vertex, destination_vertex):\n # This solution takes a slightly different approach as to how we are storing the path\n # Now, we always queue up the next vertex we want to see, and a list of all the vertices we looked at to get here\n # so if we are queueing up vertex 3 from our example, the tuple we create will be (3, [1,2])\n # because we had to go through 1 and 2 to get here\n neighbors_to_visit = Stack()\n visited = set()\n # add the first vertex, and an empty list indicating that we have not been to any other 
vertices yet\n neighbors_to_visit.push([starting_vertex])\n # loop through the stack\n while neighbors_to_visit.size() > 0:\n path = neighbors_to_visit.pop()\n # pull out the current vertex so its easier to read\n vertex = path[-1] # last one in the path is our current vertex\n # if the vertex is the destination return it plus the path we took to get here\n if vertex == destination_vertex:\n return path\n # make sure the vertex isnt something we have seen already\n if vertex not in visited:\n # mark the vertex as visited\n visited.add(vertex)\n # add neighbors to the stack\n for neighbor in self.get_neighbors(vertex):\n new_path = path\n new_path.append(neighbor)\n neighbors_to_visit.push(new_path)", "def dfs(get_neighbors, source, target):\n\n parents = {}\n visited = set()\n stack = collections.deque()\n stack.append(source)\n while stack:\n vertex = stack.pop()\n if vertex == target:\n return _backtrack(target, lambda v: parents.get(v))\n visited.add(vertex)\n neighbors = [n for n in get_neighbors(vertex) if n not in visited]\n if neighbors:\n stack.append(vertex)\n stack.append(neighbors[0])\n parents[neighbors[0]] = vertex\n return []", "def dft(self, starting_vertex):\n \n visited = []\n stack = Stack()\n\n stack.add(starting_vertex)\n\n while len(stack):\n current = stack.pop()\n\n if current not in visited:\n print(current)\n visited.append(current)\n \n for child in self.vertices[current]:\n if child not in visited:\n stack.add(child)", "def directed_dfs(self,\n node_or_name: Union[str, Node],\n stop_at: Optional[Set[Node]] = None,\n go_up: bool = False,\n yield_start_node=False,\n visited=None):\n node = resolve_node_or_str(node_or_name, G=self)\n if visited is None:\n visited = {node}\n started = False\n if stop_at is None:\n stop_at = {}\n else:\n started = True\n if node in stop_at:\n return\n if started or yield_start_node:\n yield node\n if not go_up:\n for edge in self.out_edges(node.name):\n if edge.to_node in visited:\n continue\n visited.add(edge.to_node)\n yield from self.directed_dfs(edge.to_node, stop_at=stop_at, go_up=go_up, visited=visited)\n if go_up:\n for edge in self.in_edges(node.name):\n if edge.from_node in visited:\n continue\n visited.add(edge.from_node)\n yield from self.directed_dfs(edge.from_node, stop_at=stop_at, go_up=go_up, visited=visited)" ]
[ "0.66623896", "0.65276676", "0.64679134", "0.64623046", "0.6430159", "0.63167006", "0.6277863", "0.625629", "0.625629", "0.6231696", "0.6226393", "0.6226135", "0.6202045", "0.6167126", "0.61345613", "0.61269605", "0.61263084", "0.6116274", "0.61129403", "0.60858756", "0.60563964", "0.6041509", "0.6039684", "0.5970186", "0.5969727", "0.5965366", "0.59308577", "0.592485", "0.5922951", "0.5893463" ]
0.71058327
0
Find the feature to use for the next node split and also find where the split should be in that feature. This loops through the split options within a feature to find the best Gini score, then it loops through each feature to compare optimal Gini scores.
def find_split(self, X, y):
    choices = y.size
    if choices <= 1:
        return None, None

    # find the number of each option in the current node.
    options_parent = [np.sum(y == c) for c in range(self.num_outcomes)]

    # find the gini of current node.
    best_gini = 1.0 - sum((n / choices) ** 2 for n in options_parent)
    best_idx, best_split = None, None

    # loop through the features to get splits and options.
    for idx in range(self.num_features):
        splits, options = zip(*sorted(zip(X[:, idx], y)))

        num_left = [0] * self.num_outcomes
        num_right = options_parent.copy()
        for i in range(1, choices):
            c = options[i - 1]
            num_left[c] += 1
            num_right[c] -= 1
            gini_left = 1.0 - sum(
                (num_left[x] / i) ** 2 for x in range(self.num_outcomes)
            )
            # weight the right child by its own sample count (choices - i),
            # not by i, so its impurity is measured over the right-hand split.
            gini_right = 1.0 - sum(
                (num_right[x] / (choices - i)) ** 2 for x in range(self.num_outcomes)
            )

            gini = (i * gini_left + (choices - i) * gini_right) / choices

            if splits[i] == splits[i - 1]:
                continue

            if gini < best_gini:
                best_gini = gini
                best_idx = idx
                best_split = (splits[i] + splits[i - 1]) / 2

    return best_idx, best_split
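A minimal usage sketch for the split search above, assuming find_split is in scope as a plain module-level function and that self only has to supply num_features and num_outcomes (a hypothetical harness; the enclosing class is not shown in this record). The expected threshold reflects the (choices - i) divisor in gini_right above.

import numpy as np
from types import SimpleNamespace

# Hypothetical stand-in for the tree object: only the two attributes the method reads.
tree = SimpleNamespace(num_features=2, num_outcomes=2)

X = np.array([[2.0, 1.0],
              [3.0, 1.0],
              [10.0, 2.0],
              [11.0, 2.0]])
y = np.array([0, 0, 1, 1])

best_idx, best_split = find_split(tree, X, y)
print(best_idx, best_split)  # 0 6.5 -> split feature 0 at the midpoint of 3.0 and 10.0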
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_split(data):\n \"\"\" gets the best feature, and best value \"\"\"\n\n best_feature = None\n best_value = 0.0\n columns = data.columns\n gini_base = gini_impurity(data)\n n_rows = len(data.index) # total number of rows of data before split\n\n # Fininding which split yields the best gini gain\n max_gain = 0\n\n for i in range(len(columns)-1): # -1 b.c. class is final column\n xs = data[columns[i]].unique() # get values to test\n for x in xs: # test values\n # split dataset\n df_left = data[data[columns[i]] < x]\n df_right = data[data[columns[i]] >= x]\n\n # get gini impurities\n gini_left = gini_impurity(df_left)\n gini_right = gini_impurity(df_right)\n \n\n # Calculated weighted gini impurity\n w_left = len(df_left.index) / n_rows\n w_right = len(df_right.index) / n_rows\n\n w_gini = gini_left * w_left + gini_right * w_right\n \n\n # Calculate gini gain (we want to minimize w_gini for the smallest impurity. Ideal split is perfect Left=c1, Right=c2)\n # why not just find min w_gin instead of uding gini_gain and gini_base vaiables?\n gini_gain = gini_base - w_gini\n\n # check if this is the best split so far, store values, update max_gini\n if gini_gain > max_gain:\n best_feature = columns[i]\n best_value = x\n max_gain = gini_gain\n\n df_left = data.loc[data[best_feature] < best_value]\n df_right = data.loc[data[best_feature] >= best_value]\n \n\n return best_feature, best_value, df_left, df_right", "def __get_split_feature(self, data_set, target_feature, tree_features):\n\n if self.__criterion == 'entropy':\n feature_gains = {feature: self.__gain(data_set, feature, target_feature) for (feature) in tree_features}\n split_feature = max(feature_gains, key=feature_gains.get)\n return split_feature\n elif self.__criterion == 'gini':\n feature_ginis = {feature: self.__gini(data_set, feature, target_feature) for (feature) in tree_features}\n split_feature = min(feature_ginis, key=feature_ginis.get)\n return split_feature\n # TODO: I should check this (gini index).", "def determine_best_split(data, potential_splits, mltask):\n\n first_iteration = True\n for column_index in potential_splits:\n for value in potential_splits[column_index]:\n data_below,data_above = split_data(data, column_index, value)\n \n if mltask == 'regression':\n current_overall_metric = calculate_overall_metric(data_below, data_above, metric_function = calculate_mse)\n \n # classification\n else:\n current_overall_metric = calculate_overall_metric(data_below, data_above, metric_function = calculate_entropy)\n \n \n if first_iteration or current_overall_metric <= best_overall_metric:\n first_iteration = False\n \n best_overall_metric = current_overall_metric\n best_split_column = column_index\n best_split_value = value\n \n \n return best_split_column,best_split_value", "def find_split(eps, nfeats_test):\n # TODO your code here\n # Consider a random subset of features of the specified size\n total_feature_num = len(eps[0].values)\n if nfeats_test <= total_feature_num:\n features_to_test = sorted(random.sample(range(total_feature_num), nfeats_test))\n else:\n features_to_test = range(total_feature_num)\n\n # For each feature under consideration, and each splitting value:\n # various values that show up in the profiles for the feature at hand, and take the midpoints between adjacent values.\n # calculate the impurity of each of the two subsets of profiles split accordingly.\n res = None\n for feature in features_to_test:\n values_for_split = [ep[feature] for ep in eps]\n splitting_points = 
sorted(list(set(values_for_split)))\n # take the midpoints between adjacent values\n for i in range(len(splitting_points) - 1):\n splitting_points[i] = (splitting_points[i] + splitting_points[i + 1]) / 2\n splitting_points.pop()\n\n # calculate the impurity of each of the two subsets of profiles split accordingly.\n for sp_value in splitting_points:\n subset1 = [ep for ep in eps if ep[feature] < sp_value]\n gini1 = cal_gini(subset1)\n subset2 = [ep for ep in eps if ep[feature] >= sp_value]\n gini2 = cal_gini(subset2)\n # Sum these, weighted by the fraction of profiles in each subset.\n gini = len(subset1) / len(eps) * gini1 + len(subset2) / len(eps) * gini2\n if res is None:\n res = (feature, sp_value, gini)\n elif gini < res[2]:\n res = (feature, sp_value, gini)\n\n return res", "def choose_best_split(self, X_subset, y_subset):\n # YOUR CODE HERE\n feature_index = None\n threshold = None\n best_G = np.inf\n N = len(X_subset)\n \n for current_feature in range(X_subset.shape[1]):\n thresholds = np.unique(X_subset[:, current_feature])\n \n for t in thresholds:\n y_left, y_right = self.make_split_only_y(current_feature, t, X_subset, y_subset)\n H_L = self.H(y_left)\n H_R = self.H(y_right)\n \n G = (len(y_left) / N) * H_L + (len(y_right) / N) * H_R\n \n if G < best_G:\n best_G = G\n feature_index = current_feature\n threshold = t\n \n return feature_index, threshold", "def __find_best_split(self, x, y):\n data = np.transpose(np.vstack((np.transpose(x), y)))\n num_features = data.shape[1] - 1\n\n # initialise splitting rule components\n integer_splitting_rule = None\n feature_index_to_split = None\n max_info_gain = 0\n\n # iterate over all the features and find best splits within these\n for feature in range(num_features):\n info_gain, split_int = self.__find_best_split_in_feature(\n data[:, [feature, -1]])\n if info_gain is None:\n continue\n # update max info gain so far as it iterates over features\n if info_gain > max_info_gain:\n max_info_gain = info_gain\n feature_index_to_split = feature\n integer_splitting_rule = int(split_int)\n\n return feature_index_to_split, integer_splitting_rule", "def __find_best_split_in_feature(self, feature_and_class):\n\n # sort the feature and class and use changes in the class to reduce\n # number of potential split info gain calculations\n sorted_data = feature_and_class[\n feature_and_class[:, 0].astype(np.int).argsort()]\n potential_splits = self.__find_integers_with_class_change(sorted_data)\n info_gains = self.__info_gain_from_splits(potential_splits,\n sorted_data)\n\n # returning nothing in no information gains are found\n if len(info_gains) == 0:\n return None, None\n\n index = info_gains.index(max(info_gains))\n return info_gains[index], potential_splits[index]", "def pick_best_split(self,db,labels,ids,features=None):\n idlabels = [labels[id] for id in ids]\n if misclassification_error(idlabels) == 0:\n #base case: no misclassifications\n self.type = 'v'\n self.value = idlabels[0]\n return 0\n best = None\n bestCost = 0\n splitval = None\n discrete = True\n if features == None:\n if len(ids) < db.numFeatures():\n #look at all present features in the training set\n features = db.getPresentFeatures(ids)\n #print len(features),\"of\",db.numFeatures(),\"features selected\"\n else:\n features = range(db.numFeatures())\n elif callable(features):\n features = features()\n for i in features:\n if len(db.entryLists[i]) == 0: continue\n idiscrete = db.discreteFeature[i]\n if idiscrete:\n #count number of labels of a certain value\n splitter = 
defaultdict(lambda:defaultdict(int))\n #count of labels for missing values\n nmissing = defaultdict(int)\n for id in ids:\n val = db[i,id]\n if val is None:\n #missing values go down to all splits\n nmissing[labels[id]] += 1\n continue\n splitter[val][labels[id]] += 1\n if len(splitter) > continuous_variable_threshold:\n #print \"Determined to be a continuous variable\"\n idiscrete = False\n break\n if idiscrete:\n if len(splitter) <= 1:\n #only a single value\n continue\n #count number of missing values in all splits\n cmax = 0\n for k in splitter:\n for l,v in nmissing.iteritems():\n splitter[k][l] += v\n cmax = max(cmax,sum(splitter[k].values()))\n #shrink by fraction of (# of ids - largest child)/(# of ids)\n scale = (1.0-float(cmax)/float(len(ids)))*len(splitter)\n #evaluate cost\n cost = split_cost(splitter.values())*scale\n #print \"Split on\",i,\"information gain\",-cost,splitter.values()\n else:\n #continuous, need to learn the best split\n vals = []\n presentlabels = []\n nonelabels = []\n for id in ids:\n val = db[i,id]\n if val is None:\n nonelabels.append(labels[id])\n continue\n vals.append(val)\n presentlabels.append(labels[id])\n if len(vals) <= 1:\n print \"No values for feature\",i,\"?\"\n print vals\n continue\n #print \"Considering continuous split on\",i\n s,cost = best_split(vals,presentlabels,nonelabels)\n scale = (1.0-float(len(presentlabels)/2+len(nonelabels))/float(len(ids)))*2\n cost *= scale\n #print \"Result\",s,\"Information gain\",-cost\n \n if cost < bestCost:\n best = i\n bestCost = cost\n discrete = idiscrete\n if not idiscrete:\n splitval = s\n \n if best is None:\n self.type = 'v'\n if len(ids) > 0:\n self.value = vote(idlabels)\n return misclassification_error(idlabels)\n else:\n self.value = None\n return 0\n else:\n self.feature = best\n #discrete or inequality split\n if discrete:\n self.type = 's'\n else:\n self.type = 'i'\n self.value = splitval\n return bestCost", "def greedy_learn_search(self,db,labels):\n queue = PriorityQueue()\n dolowmem = (self.lowmem == True)\n numidsets = 0\n root_ids = range(len(labels))\n queue.push((self.root,root_ids),len(labels))\n numnodes = 1\n deepest = 0\n err = 0\n while len(queue) > 0 and numnodes+2 <= self.maxnodes:\n #print \"%d nodes, priority %d\"%(numnodes,queue.nextkey())\n nerr = queue.nextkey()\n (node,trainingset) = queue.pop()\n #print \"Greedy learn\",len(trainingset)\n if trainingset is None:\n trainingset = self.identify_examples(db,labels,node)\n if node.depth >= self.maxdepth or len(trainingset) <= self.minexamples:\n #print \" Hit depth or training set limit\"\n node.pick_best_label(db,labels,trainingset)\n err += misclassification_error([labels[id] for id in trainingset])\n continue\n features = self.feature_subset(node,db,labels,trainingset)\n cost = node.pick_best_split(db,labels,trainingset,features)\n numidsets -= len(trainingset)\n #do a split\n if node.type == 'v':\n continue\n elif node.type == 's':\n #discrete split\n node.children = dict()\n #select sub-indices\n Eids = defaultdict(list)\n noneids = []\n for id in trainingset:\n v = db[node.feature,id]\n if v is None:\n #item doesn't exist, it's a missing value\n noneids.append(id)\n else:\n Eids[v].append(id)\n #determine whether to switch to low-memory mode\n if not dolowmem and self.lowmem=='auto':\n for v,vids in Eids.iteritems():\n numidsets += len(vids)+len(noneids)\n if numidsets > self.lowmem_threshold:\n print \"Decision tree learner switching to low-memory mode\"\n dolowmem = True\n trainingset = None\n\n\n numnodes += 
len(Eids)\n #print \"Split sizes\",[len(v) for v in Eids.itervalues()]\n #print \"None size\",len(noneids)\n for v,vids in Eids.iteritems():\n #print \"->\",len(vids),\"+\",len(noneids)\n #recurse\n c = DecisionTreeNode(node)\n node.children[v] = c\n err = misclassification_error([labels[id] for id in vids+noneids])\n cids = (None if dolowmem else vids+noneids)\n queue.push((c,cids),err)\n if c.depth > deepest:\n deepest = c.depth\n print \"Decision tree learner: Reached node with depth\",deepest\n else:\n #do an inequality split\n assert node.type == 'i',\"Got a weird type? \"+str(node.type)\n leftids = []\n rightids = []\n for id in trainingset:\n val = db[node.feature,id]\n if val is not None:\n if val <= node.value: leftids.append(id)\n else: rightids.append(id)\n else:\n leftids.append(id)\n rightids.append(id)\n if len(leftids)==0 or len(rightids)==0:\n print \"node feature \"+str(node.feature)+\" doesn't have a valid split value \"+str(node.value)\n vals = [db[node.feature,id] for id in trainingset if db[node.feature,id]!=None]\n print \"min,max of training set:\",min(vals),max(vals)\n print \"cost is\",cost\n raw_input()\n assert len(leftids) > 0 and len(rightids) > 0\n if not dolowmem and self.lowmem=='auto':\n numidsets += len(leftids) + len(rightids)\n if numidsets > self.lowmem_threshold:\n print \"Decision tree learner switching to low-memory mode\"\n dolowmem = True\n trainingset = None\n numnodes += 2\n c1 = DecisionTreeNode(node)\n c2 = DecisionTreeNode(node)\n node.children = {0:c1,1:c2}\n #print \"->\",len(leftids)\n #print \"->\",len(rightids)\n err1 = misclassification_error([labels[id] for id in leftids])\n err2 = misclassification_error([labels[id] for id in rightids])\n if dolowmem:\n leftids = None\n rightids = None\n queue.push((c1,leftids),err1)\n queue.push((c2,rightids),err2)\n if c1.depth > deepest:\n deepest = c1.depth\n print \"Decision tree learner: Reached node with depth\",deepest\n #end of recursion. 
for the rest of the nodes still in the queue, make them leaf nodes\n if len(queue) > 0:\n print \"%d nodes remaining in queue, setting to leaves\"%(len(queue),)\n for (node,trainingset) in queue:\n node.pick_best_label(db,labels,trainingset)\n err += misclassification_error([labels[id] for id in trainingset])\n return err", "def greedy_learn(self,node,db,labels,ids):\n if node.depth >= self.maxdepth or len(ids) <= self.minexamples:\n #terminate recursion\n node.pick_best_label(db,labels,ids)\n err = misclassification_error([labels[id] for id in ids])\n if err > 0:\n print \"Reached a leaf and had to make some sacrifices, cost\",err\n print \" depth\",node.depth\n print \" labels\",[labels[id] for id in ids]\n return err\n\n features = self.feature_subset(node,db,labels,ids)\n cost = node.pick_best_split(db,labels,ids,features)\n \n #do a split\n if node.type == 'v':\n #base case: no misclassifications\n \"\"\"\n if cost>0:\n print \"greedy_learn: Warning, pick_best_split indicates a leaf but the cost is nonzero\"\n print \"cost=\",cost,\"misclassification=\",misclassification_error([labels[id] for id in ids])\n print \"# of ids:\",len(ids)\n for i in ids:\n print \"id\",i,\",\",\n for k in range(db.numFeatures()):\n if db[k,i] != None:\n print k,\"=\",db[k,i],\",\",\n print \"label\",labels[i]\n raw_input()\n \"\"\"\n return 0\n elif node.type == 's':\n #print \"Picked feature\",node.feature,\"split\"\n #do a discrete split\n node.children = dict()\n #select sub-indices\n Eids = defaultdict(list)\n noneids = []\n for id in ids:\n v = db[node.feature,id]\n if v is None:\n #item doesn't exist, it's a missing value\n noneids.append(id)\n else:\n Eids[v].append(id)\n #print \" split sizes:\",[len(x) for x in Eids.values()]\n #print \" None ids:\",len(noneids)\n ids = None\n errors = 0\n for v,vids in Eids.iteritems():\n #recurse\n c = DecisionTreeNode(node)\n #print \"Recursing on value\",v\n #print \" ids:\",vids\n errors += self.greedy_learn(c,db,labels,vids+noneids)\n node.children[v] = c\n if c.depth > self.deepest:\n self.deepest = c.depth\n print \"Decision tree learner: Reached node with depth\",self.deepest\n return errors\n else:\n #do an inequality split\n assert node.type == 'i'\n #print \"Picked feature\",node.feature,\"inequality value\",node.value,\"cost\",cost\n leftids = []\n rightids = []\n for id in ids:\n if db[node.feature,id] is not None:\n if db[node.feature,id] <= node.value: leftids.append(id)\n else: rightids.append(id)\n else:\n leftids.append(id)\n rightids.append(id)\n if len(rightids) == len(ids) or len(leftids) == len(ids):\n #due to missing values, this split is useless\n errors = misclassification_error([labels[id] for id in ids])\n print \"useless split on feature\",node.feature,\"value\",node.value,\"misclassification error\",errors\n print \"Left size\",len(leftids),\"right size\",len(rightids)\n raw_input()\n node.pick_best_label(db,labels,ids)\n return errors\n #clear memory associated with ids list\n del ids[:]\n ids = None\n #print \"Left size\",len(leftids),\"right size\",len(rightids)\n c1 = DecisionTreeNode(node)\n c2 = DecisionTreeNode(node)\n #left side\n errors = self.greedy_learn(c1,db,labels,leftids)\n #right side\n errors += self.greedy_learn(c2,db,labels,rightids)\n #restore index\n node.children = {0:c1,1:c2}\n if c1.depth > self.deepest:\n self.deepest = c1.depth\n print \"Decision tree learner: Reached node with depth\",self.deepest\n return errors", "def findBestValueSplitByGini(self, data, structure, colIndex):\n minGini, bestSplit = 1, 
[]\n for i in range(0, len(data)-1):\n split = (float(data[i][colIndex]) + float(data[i+1][colIndex])) / 2\n giniSplit = self.calcGiniSplitBySplitValue(data, structure, colIndex, split)\n if giniSplit <= minGini:\n minGini = giniSplit\n bestSplit = [split, giniSplit]\n return bestSplit", "def detect_splits(self):\n logg.info(' abstracted graph will have {} nodes'.format(self.n_splits+1))\n indices_all = np.arange(self.X.shape[0], dtype=int)\n segs = [indices_all]\n if False: # this is safe, but not compatible with on-the-fly computation\n tips_all = np.array(np.unravel_index(np.argmax(self.Dchosen), self.Dchosen.shape))\n else:\n if self.iroot is not None:\n tip_0 = np.argmax(self.Dchosen[self.iroot])\n else:\n tip_0 = np.argmax(self.Dchosen[0]) # just a random index, here fixed to \"0\"\n tips_all = np.array([tip_0, np.argmax(self.Dchosen[tip_0])])\n # we keep a list of the tips of each segment\n segs_tips = [tips_all]\n if self.clusters_precomputed_names:\n self.segs_names_original = [', '.join(self.clusters_precomputed_names)]\n segs_undecided = [True]\n segs_adjacency = [[]]\n segs_distances = np.zeros((1, 1))\n segs_adjacency_nodes = [{}]\n # logg.info(' do not consider groups with less than {} points for splitting'\n # .format(self.min_group_size))\n for ibranch in range(self.n_splits):\n if self.clusters == 'unconstrained_segments':\n iseg, new_tips = self.select_segment(segs, segs_tips, segs_undecided)\n if iseg == -1:\n logg.info('... partitioning converged')\n break\n logg.info('... branching {}:'.format(ibranch + 1),\n 'split group', iseg)\n segs_distances = self.do_split(segs, segs_tips,\n segs_undecided,\n segs_adjacency,\n segs_distances,\n iseg, new_tips)\n else:\n logg.msg(' split', ibranch + 1, v=4)\n stop, segs_distances = self.do_split_constrained(segs, segs_tips,\n segs_adjacency,\n segs_adjacency_nodes,\n segs_distances)\n if stop: break\n\n # segments\n self.segs = segs\n self.segs_tips = segs_tips\n self.segs_sizes = []\n for iseg, seg in enumerate(self.segs): self.segs_sizes.append(len(seg))\n\n # the full, unscaled adjacency matrix\n self.segs_adjacency_full_attachedness = 1/segs_distances\n # if self.attachedness_measure == 'connectedness':\n # norm = np.sqrt(np.multiply.outer(self.segs_sizes, self.segs_sizes))\n # self.segs_adjacency_full_attachedness /= norm\n self.segs_adjacency_full_confidence, self.segs_adjacency_tree_confidence \\\n = self.compute_adjacency_confidence(\n self.segs_adjacency_full_attachedness,\n segs_adjacency,\n self.tree_based_confidence)\n np.fill_diagonal(self.segs_adjacency_full_attachedness, 0)", "def find_split(x, y):\n\n # Need the starting entropy so we can measure improvement...\n start_entropy = calculate_entropy(y)\n\n # Best thus far, initialised to a dud that will be replaced immediately...\n best = {'infogain': -np.inf}\n\n # Randomly allocate the splits to be traversed (without replacement)\n feature_total = x.shape[1]\n feature_subset_count = int(np.sqrt(feature_total))\n feature_subset = np.random.permutation(feature_total)[:feature_subset_count]\n\n # Loop every possible split of every feature...\n for feature_index in feature_subset:\n for split in np.unique(x[:, feature_index]):\n\n left_indices = []\n right_indices = []\n\n # Get index of rows where x[row_index,feature_index] <= split\n for row_index,row in enumerate(x):\n left_indices.append(row_index) if x[row_index,feature_index] <= split else right_indices.append(row_index)\n\n left_ys = y[left_indices]\n right_ys = y[right_indices]\n\n nleft = len(left_ys)\n nright 
= len(right_ys)\n ntotal = nleft + nright\n infogain = start_entropy - (nleft / ntotal) * calculate_entropy(left_ys) - (\n nright / ntotal) * calculate_entropy(right_ys)\n\n if infogain > best['infogain']:\n best = {'feature': feature_index,\n 'split': split,\n 'infogain': infogain,\n 'left_indices': left_indices,\n 'right_indices': right_indices}\n return best", "def _choose_best_feature(self, X, y, label, sample_weights=None):\n best_feature_idx = 0\n # YOUR CODE HERE\n # Note that you need to implement the sampling feature part here for random forest!\n # Hint: You may find `np.random.choice` is useful for sampling.\n # begin answer\n n_features = X.shape[1]\n if self.sample_feature:\n max_features=max(1, min(n_features, int(np.round(np.sqrt(n_features)))))\n new_features=np.random.choice(n_features, max_features, replace=False)\n new_X=X[:, new_features]\n else:\n new_X=X\n n_new_features=new_X.shape[1]\n #new_features=np.random.choice(n_features, n_features, replace=False)\n #old_cost=self.entropy(y, sample_weights)\n #use C4.5 algorirhm\n best_impurity=None\n best_feature_idx=0\n best_feature_val=X[0, 0]\n for i in range(n_new_features):\n unique_vals=np.unique(X[:,i])\n for value in unique_vals:\n sub1_X, sub1_y, label1, sub1_sample_weights, sub2_X, sub2_y, label2, sub2_sample_weights=self._split_dataset(X, y, label, i, value, sample_weights)\n if len(sub1_y)>0 and len(sub2_y)>0:\n new_impurity=self._impurity(y, sub1_y, sub2_y)\n if best_impurity is None or new_impurity > best_impurity:\n best_impurity=new_impurity\n best_feature_idx=i\n best_feature_val=value \n # end answer\n return best_feature_idx, best_feature_val", "def best_split(self):\n sub_group = []\n\n current_entropy = self.entropy(self._Passengers)\n best_gain = 0 # holds the best entropy difference so far\n best_split = self._Attr[0].get_name()\n relative_entropy = 0 # entropy while taking account for the size of the population\n\n for Attribute in self._Attr:\n relative_entropy = 0\n print(\"Attr considered: \" + Attribute.get_name())\n for Attr_option in Attribute.get_options():\n sub_group = []\n for Passenger in self._Passengers:\n if self.passenger_attr_option_check(Passenger,\n Attribute.get_name(),\n Attr_option): # if P.A = V\n sub_group.append(Passenger)\n if len(sub_group) > 0 and len(self._Passengers) > 0:\n relative_entropy += self.entropy(sub_group) * (len(sub_group)/len(self._Passengers))\n\n if current_entropy - relative_entropy > best_gain:\n best_gain = current_entropy - relative_entropy\n best_split = Attribute.get_name()\n\n print(f\"best split:{best_split} \\n with entropy gain of:\\n {best_gain}\")\n\n return best_split", "def best_iterative_based_selector(train_features, test_features, train_similarity_target, test_similarity_target, regressor):\n\titerative_based_score = 0\n\t# given that all pairs use the same amount of features, the position 0 was arbitrarily selected to compute the number of features being used\n\tmin_number_features = int(0.15*len(train_features[0]))\n\tmax_number_features = int(0.85*len(train_features[0]))\n\n\t# min_number_features = 19\n\t# max_number_features = 20\n\n\titerative_based_selector = None\n\titerative_based_train_features_selected = None\n\titerative_based_test_features_selected = None\n\n\tfor i in range(min_number_features, max_number_features):\n\t\tprint(i)\n\t\ttemp_iterative_based_selector = RFE(RandomForestRegressor(n_estimators=100), n_features_to_select=i)\n\t\ttemp_iterative_based_selector.fit(train_features, 
train_similarity_target)\n\t\ttemp_iterative_based_train_features_selected = temp_iterative_based_selector.transform(train_features)\n\t\ttemp_iterative_based_test_features_selected = temp_iterative_based_selector.transform(test_features)\n\n\t\tregressor.fit(temp_iterative_based_train_features_selected, train_similarity_target)\n\n\t\ttemp_score = regressor.score(temp_iterative_based_test_features_selected, test_similarity_target)\n\t\tprint(\"The score on the selected features (Iterative Based Selector): %.3f\" % temp_score)\n\n\t\tif temp_score > iterative_based_score:\n\t\t\titerative_based_score = temp_score\n\t\t\titerative_based_selector = temp_iterative_based_selector\n\t\t\titerative_based_train_features_selected = temp_iterative_based_train_features_selected\n\t\t\titerative_based_test_features_selected = temp_iterative_based_test_features_selected\n\n\titerative_based_mask = iterative_based_selector.get_support()\n\tprint(\"This is the iterative based mask: \")\n\tprint(iterative_based_mask)\n\n\treturn iterative_based_selector, iterative_based_score, iterative_based_train_features_selected, iterative_based_test_features_selected, iterative_based_mask", "def _best_split(cls, X, y):\n n = X.shape[0]\n num_feature = X.shape[1]\n y_types = np.unique(y)\n\n # initialize\n min_score = float(n)\n feature_idx = None\n best_theta = None\n best_idx = None\n\n for feature_idx in xrange(num_feature):\n # counter for y\n cumulate_y = Counter()\n rest_y = Counter()\n for y_type in y_types:\n cnt = np.where(y == y_type)[0].shape[0]\n rest_y[y_type] = cnt\n\n # sorted data\n sorted_idx = np.argsort(X[:, feature_idx])\n sorted_X = np.copy(X)\n sorted_y = np.copy(y)\n sorted_X = sorted_X[sorted_idx]\n sorted_y = sorted_y[sorted_idx]\n #print \"_best_split:\", sorted_X.shape, sorted_y.shape\n\n for idx in xrange(n-1):\n theta = (sorted_X[idx, feature_idx] + sorted_X[idx + 1, feature_idx]) / 2\n y_label = sorted_y[idx]\n cumulate_y[y_label] += 1\n rest_y[y_label] -= 1\n left_cnt = sum(cumulate_y.values())\n right_cnt = sum(rest_y.values())\n w_1 = left_cnt * cls._gini_index(cumulate_y.values())\n w_2 = right_cnt * cls._gini_index(rest_y.values())\n score = w_1 + w_2\n if score < min_score:\n min_score = score\n best_theta = theta\n best_idx = feature_idx\n #print('new min score: %.3f' % score)\n #print('feature: %d, theta: %.3f' % (best_idx, best_theta))\n #print('left: %d, right: %d' % (left_cnt, right_cnt))\n print('feature: %d, theta: %.3f' % (best_idx, best_theta))\n return (best_idx, best_theta)", "def _best_split(cls, X, y):\n n = X.shape[0]\n num_feature = X.shape[1]\n y_types = np.unique(y)\n\n # initialize\n min_score = float(n)\n feature_idx = None\n best_theta = None\n best_idx = None\n\n for feature_idx in xrange(num_feature):\n # counter for y\n cumulate_y = Counter()\n rest_y = Counter()\n for y_type in y_types:\n cnt = np.where(y == y_type)[0].shape[0]\n rest_y[y_type] = cnt\n\n # sorted data\n sorted_idx = np.argsort(X[:, feature_idx])\n sorted_X = np.copy(X)\n sorted_y = np.copy(y)\n sorted_X = sorted_X[sorted_idx]\n sorted_y = sorted_y[sorted_idx]\n #print \"_best_split:\", sorted_X.shape, sorted_y.shape\n\n for idx in xrange(n-1):\n theta = (sorted_X[idx, feature_idx] + sorted_X[idx + 1, feature_idx]) / 2\n y_label = sorted_y[idx]\n cumulate_y[y_label] += 1\n rest_y[y_label] -= 1\n left_cnt = sum(cumulate_y.values())\n right_cnt = sum(rest_y.values())\n w_1 = left_cnt * cls._gini_index(cumulate_y.values())\n w_2 = right_cnt * cls._gini_index(rest_y.values())\n score = w_1 + 
w_2\n if score < min_score:\n min_score = score\n best_theta = theta\n best_idx = feature_idx\n #print('new min score: %.3f' % score)\n #print('feature: %d, theta: %.3f' % (best_idx, best_theta))\n #print('left: %d, right: %d' % (left_cnt, right_cnt))\n print('feature: %d, theta: %.3f' % (best_idx, best_theta))\n return (best_idx, best_theta)", "def best_split(self, X, y, attributes):\n if (self.criterion==\"information_gain\"):\n global_if = float('-inf') # the highest value of information gain/gini gain seen so far\n attr = None\n for attribute in attributes:\n attr_val = X[attribute].copy()\n cur_if = information_gain(y,attr_val,self.type)\n if (cur_if>global_if):\n # Update when a better split is receieved\n global_if = cur_if\n attr = attribute\n return attr\n else:\n global_if = float('inf')\n attr = None\n for attribute in attributes:\n attr_val = X[attribute].copy()\n cur_if = gini_gain(y,attr_val)\n if (global_if>cur_if):\n # Update when a better split is receieved\n global_if = cur_if\n attr = attribute\n return attr", "def get_best_split(rows):\n best_gain = 0\n best_question = None\n current_impurity = get_gini(rows)\n n_features = len(rows[0])\n\n for col in range(n_features):\n\n for row in rows:\n question = Question(col, row[col])\n true_rows, false_rows = partition(rows, question)\n\n if len(true_rows) == 0 or len(false_rows) == 0:\n break\n\n question_gain = get_info_gain(true_rows, false_rows, current_impurity)\n\n if question_gain >= best_gain:\n best_gain = question_gain\n best_question = question\n\n print(best_gain)\n print(best_question)\n return best_gain, best_question", "def __gini(self, data_set, split_feature, target_feature):\n frequencies = self.__calculate_frequency(data_set, split_feature)\n gini_value = 1.0\n\n # Calculate the gini of the data.\n for value, frequency in frequencies.items():\n probability = frequency / sum(frequencies.values())\n gini_value -= math.pow(probability, 2)\n\n return gini_value", "def test_gini_gain(self):\n self.decision_tree.train(self.data,\n list(range(self.data.num_samples)),\n max_depth=1,\n min_samples_per_node=1,\n use_stop_conditions=False,\n max_p_value_chi_sq=None)\n self.assertEqual(self.decision_tree.get_root_node().node_split.separation_attrib_index, 1)\n self.assertEqual(self.decision_tree.get_root_node().node_split.splits_values,\n [set([0]), set([1]), set([2])])\n self.assertEqual(self.decision_tree.get_root_node().node_split.values_to_split,\n {0:0, 1:1, 2:2})\n self.assertEqual(self.decision_tree.get_root_node().node_split.criterion_value, 0.66)", "def find_best_split(rows):\n best_gain = 0 # keep track of the best information gain\n best_question = None # keep train of the feature / value that produced it\n current_uncertainty = gini(rows)\n n_features = len(rows[0]) - 1 # number of columns\n #print(\"n_features:\", n_features)\n\n for col in range(1,n_features): # for each feature\n # for each iteration this is the set of all values of a specific column, eg, All pixels number 0\n values = set([row[col] for row in rows]) # unique values in the column\n for val in values: # for each value\n\n # Create a question object for each val under a column, holding the val and the col number\n question = Question(col, val)\n\n # try splitting the dataset\n true_rows, false_rows = partition(rows, question)\n\n # Skip this split if it doesn't divide the\n # dataset.\n if len(true_rows) == 0 or len(false_rows) == 0:\n continue\n\n # Calculate the information gain from this split\n gain = info_gain(true_rows, false_rows, 
current_uncertainty)\n\n # You actually can use '>' instead of '>=' here\n # but I wanted the tree to look a certain way for our\n # toy dataset.\n if gain >= best_gain:\n best_gain, best_question = gain, question\n\n return best_gain, best_question", "def test_gini_gain(self):\n self.decision_tree.train(self.data,\n list(range(self.data.num_samples)),\n max_depth=1,\n min_samples_per_node=1,\n use_stop_conditions=False,\n max_p_value_chi_sq=None)\n self.assertEqual(self.decision_tree.get_root_node().node_split.separation_attrib_index, 0)\n self.assertEqual(self.decision_tree.get_root_node().node_split.splits_values,\n [set([0]), set([1])])\n self.assertEqual(self.decision_tree.get_root_node().node_split.values_to_split,\n {0:0, 1:1})\n self.assertEqual(self.decision_tree.get_root_node().node_split.criterion_value, 0.5)", "def evaluate_split( df, attribute, split ):\n mask = df[attribute] <= split\n \n # split the dataset on the split attribute\n dfl = df[mask]\n dfr = df[~mask]\n \n \n # calculate weighting factors for child\n weighting_factor_left = float(dfl.shape[0])/df.shape[0]\n weighting_factor_right = float(dfr.shape[0])/df.shape[0]\n\n # calculate gini for left and right\n gini_parent = gini_impurity(df)\n gini_left = gini_impurity(dfl)\n gini_right = gini_impurity(dfr)\n \n # calculate weighted gini for this split \n weighted_gini = gini_parent - (weighting_factor_left*gini_left + weighting_factor_right*gini_right)\n return weighted_gini", "def aux_best_percentile_selector(train_features, test_features, train_similarity_target, test_similarity_target, regressor, used_features):\n\tpercentile_score = 0\n\tpercentiles = [25, 35, 45, 50, 55, 65, 75]\n\t# percentiles = [45]\n\tpercentile_selector = None\n\tpercentile_train_features_selected = None\n\tpercentile_test_features_selected = None\n\n\tfor percentile in percentiles:\n\t\tprint(percentile)\n\t\ttemp_percentile_selector = SelectPercentile(score_func=f_regression, percentile=percentile)\n\t\ttemp_percentile_selector.fit(train_features, train_similarity_target)\n\t\ttemp_percentile_train_features_selected = temp_percentile_selector.transform(train_features)\n\t\ttemp_percentile_test_features_selected = temp_percentile_selector.transform(test_features)\n\n\t\tregressor.fit(temp_percentile_train_features_selected, train_similarity_target)\n\n\t\ttemp_score = regressor.score(temp_percentile_test_features_selected, test_similarity_target)\n\t\tprint(\"The score on the selected features (Percentile Selector): %.3f\" % temp_score)\n\n\t\tif temp_score > percentile_score:\n\t\t\tpercentile_score = temp_score\n\t\t\tpercentile_selector = temp_percentile_selector\n\t\t\tpercentile_train_features_selected = temp_percentile_train_features_selected\n\t\t\tpercentile_test_features_selected = temp_percentile_test_features_selected\n\n\tpercentile_mask = percentile_selector.get_support()\n\tprint(\"This is the percentile mask: \")\n\tprint(percentile_mask)\n\n\tpercentile_mask = build_mask(percentile_mask, used_features)\n\tmask_save_path = os.path.join('feature_selection_masks', 'assin2_percentile_based_mask.txt')\n\tdebug_data(percentile_mask, mask_save_path)\n\n\treturn percentile_train_features_selected, percentile_test_features_selected, percentile_selector", "def _compute_best_split_and_push(self, node):\n\n node.split_info = self.splitter.find_node_split(\n node.sample_indices, node.histograms, node.sum_gradients,\n node.sum_hessians)\n\n if node.split_info.gain <= 0: # no valid split\n self._finalize_leaf(node)\n else:\n 
heappush(self.splittable_nodes, node)", "def get_split(self,X,y):\n \n BEST_COL = 0\n BEST_SPLIT =0\n BEST_IMPUR = 99\n for i,feature in enumerate(X.T):\n arg_sort=np.argsort(feature) #Sort the feature for optimizing the find of splitting points\n feature= feature[arg_sort]\n y_sort = y[arg_sort]\n splits = self.possible_splits(feature,y_sort) #Get \n\n impur,splits = self.test_split(feature,y_sort,splits) #Get impurity for splitting points\n best_idx = np.argmin(impur)\n best_impur = impur[best_idx]\n \n if best_impur==0.0: #Found perfect split, terminate\n return(i,splits[best_idx])\n elif best_impur<BEST_IMPUR:\n BEST_IMPUR=best_impur\n BEST_SPLIT=splits[best_idx]\n BEST_COL=i\n return (BEST_COL,BEST_SPLIT)", "def split_next(self):\n # Consider the node with the highest loss reduction (a.k.a. gain)\n node = heappop(self.splittable_nodes)\n\n tic = time()\n (sample_indices_left,\n sample_indices_right,\n right_child_pos) = self.splitter.split_indices(node.split_info,\n node.sample_indices)\n self.total_apply_split_time += time() - tic\n\n depth = node.depth + 1\n n_leaf_nodes = len(self.finalized_leaves) + len(self.splittable_nodes)\n n_leaf_nodes += 2\n\n left_child_node = TreeNode(depth,\n sample_indices_left,\n node.split_info.sum_gradient_left,\n node.split_info.sum_hessian_left,\n parent=node)\n right_child_node = TreeNode(depth,\n sample_indices_right,\n node.split_info.sum_gradient_right,\n node.split_info.sum_hessian_right,\n parent=node)\n left_child_node.sibling = right_child_node\n right_child_node.sibling = left_child_node\n node.right_child = right_child_node\n node.left_child = left_child_node\n\n # set start and stop indices\n left_child_node.partition_start = node.partition_start\n left_child_node.partition_stop = node.partition_start + right_child_pos\n right_child_node.partition_start = left_child_node.partition_stop\n right_child_node.partition_stop = node.partition_stop\n\n self.n_nodes += 2\n\n if self.max_depth is not None and depth == self.max_depth:\n self._finalize_leaf(left_child_node)\n self._finalize_leaf(right_child_node)\n return left_child_node, right_child_node\n\n if (self.max_leaf_nodes is not None\n and n_leaf_nodes == self.max_leaf_nodes):\n self._finalize_leaf(left_child_node)\n self._finalize_leaf(right_child_node)\n self._finalize_splittable_nodes()\n return left_child_node, right_child_node\n\n if left_child_node.n_samples < self.min_samples_leaf * 2:\n self._finalize_leaf(left_child_node)\n if right_child_node.n_samples < self.min_samples_leaf * 2:\n self._finalize_leaf(right_child_node)\n\n # Compute histograms of childs, and compute their best possible split\n # (if needed)\n should_split_left = left_child_node.value is None # node isn't a leaf\n should_split_right = right_child_node.value is None\n if should_split_left or should_split_right:\n\n # We will compute the histograms of both nodes even if one of them\n # is a leaf, since computing the second histogram is very cheap\n # (using histogram subtraction).\n n_samples_left = left_child_node.sample_indices.shape[0]\n n_samples_right = right_child_node.sample_indices.shape[0]\n if n_samples_left < n_samples_right:\n smallest_child = left_child_node\n largest_child = right_child_node\n else:\n smallest_child = right_child_node\n largest_child = left_child_node\n\n # We use the brute O(n_samples) method on the child that has the\n # smallest number of samples, and the subtraction trick O(n_bins)\n # on the other one.\n tic = time()\n smallest_child.histograms = \\\n 
self.histogram_builder.compute_histograms_brute(\n smallest_child.sample_indices)\n largest_child.histograms = \\\n self.histogram_builder.compute_histograms_subtraction(\n node.histograms, smallest_child.histograms)\n self.total_compute_hist_time += time() - tic\n\n tic = time()\n if should_split_left:\n self._compute_best_split_and_push(left_child_node)\n if should_split_right:\n self._compute_best_split_and_push(right_child_node)\n self.total_find_split_time += time() - tic\n\n return left_child_node, right_child_node", "def param_selection(df):\n n = df.count()\n numTrees = np.round(np.log10(n) * 100)\n maxDepth = np.round(np.log(n))\n minInstancesPerNode = np.round(np.log10(n) * (np.ceil(n / 500000) + 1))\n #maxBins = np.minimum(80, np.round(500 / np.log(n)))\n subsamplingRate = float(np.where(n > 500000, 0.6, 0.8))\n maxIter = np.round(np.log10(n) * 50)\n\n # minInstancesPerNode\n\n minInstancesPerNode = 200 if minInstancesPerNode > 200 else maxDepth\n minInstancesPerNode = 25 if minInstancesPerNode < 25 else minInstancesPerNode\n\n # maxDepth\n\n maxDepth = 15 if maxDepth > 15 else maxDepth\n maxDepth = 3 if maxDepth < 3 else maxDepth\n\n # maxIter applies to GBT\n\n maxIter = 200 if maxIter > 100 else maxIter\n maxIter = 50 if maxIter < 50 else maxIter\n\n # maxBins set to 32\n\n maxBins = 32\n\n print \"[Info] numTrees: \" + str(numTrees)\n print \"[Info] maxDepth: \" + str(maxDepth)\n print \"[Info] minInstancesPerNode: \" + str(minInstancesPerNode)\n print \"[Info] maxBins: \" + str(maxBins)\n print \"[Info] subsamplingRate: \" + str(subsamplingRate)\n print \"[Info] maxIter: \" + str(maxIter)\n\n return numTrees, maxDepth, minInstancesPerNode, maxBins, subsamplingRate, maxIter" ]
[ "0.7151503", "0.64216876", "0.6352301", "0.6302367", "0.62781364", "0.62125915", "0.6181519", "0.6135771", "0.60826075", "0.6048427", "0.5933168", "0.590119", "0.5873095", "0.58700764", "0.5845192", "0.583579", "0.5818668", "0.5818668", "0.58096075", "0.5801238", "0.5736901", "0.57084674", "0.5700281", "0.5676648", "0.56521", "0.56396794", "0.5639582", "0.5626537", "0.5621652", "0.56026095" ]
0.66302925
1
A class without the key_fields annotation should raise a RuntimeError
def testNoKeyFields():
    with pytest.raises(RuntimeError):

        class AnnotatedNode(Node):
            x: str
            y: int

            def __init__(self, x: str, y: int):
                self.x = x
                self.y = y

            @property
            def _display(self) -> str:
                return self.x
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_key_no_data(self):\n key = Key({})\n\n assert key.warning is None\n assert key.in_car is None", "def _validate(self):\n fields, schema = self.__dict__, self._def.default\n extra_fields = fields.viewkeys() - schema.viewkeys()\n if len(extra_fields) > 0:\n raise AttributeError('Fields found that are not in the schema: %r' % (list(extra_fields)))\n for key in fields.iterkeys():\n if type(fields[key]) is not type(schema[key]):\n raise AttributeError('Invalid %s for field \"%s\", should be %s' %\n (type(fields[key]), key, type(schema[key])))", "def test_set_non_dictionary_based_field(self):\n self.assertRaises(TypeError, self._p.set_fields, '')", "def test_entities__Entity__getRawField__1(entity):\n with pytest.raises(KeyError):\n entity.getRawField('asdf')", "def test_throws_base_price_missing_key(self):\n with pytest.raises(marshmallow.ValidationError):\n BasePrice.Schema().loads(json.dumps(base_price_missing_key))", "def test_unknown_fields_are_not_allowed() -> None:\n with pytest.raises(pydantic.ValidationError):\n r4.Meta(unknown_field=True)", "def testBadKeys(self):\n # Ignore access to protected members\n # pylint: disable=W0212\n self.assertRaises(DOLAPI._DOLAPIError,\n self.badauth.table,\n self.dataset,\n self.table)", "def test_defining_a_primary_key_counter_column_fails(self):\r\n with self.assertRaises(TypeError):\r\n class model(Model):\r\n partition = columns.UUID(primary_key=True, default=uuid4)\r\n cluster = columns.Counter(primary_ley=True)\r\n counter = columns.Counter()\r\n\r\n # force it\r\n with self.assertRaises(ModelDefinitionException):\r\n class model(Model):\r\n partition = columns.UUID(primary_key=True, default=uuid4)\r\n cluster = columns.Counter()\r\n cluster.primary_key = True\r\n counter = columns.Counter()", "def test_keyerror(self):\n try:\n self.db['foo']\n except KeyError, e:\n assert \"no key 'foo' in database <SequenceFileDB\" in str(e), str(e)", "def test_defining_only_or_defer_on_nonexistant_fields_fails(self):", "def __missing__(self, key):\n return key", "def test_missing_required_field_raises_error():\n with pytest.raises(ValidationError):\n Entity()", "def test_missing_mandatory(self):\n try:\n CollectorUpdate()\n self.assertFalse(\"RuntimeError expected\")\n except RuntimeError as exception:\n assert_that(str(exception), equal_to(\"Missing keys: 'stage', 'status', 'timestamp'\"))", "def test_entities__Entity__getField__1(entity):\n with pytest.raises(KeyError):\n entity.getField('asdf')", "def _check_key(self, key):\n raise NotImplementedError", "def test_declare_error(self):\n\n with self.assertRaises(ModelDeclareError) as ctx:\n class Foo(Base):\n pass\n\n with self.assertRaises(ModelDeclareError) as ctx:\n class Foo(Base):\n _id = IDField()\n _id_2 = IDField()", "def test_raise_if_no_attr(self):\n self.assertRaises(AttributeError, self.Model.set_primary_key, 'asdf')", "def testMissingKeys(self):\n self.assertRaises(ValueError,\n self.unauth.table,\n self.dataset,\n self.table)", "def test_raise_on_missing_critical(self):\n name_for_field = 'absent_field'\n field_opts = {'names': (name_for_field, 'absent'), 'alt_field': '', 'computed': False}\n critical_fields = {'absent_field': field_opts}\n with self.assertRaises(ImproperlyConfigured):\n self.form.fields_for_critical(critical_fields)", "def check_keys(self):", "def test_keys_failure(self):\n storage = Storage()\n storage._keys_dict = {'1': 'one',\n 'abc': '1'}\n self.assertRaises(StoragePatternError, storage.keys, 'ab[cd')", "def test_fields(self):\n\n class Foo(Model):\n field1 = 
StringField()\n field2 = IntegralField()\n\n assert hasattr(Foo, \"_fields\")\n assert type(Foo._fields) is dict\n\n assert not hasattr(Foo, \"field1\")\n assert \"field1\" in Foo._fields\n assert type(Foo._fields[\"field1\"]) is StringField\n\n assert not hasattr(Foo, \"field2\")\n assert \"field2\" in Foo._fields\n assert type(Foo._fields[\"field2\"]) is IntegralField", "def test_normal_fields_can_be_defined_between_primary_keys(self):", "def test_attempting_to_save_abstract_model_fails(self):\r\n with self.assertRaises(CQLEngineException):\r\n AbstractModelWithFullCols.create(pkey=1, data=2)", "def __missing__(self, key):\n global MISSING\n MISSING = key # For debugging - save name of missing key\n return INVALID", "def test_primary_key(self):\r\n\r\n # This should just work.\r\n class AutoFieldKey(models.Model):\r\n key = models.AutoField(primary_key=True)\r\n AutoFieldKey.objects.create()\r\n\r\n # This one can be exactly represented.\r\n class CharKey(models.Model):\r\n id = models.CharField(primary_key=True, max_length=10)\r\n CharKey.objects.create(id='a')\r\n\r\n # Some rely on unstable assumptions or have other quirks and\r\n # should warn.\r\n\r\n# # TODO: Warning with a range limitation.\r\n# with self.assertRaises(Warning):\r\n#\r\n# class IntegerKey(models.Model):\r\n# id = models.IntegerField(primary_key=True)\r\n# IntegerKey.objects.create(id=1)\r\n\r\n# # TODO: date/times could be resonably encoded / decoded as\r\n# # strings (in a reversible manner) for key usage, but\r\n# # would need special handling and continue to raise an\r\n# # exception for now\r\n# with self.assertRaises(Warning):\r\n#\r\n# class DateKey(models.Model):\r\n# id = models.DateField(primary_key=True, auto_now=True)\r\n# DateKey.objects.create()\r\n\r\n# # TODO: There is a db.Email field that would be better to\r\n# # store emails, but that may prevent them from being\r\n# # used as keys.\r\n# with self.assertRaises(Warning):\r\n#\r\n# class EmailKey(models.Model):\r\n# id = models.EmailField(primary_key=True)\r\n# EmailKey.objects.create(id='[email protected]')\r\n\r\n# # TODO: Warn that changing field parameters breaks sorting.\r\n# # This applies to any DecimalField, so should belong to\r\n# # the docs.\r\n# with self.assertRaises(Warning):\r\n#\r\n# class DecimalKey(models.Model):\r\n# id = models.DecimalField(primary_key=True, decimal_places=2,\r\n# max_digits=5)\r\n# DecimalKey.objects.create(id=1)\r\n\r\n # Some cannot be reasonably represented (e.g. binary or string\r\n # encoding would prevent comparisons to work as expected).\r\n with self.assertRaises(DatabaseError):\r\n\r\n class FloatKey(models.Model):\r\n id = models.FloatField(primary_key=True)\r\n FloatKey.objects.create(id=1.0)\r\n\r\n # TODO: Better fail during validation or creation than\r\n # sometimes when filtering (False = 0 is a wrong key value).\r\n with self.assertRaises(DatabaseError):\r\n\r\n class BooleanKey(models.Model):\r\n id = models.BooleanField(primary_key=True)\r\n BooleanKey.objects.create(id=True)\r\n len(BooleanKey.objects.filter(id=False))", "def __init__(self, key):\n self.key = key", "def test_no_extra_fields():\n t_task = Task()\n t_dict = t_task._asdict()\n assert len(t_dict) <= 4", "def __init__(self, key=None):\n self.key = key", "def __missing__(self, key):\n raise KeyNotInContextError(f\"{key} not found in the pypyr context.\")" ]
[ "0.664005", "0.64458597", "0.63761204", "0.63392276", "0.620251", "0.61894745", "0.6169179", "0.611666", "0.60839826", "0.60760987", "0.6069411", "0.6068303", "0.60593605", "0.60562086", "0.6022622", "0.60070866", "0.5998208", "0.59926015", "0.59468085", "0.59364104", "0.5934645", "0.59262913", "0.58830374", "0.5865956", "0.5852171", "0.584993", "0.5848283", "0.5838684", "0.5835953", "0.5835846" ]
0.7018351
0
creates randomized colors of shape size_x by size_y
def create_world(size_x=100, size_y=100):
    colors = np.random.randint(0, 2, (size_x, size_y)).tolist()
    for row in range(len(colors)):
        for col in range(len(colors[row])):
            if (colors[row][col] == 1):
                colors[row][col] = 'R'
            else:
                colors[row][col] = 'G'

    r = [[10.0 for i in range(size_y)] for i in range(size_x)]
    g = [[10.0 for i in range(size_y)] for i in range(size_x)]
    b = [[10.0 for i in range(size_y)] for i in range(size_x)]

    RGB = []
    for i in range(size_x):
        for j in range(size_y):
            if colors[i][j] == 'R':
                r[i][j] = 255.0
            else:
                b[i][j] = 255.0
            RGB.append(b[i][j])
            RGB.append(r[i][j])
            RGB.append(g[i][j])
    RGB = np.array(RGB).reshape(size_x, size_y, 3)
    return RGB, colors
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def randColor():\r\n return np.array([random.random(), random.random(), random.random()]).reshape((1, 1, 3))", "def random_color_gen():\n r = randint(0, 255)\n g = randint(0, 255)\n b = randint(0, 255)\n return [r, g, b]", "def random_color(num):\n # 为每个类别的边界框随机匹配相应颜色\n np.random.seed(80)\n COLORS = np.random.randint(0, 256, size=(num, 3), dtype='uint8') #\n return COLORS", "def random_color():\n colormode(255)\n return randint(0, 255), randint(0, 255), randint(0, 255)", "def mutate(self, size):\n rand = random.random()\n if rand <= 0.5:\n print u\"changing colour\"\n idx = random.randrange(0, 4)\n value = random.randrange(0, 256)\n colour = list(self.colour)\n colour[idx] = value\n self.colour = tuple(colour)\n else:\n print u\"changing point\"\n idx = random.randrange(0, len(self.points))\n point = generate_point(size[0], size[1])\n self.points[idx] = point", "def create_random_color(self):\n # Create a list of n colors.\n n = 4\n dc = 1.0 / (n-1)\n color_list = [i*dc for i in range(n)]\n\n if self.is_scaffold:\n rgb = [1.0, 1.0, 1.0]\n else:\n rgb = [random.choice(color_list) for i in range(3)]\n # Don't generate blue (that's for a scaffold in cadnano) or black.\n if (rgb[0] == 0.0) and (rgb[1] == 0.0):\n rgb[0] = random.choice(color_list[1:])\n if rgb[2] == 0.0: \n rgb[2] = random.choice(color_list[1:]) \n #__if (rgb[0] == 0) and (rgb[1] == 0)\n #__if self.is_scaffold\n return rgb", "def generate_random_colours_list(rng: random.Random, size: int) -> List[TupleInt3]:\n return [random_colour(rng) for _ in range(size)]", "def implement_random(self):\n shape = set()\n for coord in INDICES:\n if randint(0, 1):\n shape.add(coord)\n self.implement_shape(shape)", "def randcolor():\n return (randint(0,255), randint(0,255), randint(0,255))", "def generate_random_data(size, x_min=X_MIN, x_max=X_MAX, y_min=Y_MIN, y_max=Y_MAX):\n result = []\n for _i in range(size):\n result.append((randint(x_min, x_max), randint(y_min, y_max)))\n\n return result", "def random_color():\n\n rgbl=[255,0,0]\n random.shuffle(rgbl)\n return tuple(rgbl)", "def color_from_ind(i: int) -> np.ndarray:\n np.random.seed(i)\n return np.random.random(3)", "def random_shape(height, width):\n # Shape\n shape = random.choice([\"square\", \"circle\", \"triangle\"])\n # Color\n color = tuple([random.randint(0, 255) for _ in range(3)])\n # Center x, y\n buffer = 20\n y = random.randint(buffer, height - buffer - 1)\n x = random.randint(buffer, width - buffer - 1)\n # Size\n s = random.randint(buffer, height // 4)\n return shape, color, (x, y, s)", "def random_color() -> Tuple[int, int, int]:\n return randrange(0, 255), randrange(0, 255), randrange(0, 255)", "def createPickColor():\n color_list = []\n\n for i in range(50, 450, 100): #Create the 4 shapes to show colors\n point1 = g.Point(50, i)\n point2 = g.Point(100, i+50)\n shape = g.Rectangle(point1, point2)\n color_list.append(shape)\n\n #Set the right colors\n color_list[0].setFill(\"Blue\")\n color_list[1].setFill(\"Green\")\n color_list[2].setFill(\"Yellow\")\n color_list[3].setFill(\"Red\")\n\n return color_list", "def randomcolour(self):\n r = random.randrange(1, 255)\n g = random.randrange(1, 255)\n b = random.randrange(1, 255)\n self.colour((r,g,b))", "def _random_color() -> List[float]:\n return [np.random.uniform(), np.random.uniform(), np.random.uniform()]", "def randcolor():\r\n r = random(0.0, 1.0)\r\n g = random(0.0, 1.0)\r\n b = random(0.0, 1.0)\r\n return vec(r, g, b) # A color is a three-element vec\r", "def _genRandomColor():\n b = random.randint(0, 255)\n g = 
random.randint(0, 255)\n r = random.randint(0, 255)\n return (b, g, r)", "def generate_colour():\n red = random.randrange(0, 256)\n green = random.randrange(0, 256)\n blue = random.randrange(0, 256)\n alpha = random.randrange(0, 256)\n return (red, green, blue, alpha)", "def colors(k): \n ret = []\n for i in range(k):\n ret.append((random.uniform(0, 1), random.uniform(0, 1), random.uniform(0, 1)))\n return ret", "def test_shaped_instance(self, seed):\n dim = Dimension(\"yolo\", \"norm\", 0.9, shape=(3, 2))\n samples = dim.sample(seed=seed)\n assert len(samples) == 1\n assert_eq(dists.norm.rvs(0.9, size=(3, 2)), samples[0])\n\n assert dim.shape == (3, 2)\n\n dim = Dimension(\"yolo\", \"norm\", 0.9, shape=4)\n samples = dim.sample(seed=seed)\n assert len(samples) == 1\n assert_eq(dists.norm.rvs(0.9, size=4), samples[0])\n\n assert dim.shape == (4,)", "def create_color():\n r = random.randint(0,255)\n g = random.randint(0,255)\n b = random.randint(0,255)\n a = random.randint(0,255)\n return introcs.RGB(r,g,b,a)", "def createColors():\n\n colors = \"Blue\", \"Green\", \"Yellow\", \"Red\"\n color_list = []\n color_colum = []\n\n for i in range(15): #Create 2D list of 15*25 with colors\n color_colum = []\n for k in range(25):\n color_colum.append(random.choice(colors))\n color_list.append(color_colum)\n \n return color_list", "def uniqueish_color():\n return plt.cm.gist_ncar(np.random.random())", "def create_rand_grid(grid_size):\n\n return [[randint(0, 1) for x in range(0, grid_size)] for y in range(0, grid_size)]", "def rand_branch_color():\n red = random.randint(0, 100)\n green = random.randint(175, 255)\n blue = random.randint(0, 100)\n return (red, green, blue)", "def genrandimg(args) -> None:\n\n size = (int(args.x), int(args.y))\n fp = Image.new(\"RGB\", size)\n data = []\n\n if not args.c: # If color\n for i in range(size[0]*size[1]):\n r = random.choice([0x00, 0xff])\n data.append((r, r, r)) # Each RGB value is the same random value\n else: # Else black-and-white\n for i in range(size[0]*size[1]):\n r = [random.choice(range(0, 256)) for _ in range(0, 3)]\n r = (r[0], r[1], r[2]) # Choose 3 random numbers for different RGB values\n data.append(r)\n\n fp.putdata(data)\n print(\"Saving to %s...\" % args.o)\n fp.save(args.o)\n fp.close()", "def make_random_nodes(size=100, ndim=2, expand=True):\n \n coords = np.random.random(size=size*ndim).reshape((-1,ndim))\n if expand:\n coords = coords * size**(1/ndim)\n return coords", "def _rand_color(self):\n\n return self._rand_elem(COLOR_NAMES)" ]
[ "0.72354615", "0.681743", "0.67759746", "0.65923506", "0.647772", "0.6391293", "0.6389239", "0.6381156", "0.63713896", "0.6345931", "0.6305542", "0.62956303", "0.62650055", "0.62281466", "0.6208814", "0.6204611", "0.61832035", "0.617489", "0.6139714", "0.6136026", "0.6124625", "0.60273397", "0.5979785", "0.5954721", "0.5953351", "0.59367096", "0.59269786", "0.5872487", "0.5867898", "0.58649373" ]
0.7120646
1
Finds a dihedral angle adjacent to the selected atoms that includes a new atom
def _find_dihedral(selected):
    atom_name = lambda atom: atom.fullName()
    atom_mass = lambda atom: atom.mass()
    # Loop over possible nearest neighbors
    for a2 in selected:
        # Find the new atom
        attached_to_a2 = sorted([a for a in a2.bondedTo() \
            if a not in selected], key=atom_name)
        for a1 in sorted(attached_to_a2, key=atom_mass, reverse=True):
            # Find the third atom
            attached_to_a3 = sorted([a for a in a2.bondedTo() \
                if (a in selected) and (a != a1)], key=atom_name)
            for a3 in sorted(attached_to_a3, key=atom_mass, reverse=True):
                # Find the last atom
                attached_to_a4 = sorted([a for a in a3.bondedTo() \
                    if (a in selected) and (a != a2)], key=atom_name)
                for a4 in sorted(attached_to_a4, key=atom_mass, reverse=True):
                    return (a1, a2, a3, a4)
    print 'Selected atoms:', selected
    raise Exception('No new dihedral angle found!')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dihedral_calculator():\n\n\t# Prime with first 3 points\n\tp1 = Vector3((yield None))\n\tp2 = Vector3((yield None))\n\tp3 = Vector3((yield None))\n\n\t# Set up for first angle\n\tlastpoint = p3\n\tlastdisp = p3 - p2\n\tlastnormal = ((p2 - p1) @ lastdisp).normalize()\n\n\tangle = None\n\n\t# For each point starting with the 4th, we can compute a new angle\n\twhile True:\n\n\t\t# Yield the last angle (None the first time), get the next point\n\t\tnextpoint = Vector3((yield angle))\n\n\t\t# Displacement from previous point to current\n\t\tnextdisp = nextpoint - lastpoint\n\n\t\t# Normal vector to plane containing last 3 points\n\t\tnextnormal = (lastdisp @ nextdisp).normalize()\n\n\t\t# This one's complicated... see step 3 in source.\n\t\tx = lastnormal * nextnormal\n\t\ty = (lastnormal @ lastdisp.normalize()) * nextnormal\n\t\tangle = -math.atan2(y, x)\n\n\t\t# Current values used as previous in next loop\n\t\tlastpoint = nextpoint\n\t\tlastdisp = nextdisp\n\t\tlastnormal = nextnormal", "def get_dihedral_angles(self):\n mol = self.m\n c1 = mol.GetConformer(-1)\n torsma = '[!$(*#*)&!D1]~[!$(*#*)&!D1]'\n q = Chem.MolFromSmarts(torsma)\n matches = mol.GetSubstructMatches(q)\n nmat = len(matches)\n dic = {}\n for match in matches:\n j = match[0]\n k = match[1]\n bond = mol.GetBondBetweenAtoms(j, k)\n aj = mol.GetAtomWithIdx(j)\n ak = mol.GetAtomWithIdx(k)\n hj, hk = [ _hyb[_a.GetHybridization()] for _a in [aj,ak] ]\n iok1 = ( hj not in [2,3] )\n iok2 = ( hk not in [2,3] )\n if iok1 or iok2: continue\n for b1 in aj.GetBonds():\n if (b1.GetIdx() == bond.GetIdx()):\n continue\n i = b1.GetOtherAtomIdx(j)\n for b2 in ak.GetBonds():\n if (b2.GetIdx() == bond.GetIdx()) or (b2.GetIdx() == b1.GetIdx()):\n continue\n l = b2.GetOtherAtomIdx(k)\n # skip 3-membered rings\n if (l == i):\n continue\n _dang = rdMolTransforms.GetDihedralDeg(c1, i,j,k,l)\n dang = abs(_dang)\n assert dang <= 180.0\n ias4 = (i,j,k,l)\n if not self.wH:\n if np.any([ self.zs[iaa]==1 for iaa in ias4 ]):\n continue\n if self.key in ['z']:\n #print('atsi=',ias4, 'zsi=', [_zs[iaa] for iaa in ias4])\n zi,zj,zk,zl = [ self.zs[iaa] for iaa in ias4 ]\n if (zj==zk and zi>zl) or (zj>zk):\n ias4 = (l,k,j,i)\n #torsions.append(ias4)\n #_zi,_zj,_zk,_zl = [ zs[_] for _ in ias4 ]\n #typez = '%d-%d-%d-%d'%(_zi,_zj,_zk,_zl)\n type4 = tuple([self.zs[iaa] for iaa in ias4])\n if type4 in list(dic.keys()):\n dic[type4] += [dang]\n else:\n dic[type4] = [dang]\n elif self.key in ['ia','i']:\n type4 = ias4\n dic[type4] = dang\n else:\n raise Exception('#unknown key')\n return dic", "def sp2_dihedrals(atoms):\n\n #problems with atoms inbuilt dihedral method (doesn't match gaussview/jmol at all)\n #so we'll use one taken from http://stackoverflow.com/questions/20305272/dihedral-torsion-angle-from-four-points-in-cartesian-coordinates-in-python\n def get_dihedral(p):\n b = p[:-1] - p[1:]\n b[0] *= -1\n v = np.array([v - (v.dot(b[1])/b[1].dot(b[1])) * b[1] for v in [b[0], b[2]]])\n # Normalize vectors\n v /= np.sqrt(np.einsum('...i,...i', v, v)).reshape(-1,1)\n b1 = b[1] / np.linalg.norm(b[1])\n x = np.dot(v[0], v[1])\n m = np.cross(v[0], b1)\n y = np.dot(m, v[1])\n return np.degrees(np.arctan2(y, x))\n\n mol = to_molmod(atoms)\n data = []\n\n for i in range(len(atoms)):\n if len(mol.graph.neighbors[i]) == 3:\n atom_indices = [i] + list(mol.graph.neighbors[i])\n atom_positions = np.array([atoms[temp_index].position for temp_index in atom_indices])\n #dihedral = atoms.get_dihedral(atom_indices)\n dihedral = get_dihedral(atom_positions)\n result = (i, 
dihedral)\n data.append(result)\n\n return data", "def calculate_dihedral_angles(mol, dihedral_atom_sets):\n\n # Create list for the dihedrals (to be ordered in the same order as the input dihedral sets)\n dihedral_angles = []\n # Now calculate the dihedral angles between the sets identified previously\n conf = mol.GetConformer()\n # Loop through the angles => 2-3 is the rotatable bonds, 1,4 are the neighbours of 2,3 respectively\n for at1, at2, at3, at4 in dihedral_atom_sets:\n # Get the coordinates of the positions\n pos1 = conf.GetAtomPosition(at1)\n pos2 = conf.GetAtomPosition(at2)\n pos3 = conf.GetAtomPosition(at3)\n pos4 = conf.GetAtomPosition(at4)\n # Need to calculate three vectors 1->2, 2->3, 3->4\n vec1 = pos2 - pos1\n vec2 = pos3 - pos2\n vec3 = pos4 - pos3\n # Get the normals to the two planes (vec1-vec2 plane and vec2-vec3 plane))\n cross12 = vec1.CrossProduct(vec2)\n cross23 = vec2.CrossProduct(vec3)\n # Normalise the normals\n cross12.Normalize()\n cross23.Normalize()\n # Calculate dot-product and then inverse cosine to get the angle\n dot_prod = cross12.DotProduct(cross23)\n dihedral_rad = math.acos(dot_prod)\n dihedral_deg = 180*dihedral_rad/math.pi\n dihedral_angles.append(dihedral_deg)\n return dihedral_angles", "def calc_dihedral(v1, v2, v3, v4):\n ab = v1 - v2\n cb = v3 - v2\n db = v4 - v3\n u = ab ** cb\n v = db ** cb\n w = u ** v\n angle = u.angle(v)\n # Determine sign of angle\n try:\n if cb.angle(w) > 0.001:\n angle = -angle\n except ZeroDivisionError:\n # dihedral=pi\n pass\n return angle", "def addDihedralBond(a1, a2, length, angleInfo, dihedInfo):\n\n\tif a1.molecule == a2.molecule:\n\t\traise ValueError(\"Atoms to be bonded must be in different models\")\n\n\t# first, get the distance correct\n\tfrom chimera import Xform, cross, angle, Point\n\tdvector = a1.xformCoord() - a2.xformCoord()\n\tdvector.length = dvector.length + length\n\topenState = a2.molecule.openState\n\topenState.globalXform(Xform.translation(dvector))\n\n\t# then angle\n\tif angleInfo:\n\t\tatoms, angleVal = angleInfo\n\t\tp1, p2, p3 = [a.xformCoord() for a in atoms]\n\t\taxis = cross(p1-p2, p2-p3)\n\t\tcurAngle = angle(p1, p2, p3)\n\t\tdelta = angleVal - curAngle\n\t\tv2 = p2 - Point(0.0, 0.0, 0.0)\n\t\ttrans1 = Xform.translation(v2)\n\t\tv2.negate()\n\t\ttrans2 = Xform.translation(v2)\n\t\ttrans1.multiply(Xform.rotation(axis, delta))\n\t\ttrans1.multiply(trans2)\n\t\topenState.globalXform(trans1)", "def getDihedrals(self):\n uniqKpList = self.getFlagData('DIHEDRAL_FORCE_CONSTANT')\n uniqPeriodList = self.getFlagData('DIHEDRAL_PERIODICITY')\n uniqPhaseList = self.getFlagData('DIHEDRAL_PHASE')\n # for list below, true atom number = abs(index)/3 + 1\n dihCodeHList = self.getFlagData('DIHEDRALS_INC_HYDROGEN')\n dihCodeNonHList = self.getFlagData('DIHEDRALS_WITHOUT_HYDROGEN')\n dihCodeList = dihCodeHList + dihCodeNonHList\n properDih = []\n improperDih = []\n condProperDih = [] # list of dihedrals condensed by the same quartet\n #atomPairs = []\n atomPairs = set()\n for i in xrange(0, len(dihCodeList), 5):\n idAtom1 = dihCodeList[i] / 3 # remember python starts with id 0\n idAtom2 = dihCodeList[i+1] / 3\n # 3 and 4 indexes can be negative: if id3 < 0, end group interations\n # in amber are to be ignored; if id4 < 0, dihedral is improper\n idAtom3raw = dihCodeList[i+2] / 3 # can be negative -> exclude from 1-4vdw\n idAtom4raw = dihCodeList[i+3] / 3 # can be negative -> Improper\n idAtom3 = abs(idAtom3raw)\n idAtom4 = abs(idAtom4raw)\n dihTypeId = dihCodeList[i+4] - 1\n atom1 = 
self.atoms[idAtom1]\n atom2 = self.atoms[idAtom2]\n atom3 = self.atoms[idAtom3]\n atom4 = self.atoms[idAtom4]\n kPhi = uniqKpList[dihTypeId] # already divided by IDIVF\n period = int(uniqPeriodList[dihTypeId]) # integer\n phase = uniqPhaseList[dihTypeId]# angle given in rad in prmtop\n atoms = [atom1, atom2, atom3, atom4]\n dihedral = Dihedral(atoms, kPhi, period, phase)\n if idAtom4raw > 0:\n try: atomsPrev = properDih[-1].atoms\n except: atomsPrev = []\n properDih.append(dihedral)\n if idAtom3raw < 0 and atomsPrev == atoms:\n condProperDih[-1].append(dihedral)\n else:\n condProperDih.append([dihedral])\n pair = (atom1, atom4)\n #if atomPairs.count(pair) == 0 and idAtom3raw > 0:\n if idAtom3raw > 0:\n atomPairs.add(pair)\n else:\n improperDih.append(dihedral)\n try: atomPairs = sorted(atomPairs)\n except: pass\n self.properDihedrals = properDih\n self.improperDihedrals = improperDih\n self.condensedProperDihedrals = condProperDih # [[],[],...]\n self.atomPairs = atomPairs # set((atom1, atom2), ...)\n self.printDebug(\"getDihedrals done\")", "def bond_angles_wrt_bond(current, next, xy, NL, KL):\n n_tmp = NL[next, np.argwhere(KL[next].ravel())]\n if len(n_tmp) == 1:\n print 'le: The bond is a lone bond, not part of a triangle, so returning neighbor as next particle'\n neighbors = n_tmp\n else:\n neighbors = np.delete(n_tmp, np.where(n_tmp == current)[0])\n # print 'n_tmp = ', n_tmp\n # print 'neighbors = ', neighbors\n angles = np.mod(np.arctan2(xy[neighbors, 1] - xy[next, 1],\n xy[neighbors, 0] - xy[next, 0]).ravel() -\n np.arctan2(xy[current, 1] - xy[next, 1],\n xy[current, 0] - xy[next, 0]).ravel(),\n 2 * np.pi)\n return angles, neighbors", "def generate_dihedral_matrices(protein):\n\n #double check maths for this to be safe (particularly signs)\n\n natoms = len(protein.atoms)\n ndihedrals = len(protein.dihedrals)\n\n A = np.zeros([ndihedrals, 3*natoms])\n force_constants = np.zeros(ndihedrals)\n for dihedral in protein.dihedrals:\n \n atom1_id = dihedral.atom1.id\n atom2_id = dihedral.atom2.id\n atom3_id = dihedral.atom3.id\n atom4_id = dihedral.atom4.id\n\n atom1_xyz = dihedral.atom1.xyz\n atom2_xyz = dihedral.atom2.xyz\n atom3_xyz = dihedral.atom3.xyz\n atom4_xyz = dihedral.atom4.xyz\n\n four_centre_length = np.linalg.norm(atom1_xyz - atom4_xyz)\n\n row = A[dihedral.id]\n row[[3*atom1_id, (3*atom1_id)+1, (3*atom1_id)+2]] = -((atom1_xyz - atom3_xyz) + (atom4_xyz - atom2_xyz))/four_centre_length \n row[[3*atom2_id, (3*atom2_id)+1, (3*atom2_id)+2]] = -((atom2_xyz - atom1_xyz) + (atom2_xyz - atom3_xyz) + (atom2_xyz - atom4_xyz))/four_centre_length\n row[[3*atom3_id, (3*atom3_id)+1, (3*atom3_id)+2]] = -((atom3_xyz - atom4_xyz) + (atom3_xyz - atom1_xyz) + (atom3_xyz - atom2_xyz))/four_centre_length\n row[[3*atom4_id, (3*atom4_id)+1, (3*atom4_id)+2]] = -((atom4_xyz - atom2_xyz) + (atom1_xyz - atom3_xyz))/four_centre_length\n\n force_constant = dihedral.force_constant\n force_constants[dihedral.id] = force_constant\n\n A = scipy.sparse.csr_matrix(A)\n G = scipy.sparse.diags(force_constants)\n\n return (A, G)", "def calculate_dihedral_atom_equivalences(mol1, mol2):\n\n # Check that the mols are identical-ish\n if mol1.GetNumHeavyAtoms() != mol2.GetNumHeavyAtoms():\n raise EqualityError('Molecules are not identical (Num Atoms) {!s} != {!s}.\\n{!s}\\n{!s}'.format(mol1.GetNumHeavyAtoms(),mol2.GetNumHeavyAtoms(),Chem.MolToSmiles(mol1),Chem.MolToSmiles(mol2)))\n if mol1.GetNumBonds() != mol2.GetNumBonds():\n raise EqualityError('Molecules are not identical (Num Bonds) {!s} != 
{!s}:\\n{!s}\\n{!s}'.format(mol1.GetNumBonds(),mol2.GetNumBonds(),Chem.MolToSmiles(mol1), Chem.MolToSmiles(mol2)))\n\n # Gets a list of lists of atoms in mol1 (12,16,3, ...) that match the atoms in mol2 (1,2,3, ...)\n match_patterns = mol1.GetSubstructMatches(mol2, uniquify=False)\n # Get the quadruplets to calculate the dihedrals from for mol1\n mol1_atom_sets = identify_rotatable_bond_atom_pairs(mol1)\n num_atms = mol1.GetNumHeavyAtoms()\n # List for returning\n paired_atom_sets = []\n # Iterate through the different ways of overlaying the molecule (ensures we get the minimum rmsd)\n for match_pattern in match_patterns:\n # Translate from the atoms in mol1 to the atoms in mol2 (for this match_pattern)\n trans_dict = dict(zip(match_pattern, range(0,num_atms)))\n # Translate the atoms in mol1 to the atoms in mol2\n mol2_atom_sets = [ tuple([trans_dict[atm] for atm in bond_set]) for bond_set in mol1_atom_sets]\n # Add to list\n paired_atom_sets.append((mol1_atom_sets, mol2_atom_sets))\n # Check that the atom types are identical (test)\n mol1_atom_types = [ tuple([mol1.GetAtomWithIdx(atm).GetAtomicNum() for atm in bond_set]) for bond_set in mol1_atom_sets]\n mol2_atom_types = [ tuple([mol2.GetAtomWithIdx(atm).GetAtomicNum() for atm in bond_set]) for bond_set in mol2_atom_sets]\n assert mol1_atom_types == mol2_atom_types, \"ATOM TYPES ARE NOT THE SAME ON THE DIHEDRAL ANGLE TO BE CALCULATED - THERE'S BEEN A MATCHING ERROR\"\n # Return the list of lists of paired atoms between the structures\n return paired_atom_sets", "def calculate_dihedral_angle_differences(mol1, mol2):\n\n # Get the dihedrals to calculate for both of the molecules (possibly multiple ways of overlaying the mols if symmetry exists)\n atom_sets = calculate_dihedral_atom_equivalences(mol1, mol2)\n # list of possible rmsds for the molecule\n differences = []\n # Iterate through and calculate the rmsd for each set of atom equivalences\n for mol1_atom_set, mol2_atom_set in atom_sets:\n # Calculate the dihedrals of both\n mol1_dihedrals = calculate_dihedral_angles(mol1, mol1_atom_set)\n mol2_dihedrals = calculate_dihedral_angles(mol2, mol2_atom_set)\n # Calculate the differences squared for each angle difference\n diffs = [an1-an2 for an1, an2 in zip(mol1_dihedrals,mol2_dihedrals)]\n # Append list of angle differences\n differences.append(diffs)\n\n return atom_sets, differences", "def selectLinkedElement():\n\n collector = FilteredElementCollector(doc).ToElementIds()\n wrongAngle = []\n for id in collector:\n \n element= doc.GetElement(id)\n\n if element.get_Parameter(BuiltInParameter.FABRICATION_PART_ANGLE) is not None:\n try:\n chord = element.CenterlineLength\n angle = element.get_Parameter(BuiltInParameter.FABRICATION_PART_ANGLE).AsDouble()\n angle = degrees(angle)\n diameter = element.get_Parameter(BuiltInParameter.FABRICATION_PART_DIAMETER_IN).AsDouble()\n radius = ((360/angle)*chord )/(pi*2)\n \n if round(radius,4) == round(diameter,4):\n wrongAngle.append(id)\n\n except Exception as ex:\n print(ex, str(id))\n pass\n\n wrongAngle = List[ElementId](wrongAngle)\n uidoc.Selection.SetElementIds(wrongAngle)", "def planInternal(r):\n\t# First find the atoms that are connected to preceding\n\t# or succeeding residues. 
If none, pick an arbitrary atom.\n\t# These atoms are always interpolated in Cartesian space.\n\tplan = []\n\tdone = set([])\n\ttodo = []\n\tm = r.molecule\n\tneighbors = set([m.residueBefore(r), m.residueAfter(r)])\n\tfixed = set([])\n\tfor a0 in r.atoms:\n\t\tfor na in a0.primaryNeighbors():\n\t\t\tif na.residue in neighbors:\n\t\t\t\tfixed.add(a0)\n\t\t\t\tbreak\n\tif not fixed:\n\t\tfixed.add(r.atoms[0])\n\tfor a0 in fixed:\n\t\tplan.append((interpCartesian, (a0,)))\n\t\t_finished(a0, done, todo)\n\n\t# Now we look for atoms that are connected to those in\n\t# \"fixed\". If we can find three atoms that define a\n\t# dihedral, we use dihedral interpolation; otherwise\n\t# we use Cartesian interpolation.\n\twhile todo:\n\t\tna, a = todo.pop(0)\n\t\tif na in done:\n\t\t\t# May be part of a loop and have been\n\t\t\t# visited via another path\n\t\t\tcontinue\n\t\tanchors = _findAnchor(a, done)\n\t\tif len(anchors) >= 2:\n\t\t\t# Found two anchor atoms connected to the\n\t\t\t# fixed atom, we can use them for defining\n\t\t\t# the dihedral\n\t\t\tplan.append((interpInternal,\n\t\t\t\t\t(na, a, anchors[0], anchors[1])))\n\t\t\t_finished(na, done, todo)\n\t\t\tcontinue\n\t\tif len(anchors) == 1:\n\t\t\t# Found one anchor atom connected to the\n\t\t\t# fixed atom, so we need to get another\n\t\t\t# anchor atom connected to the one we found\n\t\t\t# (but is not our original fixed atom)\n\t\t\tanchors2 = _findAnchor(anchors[0], done, a)\n\t\t\tif len(anchors2) >= 1:\n\t\t\t\tplan.append((interpInternal,\n\t\t\t\t\t(na, a, anchors[0], anchors2[0])))\n\t\t\t\t_finished(na, done, todo)\n\t\t\t\tcontinue\n\t\t# Cannot find three fixed atoms to define dihedral.\n\t\t# Use Cartesian interpolation for this atom.\n\t\tplan.append((interpCartesian, (na,)))\n\t\t_finished(na, done, todo)\n\treturn plan", "def get_dihedral(p0,p1,p2,p3,unit):\n if unit == 'Ang':\n p0 = p0*0.529177249\n p1 = p1*0.529177249\n p2 = p2*0.529177249\n p3 = p3*0.529177249\n\n b0 = -1.0*(p1 - p0)\n b1 = p2 - p1\n b2 = p3 - p2\n\n # normalize b1 so that it does not influence magnitude of vector\n # rejections that come next\n b1 /= linalg.norm(b1)\n\n # vector rejections\n # v = projection of b0 onto plane perpendicular to b1\n # = b0 minus component that aligns with b1\n # w = projection of b2 onto plane perpendicular to b1\n # = b2 minus component that aligns with b1\n v = b0 - dot(b0, b1)*b1\n w = b2 - dot(b2, b1)*b1\n\n # angle between v and w in a plane is the torsion angle\n # v and w may not be normalized but that's fine since tan is y/x\n x = dot(v, w)\n y = dot(cross(b1, v), w)\n return degrees(arctan2(y, x))\n\n #q1 = subtract(p1,p0) # b - a \n #q2 = subtract(p2,p1) # c - b \n #q3 = subtract(p3,p2) # d - c\n #print(q1,q2)\n\n #q1_x_q2 = cross(q1,q2) \n #q2_x_q3 = cross(q2,q3)\n\n #n1 = q1_x_q2/sqrt(dot(q1_x_q2,q1_x_q2)) \n #n2 = q2_x_q3/sqrt(dot(q2_x_q3,q2_x_q3))\n\n #u1 = n2\n #u3 = q2/(sqrt(dot(q2,q2))) \n #u2 = cross(u3,u1)\n\n #cos_theta = dot(n1,u1)\n #sin_theta = dot(n1,u2)\n ## Calculate theta\n #theta = -atan2(sin_theta,cos_theta)\n ## it is different from atan2 from fortran math.atan2(y,x)\n #theta_deg = degrees(theta)\n #return(theta_deg)", "def identify_bonds(chosen_atom, atom_list):\n list_of_hydrogens = ['H15', 'H14', 'H13', 'H12', 'H11', 'H10', 'H9', 'H8', 'H7', 'H6', 'H5', 'H4', 'H3', 'H2', 'H1'] \n if ((chosen_atom.atom_name not in list_of_hydrogens) and (chosen_atom.residue_name != \"P1A\")):\n nearby_atoms_crude = [atom for atom in atom_list if ((abs(chosen_atom.x - atom.x) <= 2) and (abs(chosen_atom.y - 
atom.y) <= 2) and (abs(chosen_atom.z - atom.z) <= 2))]\n nearby_atoms = [atom for atom in nearby_atoms_crude if (0 < calculate_3D_distance_2_atoms(chosen_atom,atom) <= 2)]\n identified_bonds = [[atom, calculate_3D_distance_2_atoms(chosen_atom, atom)] for atom in nearby_atoms if (check_bond(chosen_atom, atom) == True)] \n elif ((chosen_atom.atom_name not in list_of_hydrogens) and (chosen_atom.residue_name == \"P1A\")):\n nearby_atoms_crude = [atom for atom in atom_list if ((abs(chosen_atom.x - atom.x) <= 2) and (abs(chosen_atom.y - atom.y) <= 2) and (abs(chosen_atom.z - atom.z) <= 2))]\n nearby_atoms = [atom for atom in nearby_atoms_crude if (0 < calculate_3D_distance_2_atoms(chosen_atom,atom) <= 1.8)]\n identified_bonds = [[atom, calculate_3D_distance_2_atoms(chosen_atom, atom)] for atom in nearby_atoms if (check_bond(chosen_atom, atom) == True)] \n else:\n nearby_atoms_crude = [atom for atom in atom_list if ((abs(chosen_atom.x - atom.x) <= 1.6) and (abs(chosen_atom.y - atom.y) <= 1.6) and (abs(chosen_atom.z - atom.z) <= 1.6))]\n nearby_atoms = [atom for atom in nearby_atoms_crude if (0 < calculate_3D_distance_2_atoms(chosen_atom,atom) <= 1.6)]\n identified_bonds = [[atom, calculate_3D_distance_2_atoms(chosen_atom, atom)] for atom in nearby_atoms if (check_bond(chosen_atom, atom) == True)] \n for elements in nearby_atoms:\n if (check_if_no_bond(chosen_atom, elements, bond_list, bond_list_3) == True):\n nearby_atoms.remove(elements)\n if (len(nearby_atoms) == len(identified_bonds)):\n return identified_bonds\n else:\n return []", "def getDihedrals(self):\n try:\n return self._dihedralList\n except AttributeError:\n pass\n forceConstant=self._raw_data[\"DIHEDRAL_FORCE_CONSTANT\"]\n phase=self._raw_data[\"DIHEDRAL_PHASE\"]\n periodicity=self._raw_data[\"DIHEDRAL_PERIODICITY\"]\n dihedralPointers = self._raw_data[\"DIHEDRALS_INC_HYDROGEN\"] \\\n +self._raw_data[\"DIHEDRALS_WITHOUT_HYDROGEN\"]\n self._dihedralList=[]\n forceConstConversionFactor = (units.kilocalorie_per_mole).conversion_factor_to(units.kilojoule_per_mole)\n for ii in range(0,len(dihedralPointers),5):\n if int(dihedralPointers[ii])<0 or int(dihedralPointers[ii+1])<0:\n raise Exception(\"Found negative dihedral atom pointers %s\"\n % ((dihedralPointers[ii],\n dihedralPointers[ii+1],\n dihedralPointers[ii+2],\n dihedralPointers[ii+3]),))\n iType=int(dihedralPointers[ii+4])-1\n self._dihedralList.append((int(dihedralPointers[ii])//3,\n int(dihedralPointers[ii+1])//3,\n abs(int(dihedralPointers[ii+2]))//3,\n abs(int(dihedralPointers[ii+3]))//3,\n float(forceConstant[iType])*forceConstConversionFactor,\n float(phase[iType]),\n int(0.5+float(periodicity[iType]))))\n return self._dihedralList", "def set_potential_aperture_angle_to(self, atom_hash, new_distance):\n #TODO: Finish this.\n pseudopotentials = self.identify_pseudocarbon_potentials(atom_hash)\n potential_coords_list = []\n deletion_list = []\n\n for pseudopotential in pseudopotentials:\n # get rotation axis via cross-products\n # if 3 atoms within pseudo-distance this is an sp3 pseudo-carbon\n if len(pseudopotentials) == 3:\n pass\n\n # if 4 atoms within pseudo-distance this is an sp2 2e pseudo-carbon\n elif len(pseudopotentials) == 4:\n pass\n\n\n # if 6 atoms within pseudo-distance this is an sp2 pseudo-carbon\n elif len(pseudopotentials) == 6:\n pass\n\n # apply euler-rodriguez\n\n vector_from_pseudo_carbon = self.vectorise_atom(pseudopotential['#']) - self.vectorise_atom(atom_hash)\n new_vector_from_pseudocarbon = self.lengtherise_vector(vector_from_pseudo_carbon, 
new_distance)\n new_potential_coordinates = self.vectorise_atom(atom_hash) + new_vector_from_pseudocarbon\n\n potential_coords_list.append(new_potential_coordinates)\n deletion_list.append(pseudopotential['#'])\n\n self.delete_specified_atoms(deletion_list)\n for potential_coord in potential_coords_list:\n self.write_coord(potential_coord, overwrite=False)", "def dilation(hexs, diameter):\n new_hex_set = set(hexs)\n for a_hex in hexs:\n adjacent_hexs = h3.k_ring(a_hex, diameter)\n new_hex_set = new_hex_set.union(adjacent_hexs)\n new_hexs = list(new_hex_set)\n return new_hexs", "def _get_dihedral_types(\n structure, use_rb_torsions, use_dihedrals, epsilon_conversion_factor\n):\n if use_rb_torsions:\n unique_dihedral_types = _get_unique_rb_torsion_types(\n structure, epsilon_conversion_factor\n )\n\n dihedral_types = [\n unique_dihedral_types[\n _get_dihedral_rb_torsion_key(\n dihedral, epsilon_conversion_factor\n )\n ]\n for dihedral in structure.rb_torsions\n ]\n\n elif use_dihedrals:\n print_warn_text = (\n \"WARNING: Using the charmm style and impropers is not \"\n \"available in the current version of this psf, pdb, and GOMC writer.\"\n )\n warn(print_warn_text)\n return None, None\n\n unique_dihedral_check_dict = OrderedDict()\n for i_value_dihed, i_key_dihed in unique_dihedral_types.items():\n i_value_duplicated = False\n for j_value_dihed, j_key_dihed in unique_dihedral_types.items():\n j_value_dihed_reorder = (\n j_value_dihed[0],\n j_value_dihed[1],\n j_value_dihed[2],\n j_value_dihed[3],\n j_value_dihed[4],\n j_value_dihed[5],\n j_value_dihed[6],\n j_value_dihed[7],\n j_value_dihed[11],\n j_value_dihed[10],\n j_value_dihed[9],\n j_value_dihed[8],\n j_value_dihed[15],\n j_value_dihed[14],\n j_value_dihed[13],\n j_value_dihed[12],\n )\n\n if i_value_dihed == j_value_dihed_reorder:\n i_value_duplicated = True\n if i_value_dihed[8] > j_value_dihed[8]:\n unique_dihedral_check_dict.update(\n {j_value_dihed: len(unique_dihedral_check_dict) + 1}\n )\n else:\n unique_dihedral_check_dict.update(\n {i_value_dihed: len(unique_dihedral_check_dict) + 1}\n )\n if i_value_duplicated is False:\n unique_dihedral_check_dict.update(\n {i_value_dihed: len(unique_dihedral_check_dict) + 1}\n )\n\n unique_dihedral_types = OrderedDict(\n [(y, x) for y, x in unique_dihedral_check_dict.items()]\n )\n\n return dihedral_types, unique_dihedral_types", "def _get_dihedral_rb_torsion_key(dihedral, epsilon_conversion_factor):\n\n lj_unit = 1 / epsilon_conversion_factor\n\n dihed_type_RB_c0 = round(dihedral.type.c0 * lj_unit, 8)\n dihed_type_RB_c1 = round(dihedral.type.c1 * lj_unit, 8)\n dihed_type_RB_c2 = round(dihedral.type.c2 * lj_unit, 8)\n dihed_type_RB_c3 = round(dihedral.type.c3 * lj_unit, 8)\n dihed_type_RB_c4 = round(dihedral.type.c4 * lj_unit, 8)\n dihed_type_RB_c5 = round(dihedral.type.c5 * lj_unit, 8)\n\n dihed_type_scee = round(dihedral.type.scee, 4)\n dihed_type_scnb = round(dihedral.type.scnb, 4)\n\n dihed_atom_1_type = dihedral.atom1.type\n dihed_atom_2_type = dihedral.atom2.type\n dihed_atom_3_type = dihedral.atom3.type\n dihed_atom_4_type = dihedral.atom4.type\n\n dihed_atom_1_res_type = dihedral.atom1.residue.name\n dihed_atom_2_res_type = dihedral.atom2.residue.name\n dihed_atom_3_res_type = dihedral.atom3.residue.name\n dihed_atom_4_res_type = dihedral.atom4.residue.name\n\n return (\n dihed_type_RB_c0,\n dihed_type_RB_c1,\n dihed_type_RB_c2,\n dihed_type_RB_c3,\n dihed_type_RB_c4,\n dihed_type_RB_c5,\n dihed_type_scee,\n dihed_type_scnb,\n dihed_atom_1_type,\n dihed_atom_2_type,\n 
dihed_atom_3_type,\n dihed_atom_4_type,\n dihed_atom_1_res_type,\n dihed_atom_2_res_type,\n dihed_atom_3_res_type,\n dihed_atom_4_res_type,\n )", "def find_girth(self):\r\n girth = sys.maxint\r\n face = []\r\n vertices = list(self.graph.vertices)\r\n shift = random.randint(0,len(vertices)-1)\r\n vertices = vertices[shift:] + vertices[:shift]\r\n random.shuffle(vertices)\r\n \r\n for vertex in vertices:\r\n s = set() # set of explored edge id\r\n distance = {}\r\n distance[vertex.id] = 0\r\n father = {}\r\n father[vertex.id] = (None, None) # (a,b) a is v_id, b is edge id\r\n nodes = [vertex.id] # stack for the vertices to start with\r\n while len(nodes) > 0:\r\n node = nodes.pop(0)\r\n v_a = self.graph.get_vertex(node)\r\n nbrs = list(v_a.neighbors)\r\n random.shuffle(nbrs)\r\n for edge in nbrs:\r\n if not edge.id in s:\r\n another = edge.get_another_vertex(node)\r\n if not distance.has_key(another):\r\n nodes.append(another)\r\n s.add(edge.id)\r\n father[another] = (node, edge.id)\r\n distance[another] = distance[node] + 1\r\n elif distance[another] + distance[node] + 1 < girth:\r\n girth = distance[another] + distance[node] + 1\r\n\r\n face = list()\r\n face.append(edge.id)\r\n start = father[another]\r\n while start[0] is not None:\r\n face.append(start[1])\r\n start = father[start[0]]\r\n face.reverse()\r\n start = father[node]\r\n while start[0] is not None:\r\n face.append(start[1])\r\n start = father[start[0]]\r\n\r\n cycle = []\r\n edge0 = self.graph.get_edge(face[0])\r\n edge1 = self.graph.get_edge(face[1])\r\n (a, b) = edge0.get_endpoints()\r\n if a in edge1.get_endpoints():\r\n a, b = b, a\r\n for e in face:\r\n cycle.append(a)\r\n a = self.graph.get_edge(e).get_another_vertex(a)\r\n # logger.info(\"girth: %s\",cycle)\r\n return (face, cycle)", "def test_dihedrals(pose):\n for i in range(1, pose.total_residue()+1):\n\n print \"\\n\"+str(pose.pdb_info.pose2pdb(i))\n try:\n print \"Phi: \"+repr(math.degrees(pose.phi(i)))\n print \"Psi: \"+repr(math.degrees(pose.psi(i)))\n print \"Omega:\"+repr(math.degrees(pose.omega(i)))\n except Exception:\n \"Print could not get dihedral for resnum \"+repr(i)\n\n return True", "def modify_cand():\n if col_i + 1 < len(lastrow):\n return (lastrow[col_i + 1] +\n diff(left_elem, right_elem, key=key + [left_i],\n minimal=minimal, verbose=False))", "def _determine_extra_angles(self, angle_force, reference_topology, growth_indices):\n from simtk import openmm\n import itertools\n from openeye import oechem, oeomega\n\n if len(growth_indices)==0:\n return\n angle_force_constant = 400.0*unit.kilojoules_per_mole/unit.radians**2\n atoms = list(reference_topology.atoms())\n growth_indices = list(growth_indices)\n #get residue from first atom\n residue = atoms[growth_indices[0].idx].residue\n try:\n oemol = FFAllAngleGeometryEngine._oemol_from_residue(residue)\n except Exception as e:\n print(\"Could not generate an oemol from the residue.\")\n print(e)\n\n #get the omega geometry of the molecule:\n\n omega = oeomega.OEOmega()\n omega.SetMaxConfs(1)\n omega.SetStrictStereo(False) #TODO: fix stereochem\n omega(oemol)\n\n #we now have the residue as an oemol. 
Time to find the relevant angles.\n #There's no equivalent to OEGetTorsions, so first find atoms that are relevant\n #TODO: find out if that's really true\n aromatic_pred = oechem.OEIsAromaticAtom()\n heavy_pred = oechem.OEIsHeavy()\n angle_criteria = oechem.OEAndAtom(aromatic_pred, heavy_pred)\n\n #get all heavy aromatic atoms:\n #TODO: do this more efficiently\n heavy_aromatics = list(oemol.GetAtoms(angle_criteria))\n for atom in heavy_aromatics:\n #bonded_atoms = [bonded_atom for bonded_atom in list(atom.GetAtoms()) if bonded_atom in heavy_aromatics]\n bonded_atoms = list(atom.GetAtoms())\n for angle_atoms in itertools.combinations(bonded_atoms, 2):\n angle = oechem.OEGetAngle(oemol, angle_atoms[0], atom, angle_atoms[1])\n atom_indices = [angle_atoms[0].GetData(\"topology_index\"), atom.GetData(\"topology_index\"), angle_atoms[1].GetData(\"topology_index\")]\n angle_radians = angle*unit.radian\n growth_idx = self._calculate_growth_idx(atom_indices, growth_indices)\n #If this is a CustomAngleForce, we need to pass the parameters as a list, and it will have the growth_idx parameter.\n #If it's a regular HarmonicAngleForce, there is no growth_index and the parameters are passed separately.\n if isinstance(angle_force, openmm.CustomAngleForce):\n angle_force.addAngle(atom_indices[0], atom_indices[1], atom_indices[2], [angle_radians, angle_force_constant, growth_idx])\n elif isinstance(angle_force, openmm.HarmonicAngleForce):\n angle_force.addAngle(atom_indices[0], atom_indices[1], atom_indices[2], angle_radians, angle_force_constant)\n else:\n raise ValueError(\"Angle force must be either CustomAngleForce or HarmonicAngleForce\")\n return angle_force", "def get_lig_dihedrals(np_xyz, lig_ndx, close_ndxs, inp):\n n_at1, n_at2 = np.sum(inp.lig1_n_per_bead), np.sum(inp.lig2_n_per_bead)\n n_core = int(len(np_xyz) - inp.lig1_num*n_at1 - inp.lig2_num*n_at2)\n core_xyz = np_xyz[:n_core]\n\n lig1_dihedrals, lig2_dihedrals = [], []\n\n if n_at1 >= 3:\n for i in range(inp.lig1_num):\n ndx0 = n_core + i*n_at1\n ndx1 = ndx0*1\n ndx2 = ndx1 + 1\n ndx3 = ndx1 + 2\n ndx4 = close_ndxs[lig_ndx[0][i]]#np.argsort(cdist([np_xyz[ndx1]], core_xyz))[0,0]\n dihedral = [ndx4, ndx1, ndx2, ndx3]\n lig1_dihedrals.append(dihedral)\n for j in range(n_at1-4):\n ndx1 = ndx0 + j\n ndx2 = ndx1 + 1\n ndx3 = ndx1 + 2\n ndx4 = ndx1 + 3\n dihedral = [ndx1, ndx2, ndx3, ndx4]\n lig1_dihedrals.append(dihedral)\n\n if n_at2 >= 3:\n for i in range(inp.lig2_num):\n ndx0 = n_core + n_at1*inp.lig1_num + i*n_at2\n ndx1 = ndx0*1\n ndx2 = ndx1 + 1\n ndx3 = ndx1 + 2\n ndx4 = close_ndxs[lig_ndx[1][i]]#np.argsort(cdist([np_xyz[ndx1]], core_xyz))[0,0]\n dihedral = [ndx4, ndx1, ndx2, ndx3]\n lig2_dihedrals.append(dihedral)\n for j in range(n_at2-4):\n ndx1 = ndx0 + j\n ndx2 = ndx1 + 1\n ndx3 = ndx1 + 2\n ndx4 = ndx1 + 3\n dihedral = [ndx1, ndx2, ndx3, ndx4]\n lig2_dihedrals.append(dihedral)\n\n return (lig1_dihedrals, lig2_dihedrals)", "def closer_ang(x,a,dir=0):\r\n if dir == 0:\r\n return a + smaller_ang(x-a)\r\n elif dir == 1:\r\n return a + (x-a)%(2*pi)\r\n elif dir == -1:\r\n return a + (x-a)%(2*pi) - 2*pi", "def find_contour(hole_atoms, atom_list):\n contour_atoms = []\n extra_atoms = []\n global bond_list\n bond_list = bond_list_1\n for atom in hole_atoms:\n c = [bond[0] for bond in identify_bonds(atom, atom_list) if ((bond[0] not in hole_atoms) and (bond[0] not in contour_atoms))]\n for element in c:\n contour_atoms.append(element)\n for atom in atom_list:\n c = [bond[0] for bond in identify_bonds(atom, atom_list)]\n count = 0\n for 
element in c:\n if element in contour_atoms:\n count += 1\n if (count >= 2):\n extra_atoms.append(atom)\n for atom in atom_list:\n c = [bond[0] for bond in identify_bonds(atom, atom_list)]\n for element in c:\n if ((element in contour_atoms) or (element in extra_atoms)):\n for i in [bond[0] for bond in identify_bonds(element, atom_list)]:\n if ((i in hole_atoms) and (atom not in hole_atoms) and (atom not in contour_atoms) and (atom not in extra_atoms)):\n extra_atoms.append(atom) \n \n contour_atoms = contour_atoms + extra_atoms\n \n extra_atoms2 = []\n for atom in contour_atoms:\n for atom2 in contour_atoms:\n if (atom != atom2):\n c = [bond[0] for bond in identify_bonds(atom, atom_list) if ((bond in identify_bonds(atom2, atom_list)) and (bond[0] not in (contour_atoms)))]\n if (len(c) != 0):\n extra_atoms2.append(c[0]) \n for element in extra_atoms2:\n contour_atoms.append(element)\n return contour_atoms", "def set_dihedral(self, pivots, scan, deg_increment):\n if deg_increment == 0:\n logger.warning('set_dihedral was called with zero increment for {label} with pivots {pivots}'.format(\n label=self.label, pivots=pivots))\n for rotor in self.rotors_dict.values(): # penalize this rotor to avoid inf. looping\n if rotor['pivots'] == pivots:\n rotor['times_dihedral_set'] += 1\n break\n else:\n for rotor in self.rotors_dict.values():\n if rotor['pivots'] == pivots and rotor['times_dihedral_set'] <= 10:\n rotor['times_dihedral_set'] += 1\n break\n else:\n logger.info('\\n\\n')\n for i, rotor in self.rotors_dict.items():\n logger.error('Rotor {i} with pivots {pivots} was set {times} times'.format(\n i=i, pivots=rotor['pivots'], times=rotor['times_dihedral_set']))\n raise RotorError('Rotors were set beyond the maximal number of times without converging')\n coordinates, atoms, _, _, _ = get_xyz_matrix(self.final_xyz)\n mol = molecules_from_xyz(self.final_xyz, multiplicity=self.multiplicity, charge=self.charge)[1]\n conf, rd_mol, indx_map = rdkit_conf_from_mol(mol, coordinates)\n rd_scan = [indx_map[i - 1] for i in scan] # convert the atom indices in `scan` to RDKit indices\n new_xyz = set_rdkit_dihedrals(conf, rd_mol, indx_map, rd_scan, deg_increment=deg_increment)\n self.initial_xyz = get_xyz_string(coords=new_xyz, symbols=atoms)", "def closer_angle(x, a, dir=0):\n if dir == 0:\n return a + smaller_angle(x-a)\n elif dir == 1:\n return a + (x-a)%(2*np.pi)\n elif dir == -1:\n return a + (x-a)%(2*np.pi) - 2*np.pi", "def calc_torsion(residues, include_residue=False, include_omega=False):\n\n\tlast_residue = None\n\tlast_contiguous = True\n\tlast_valid = False\n\n\tlast_omega = None\n\tlast_phi = None\n\n\tdef yield_vals(residue, omega, phi, psi):\n\t\tangles = (omega, phi, psi) if include_omega else (phi, psi)\n\t\treturn (residue, *angles) if include_residue else angles\n\n\tfor residue in residues:\n\n\t\t# Whether this residue is contiguous with the last and angles calculated\n\t\t# from that residue's atoms are valid\n\t\tis_contiguous = last_valid and residue.seq == last_residue.seq + 1\n\n\t\t# Reset the generator if not using atoms from last residue\n\t\tif not is_contiguous:\n\t\t\tangle_calculator = dihedral_calculator()\n\t\t\tangle_calculator.send(None) # Prime it\n\n\t\t# Get N, CA, and C atoms from residue\n\t\tbackbone_atoms = get_backbone_atoms(residue)\n\n\t\tif None in backbone_atoms:\n\t\t\t# Didn't get all backbone atoms - residue is invalid\n\t\t\tis_valid = False\n\t\t\tpsi = omega = phi = None\n\n\t\telse:\n\t\t\t# Residue good\n\t\t\tis_valid = True\n\n\t\t\t# Get backbone atom 
coords and calculate angles for residue\n\t\t\tbackbone_coords = [a.coord for a in backbone_atoms]\n\n\t\t\tpsi = angle_calculator.send(backbone_coords[0])\n\t\t\tomega = angle_calculator.send(backbone_coords[1])\n\t\t\tphi = angle_calculator.send(backbone_coords[2])\n\n\t\t# Yield angles for the previous residue (because calculating psi\n\t\t# required an atom from this residue)\n\t\tif last_residue is not None:\n\t\t\tyield yield_vals(\n\t\t\t\tlast_residue,\n\t\t\t\tlast_omega if last_contiguous else None,\n\t\t\t\tlast_phi if last_contiguous else None,\n\t\t\t\tpsi if is_contiguous else None,\n\t\t\t)\n\n\t\t# Keep track of state for previous residue\n\t\tlast_residue = residue\n\t\tlast_contiguous = is_contiguous\n\t\tlast_valid = is_valid\n\t\tlast_omega = omega\n\t\tlast_phi = phi\n\n\t# Last one is only partial - no value for psi\n\tyield yield_vals(\n\t\tlast_residue,\n\t\tlast_omega if last_contiguous else None,\n\t\tlast_phi if last_contiguous else None,\n\t\tNone\n\t)" ]
[ "0.6646527", "0.63952506", "0.6240537", "0.5905683", "0.588075", "0.5806025", "0.5764874", "0.56130666", "0.5525293", "0.5508618", "0.5460365", "0.53958726", "0.535405", "0.53534824", "0.5297335", "0.5294864", "0.52534354", "0.52349055", "0.52014965", "0.51829153", "0.51585686", "0.5158113", "0.51318425", "0.5131614", "0.5120848", "0.51107645", "0.50867856", "0.5049683", "0.5030174", "0.49682838" ]
0.8237362
0
Conversion from (internal or extended) BondAngleTorsion to Cartesian coordinates
def Cartesian(self, BAT): # Arrange BAT coordinates in convenient arrays offset = 6 if len(BAT) == (3 * self.natoms) else 0 bonds = BAT[offset + 3::3] angles = BAT[offset + 4::3] phase_torsions = BAT[offset + 5::3] torsions = [(phase_torsions[n] + phase_torsions[self._firstTorsionTInd[n]]) \ if self._firstTorsionTInd[n]!=n else phase_torsions[n] \ for n in range(self.ntorsions)] p1 = np.array([0., 0., 0.]) p2 = np.array([0., 0., BAT[offset]]) p3 = np.array([BAT[offset+1]*np.sin(BAT[offset+2]), 0., \ BAT[offset]-BAT[offset+1]*np.cos(BAT[offset+2])]) # If appropriate, rotate and translate the first three atoms if offset == 6: # Rotate the third atom by the appropriate value (phi, theta, omega) = BAT[3:6] co = np.cos(omega) so = np.sin(omega) Romega = np.array([[co, -so, 0], [so, co, 0], [0, 0, 1]]) p3 = Romega.dot(p3) # Rotate the second two atoms to point in the right direction cp = np.cos(phi) sp = np.sin(phi) ct = np.cos(theta) st = np.sin(theta) Re = np.array([[cp * ct, -sp, cp * st], [ct * sp, cp, sp * st], [-st, 0, ct]]) p2 = Re.dot(p2) p3 = Re.dot(p3) # Translate the first three atoms by the origin origin = np.array(BAT[:3]) p1 += origin p2 += origin p3 += origin XYZ = np.zeros((self.natoms, 3)) XYZ[self.rootInd[0]] = p1 XYZ[self.rootInd[1]] = p2 XYZ[self.rootInd[2]] = p3 for ((a1,a2,a3,a4), bond, angle, torsion) in \ zip(self._torsionIndL,bonds,angles,torsions): sphere = Sphere(Vector(XYZ[a2]), bond) cone = Cone(Vector(XYZ[a2]), Vector(XYZ[a3] - XYZ[a2]), angle) plane123 = Plane(Vector(XYZ[a4]), Vector(XYZ[a3]), Vector(XYZ[a2])) points = sphere.intersectWith(cone).intersectWith(plane123) p = points[0] if (Plane(Vector(XYZ[a3]), Vector( XYZ[a2]), points[0]).normal * plane123.normal) > 0 else points[1] p = rotatePoint(Vector(p), Line(Vector(XYZ[a2]), Vector(XYZ[a2] - XYZ[a3])), torsion) XYZ[a1] = p.array return XYZ for ((a1,a2,a3,a4), bond, angle, torsion) in \ zip(self._torsionIndL,bonds,angles,torsions): p2 = XYZ[a2] p3 = XYZ[a3] p4 = XYZ[a4] # circle = sphere.intersectWith(cone) n23 = normalize(p3 - p2) # points = circle.intersectWith(plane123) # plane.intersectWith(Plane(circle.center, circle.normal)) is a line # line_direction = cross(normalize(cross(p4-p3,n23)),n23) # Rotate the point about the p2-p3 axis by the torsion angle v21 = (bond * np.cos(angle)) * n23 - (bond * np.sin(angle)) * cross( normalize(cross(p4 - p3, n23)), n23) s = np.sin(torsion) c = np.cos(torsion) XYZ[a1] = p2 - cross(n23, v21) * s + np.sum( n23 * v21) * n23 * (1.0 - c) + v21 * c
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _cartesian_to_internal(self, atom_position, bond_position, angle_position, torsion_position):\n # TODO: _cartesian_to_internal and _internal_to_cartesian should accept/return units and have matched APIs\n\n check_dimensionality(atom_position, unit.nanometers)\n check_dimensionality(bond_position, unit.nanometers)\n check_dimensionality(angle_position, unit.nanometers)\n check_dimensionality(torsion_position, unit.nanometers)\n\n # Convert to internal coordinates once everything is dimensionless\n # Make sure positions are float64 arrays implicitly in units of nanometers for numba\n from perses.rjmc import coordinate_numba\n internal_coords = coordinate_numba.cartesian_to_internal(\n atom_position.value_in_unit(unit.nanometers).astype(np.float64),\n bond_position.value_in_unit(unit.nanometers).astype(np.float64),\n angle_position.value_in_unit(unit.nanometers).astype(np.float64),\n torsion_position.value_in_unit(unit.nanometers).astype(np.float64))\n # Return values are also in floating point implicitly in nanometers and radians\n r, theta, phi = internal_coords\n\n # Compute absolute value of determinant of Jacobian\n detJ = np.abs(r**2*np.sin(theta))\n\n check_dimensionality(r, float)\n check_dimensionality(theta, float)\n check_dimensionality(phi, float)\n check_dimensionality(detJ, float)\n\n return internal_coords, detJ", "def _internal_to_cartesian(self, bond_position, angle_position, torsion_position, r, theta, phi):\n # TODO: _cartesian_to_internal and _internal_to_cartesian should accept/return units and have matched APIs\n\n check_dimensionality(bond_position, unit.nanometers)\n check_dimensionality(angle_position, unit.nanometers)\n check_dimensionality(torsion_position, unit.nanometers)\n check_dimensionality(r, float)\n check_dimensionality(theta, float)\n check_dimensionality(phi, float)\n\n # Compute Cartesian coordinates from internal coordinates using all-dimensionless quantities\n # All inputs to numba must be in float64 arrays implicitly in md_unit_syste units of nanometers and radians\n from perses.rjmc import coordinate_numba\n xyz = coordinate_numba.internal_to_cartesian(\n bond_position.value_in_unit(unit.nanometers).astype(np.float64),\n angle_position.value_in_unit(unit.nanometers).astype(np.float64),\n torsion_position.value_in_unit(unit.nanometers).astype(np.float64),\n np.array([r, theta, phi], np.float64))\n # Transform position of new atom back into unit-bearing Quantity\n xyz = unit.Quantity(xyz, unit=unit.nanometers)\n\n # Compute abs det Jacobian using unitless values\n detJ = np.abs(r**2*np.sin(theta))\n\n check_dimensionality(xyz, unit.nanometers)\n check_dimensionality(detJ, float)\n return xyz, detJ", "def cartesian2polar(cartesian):\n cartesian = np.array(cartesian).squeeze()\n x, y = cartesian\n r = np.linalg.norm([x, y])\n azimuth = np.arctan2(y, x)\n return np.array([r, azimuth])", "def polarToCartesian(theta=0, radius=0):\n\n x = radius * np.cos(theta)\n y = radius * np.sin(theta)\n return x, y", "def to_cartesian(self):\n w = 1.73205 # sqrt(3)\n h = 2\n dx = 0.5 * w if self.y % 2 == 1 else 0\n x = 0.5 * w + self.x * w + dx\n y = 0.5 * h + 0.75 * self.y * h\n return (x, y)", "def to_cartesian(self):\n\n if self.cartesian is None:\n theta = math.radians(self.lat)\n phi = math.radians(self.long)\n x = R_EARTH * math.cos(theta) * math.cos(phi)\n y = R_EARTH * math.cos(theta) * math.sin(phi)\n z = R_EARTH * math.sin(theta)\n self.cartesian = CartesianPoint(x, y, z)\n return self.cartesian", "def get_cartesian_coords(self):\n r = 1\n dec = 
self.dec + 90\n x = r * math.sin(np.deg2rad(dec)) * math.cos(np.deg2rad(self.ra))\n y = r * math.sin(np.deg2rad(dec)) * math.sin(np.deg2rad(self.ra))\n z = r * math.cos(np.deg2rad(dec))\n\n return [x, y, z]", "def polar2cartesian(polar):\n polar = np.array(polar).squeeze()\n r, azimuth = polar\n x = r * np.cos(azimuth)\n y = r * np.sin(azimuth)\n return np.array([x, y])", "def cartesian_coordinates(self):\n # extract RA items\n ra_hours, ra_minutes, ra_seconds = RA_RE.match(str(self.ra)).groups()\n # then cast\n ra_hours = int(ra_hours)\n ra_minutes = int(ra_minutes)\n ra_seconds = float(ra_seconds)\n\n # extract DEC items\n dec_sign, dec_degrees, dec_minutes, dec_seconds = DEC_RE.match(str(self.dec)).groups()\n # then cast\n dec_sign = -1 if dec_sign == '-' else 1\n dec_degrees = int(dec_degrees)\n dec_minutes = int(dec_minutes)\n dec_seconds = float(dec_seconds)\n\n # to degrees\n a = (ra_hours*15) + (ra_minutes*0.25) + (ra_seconds*0.004166)\n b = abs(dec_degrees + dec_minutes/60 + dec_seconds/3600) * dec_sign\n\n # to radians\n a = math.radians(a)\n b = math.radians(b)\n\n distance = float(self.distance)\n\n x = (distance * math.cos(b)) * math.cos(a)\n y = (distance * math.cos(b)) * math.sin(a)\n z = distance * math.sin(b)\n\n return x, y, z", "def _position_cylindrical2cartesian(pos):\n \n rho=pos[:,0]\n theta=pos[:,1]\n z=pos[:,2]\n\n x=rho*np.cos(theta)\n y=rho*np.sin(theta)\n z=z\n\n return np.dstack((x,y,z))[0]", "def cartesianToPolar(x=0, y=0):\n\n radius = np.hypot(x, y)\n theta = np.arctan2(y, x)\n return theta, radius", "def polar_to_cartesian(radius, angle_deg):\n\n theta = np.deg2rad(angle_deg)\n x = radius * np.cos(theta)\n y = radius * np.sin(theta)\n return(x, y)", "def cartesianToPolar(x,y):\n r = np.sqrt(x**2 + y**2)\n theta = np.arctan2(y,x)\n\n return r,theta", "def polar_to_cartesian(r, theta):\n\n x = r * cos(theta)\n y = r * sin(theta)\n\n return x, y", "def cartesian(position):\n return [position[0] * cos(position[1]), position[0] * sin(position[1])]", "def polar2cartesian(phi, r):\n phi_radians = radians(phi)\n x = r*cos(phi_radians)\n y = r*sin(phi_radians)\n return x, y", "def _position_cartesian2cylindrical(pos):\n\n \n #save cartesian position of each particle\n x=pos[:,0]\n y=pos[:,1]\n z=pos[:,2]\n\n rho= np.sqrt(x**2+y**2)\n theta=np.arctan2(y,x)\n\n\n return np.dstack((rho,theta,z))[0]", "def polarToCartesian(r,theta):\n x = r * np.cos(theta)\n y = r * np.sin(theta)\n return x,y", "def polar_to_cartesian(self, r, theta):\n # x = rcos(theta), y = rsin(theta)\n x, y = r*math.cos(theta), r*math.sin(theta)\n x, y = self.add((x, y), self.pole)\n return x, y", "def to_cartesian(dimensions, angles):\n return Operator(transform=np.transpose(np.array(_basis_vectors(dimensions, angles))))", "def to_cartesian(r, phi):\n x = r*np.cos(phi)\n y = r*np.sin(phi)\n return x, y", "def polar_to_cartesian(dist, theta, phi):\n z = np.cos(phi)\n s = np.sin(phi)\n x = s * np.cos(theta)\n y = s * np.sin(theta)\n return np.stack((x, y, z), axis=-1) * np.expand_dims(dist, axis=-1)", "def _position_spherical2cartesian(pos):\n \n r=pos[:,0]\n theta=pos[:,1]\n phi=pos[:,2]\n\n if any(theta>np.pi) or any(theta<0): #sanity check. not necessary for phi.\n raise ValueError, \"Theta beyond [0,pi]. 
Exiting.\"\n\n\n x=r*np.sin(theta)*np.cos(phi)\n y=r*np.sin(theta)*np.sin(phi)\n z=r*np.cos(theta)\n\n return np.dstack((x,y,z))[0]", "def cartesian2polar(x, y):\n r = (x**2+y**2)**.5\n phi = atan2(y, x)\n return phi, r", "def cart2polar3d(cartesian):\n radius = np.linalg.norm(cartesian)\n theta = np.cos", "def getCartesian(self, phi, theta, radius):\n point_x = round(sin(theta) * cos(phi) * radius,4)\n point_y = round(sin(theta) * sin(phi) * radius,4)\n point_z = round(cos(theta) * radius,4)\n return [point_x, point_y, point_z]", "def _spherical_to_cartesian(ra, dec):\n rar = np.radians(ra)\n decr = np.radians(dec)\n\n x = np.cos(rar) * np.cos(decr)\n y = np.sin(rar) * np.cos(decr)\n z = np.sin(decr)\n \n return x, y, z", "def getAxisAndAngleDegres(self):\n theta = numpy.arccos(self.real)\n angle = 360 * theta / numpy.pi\n xyz = self.pure / numpy.sin(theta)\n return xyz, angle", "def to_axang(self) -> Tuple[np.ndarray, float]:\n return self.to_axisangle()", "def __cartesian2spherical(x: float, y: float, z: float) -> Tuple[float, float]:\n if x == 0 and y == 0:\n return 0, np.degrees(np.pi * 0.5 * np.sign(z))\n lat = np.arctan2(z, np.sqrt(x * x + y * y))\n lon = np.arctan2(y, x)\n return np.degrees(lon), np.degrees(lat)" ]
[ "0.68272", "0.68232924", "0.6670016", "0.6668133", "0.6666709", "0.66622704", "0.6632937", "0.6616939", "0.6595138", "0.6567005", "0.6544198", "0.65291274", "0.6471032", "0.6468995", "0.63908124", "0.63474804", "0.63316333", "0.63281065", "0.631877", "0.6312516", "0.6256514", "0.6237257", "0.6228126", "0.62179005", "0.6212803", "0.6207163", "0.6195567", "0.61545914", "0.610239", "0.60956603" ]
0.68568003
0
Opens the molecule in VMD
def showMolecule(self, colorBy=None, label=False, dcdFN=None): # Write PDB file # To set Occupancy, change atom.occupancy # To set Beta, change atom.temperature_factor import os.path pdbFN = os.path.join(MMTK.Database.molecule_types.directory, 'showMolecule.pdb') outF = MMTK.PDB.PDBOutputFile(pdbFN) outF.write(self.molecule) outF.close() # Write VMD script script = 'set ligand [mol new ' + pdbFN + ']\n' if colorBy is not None: script += 'mol modcolor 0 $ligand ' + colorBy + '\n' script += 'mol modstyle 0 0 CPK 1.000000 0.300000 10.000000 10.000000\n' if label: script += """ proc label_atoms { molid seltext } { set sel [atomselect $molid $seltext] set atomlist [$sel list] foreach {atom} $atomlist { set atomlabel [format "%d/%d" $molid $atom] label add Atoms $atomlabel } $sel delete } label_atoms 0 all """ if dcdFN is not None: script += 'animate delete all $ligand\n' script += 'mol addfile ' + dcdFN + ' type dcd waitfor all\n' scriptF = open('showMolecule.vmd', 'w') scriptF.write(script) scriptF.close() # Find and run vmd import AlGDock vmdCommand = AlGDock.findPath(AlGDock.search_paths['vmd']) import subprocess subprocess.call([vmdCommand, '-e', 'showMolecule.vmd']) # Remove files os.remove(pdbFN) os.remove('showMolecule.vmd')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def viewNMDinVMD(filename):\n\n vmd = pathVMD()\n if vmd:\n os.system('{0} -e {1}'.format(vmd, abspath(filename)))", "def _vmd_script_molecule(mole, filename=\"molecule.xyz\"):\n output = \"# load new molecule\\n\"\n if len(mole.atom) == 0:\n raise ValueError(\"Need at least one molecule file with coordinates.\")\n atoms = mole.atom\n natoms = len(mole.atom[0:, 0])\n f = open(filename, \"w\")\n f.write(str(natoms) + \"\\n\\n\")\n for i in range(0, natoms):\n symb = str(atoms[i, 0])\n coord = \" \".join(map(str, atoms[i, 1].tolist()))\n f.write(symb + \" \" + coord + \"\\n\")\n f.close()\n output += (\n \"mol {0} {1} type {2} first 0 last -1 step 1 filebonds 1 autobonds 1 waitfor all\"\n \"\\n\".format(\"new\", filename, \"{xyz}\")\n )\n output += \"#\\n\" \"# representation of the atoms\\n\"\n output += \"mol representation CPK 1.000000 0.300000 118.000000 131.000000\\n\"\n output += (\n \"mol delrep 0 top\\n\"\n \"mol color Element\\n\"\n \"mol selection {{all}}\\n\"\n \"mol material Opaque\\n\"\n \"mol addrep top\\n\"\n \"#\\n\"\n )\n return output", "def open(self):\n self._command = \"open\"", "def open(self):\n try:\n self.handle = self.rm.get_instrument(self.visaName)\n self.handle.write('*RST') #reset device to default\n time.sleep(.5)\n self.handle.write(':FORM:DATA ASC') #return ASCII\n except Exception:\n print('Dvm34411.open() failed !')\n raise\n return True", "def open_database(app):\n app.status.message(\"Opening DICOM folder..\")\n path = app.dialog.directory(\"Select a DICOM folder\")\n if path == '':\n app.status.message('') \n return\n app.status.cursorToHourglass()\n app.close()\n app.open(path)\n app.status.hide()\n app.status.cursorToNormal()", "def OpenDicomSerie(dirname=None):\n\tglobal volume, dim_x, dim_y, dim_z, spacing, origin, CT_open, filename_CT, dir_ini\n ct_swapY, ct_swapZ = False, False\n \n\tprint 'Opening DICOM serie ... 
'\n\n\t# Opening file\n\tif(dirname==None):\n\t\tfile_path = tkFileDialog.askopenfilename(initialdir = dir_ini, filetypes = [('DICOM files', '*.dcm')])\n\t\tfilelist = os.listdir(os.path.dirname(file_path))\n\telse:\n\t\tfilelist = os.listdir(dirname)\n\t\tfile_path = dirname + filelist[0]\n\n\tfilename_CT = file_path\n dir_ini = str(file_path.rsplit('/', 1)[0])+'/'\n\n\t# Getting dimensions\n\tds = pydicom.read_file(file_path)\n\tsp = ds.PixelSpacing\n\tds.file_meta.TransferSyntaxUID = pydicom.uid.ImplicitVRLittleEndian\n\tct_swapZ =(ds.ImageOrientationPatient[0:3] == [1, 0, 0])\n\tct_swapY =(ds.ImageOrientationPatient[3:6] == [0, 1, 0])\n\n dim_x = 0\n for f in filelist:\n if f.endswith(\".dcm\"): dim_x = dim_x + 1 \n\n\tdim_y, dim_z = np.shape(ds.pixel_array)[1], np.shape(ds.pixel_array)[0]\n \n\tvolume = np.zeros((dim_x, dim_y,dim_z))\n slicelocation = np.zeros(dim_x)\n\n\t# creating volume\n\tfor f,i in zip(filelist,range(dim_x)):\n\t\tif f.endswith(\".dcm\"):\n\t\t\tds = pydicom.read_file(os.path.dirname(file_path)+'/'+f)\n\t\t\tds.file_meta.transfersyntaxuid = pydicom.uid.ImplicitVRLittleEndian \n\t\t\tvolume[i,:,:] = ds.pixel_array\n\t\t\tif('slicelocation' in ds):\tslicelocation[i] = ds.SliceLocation\n\t\t\telse:\tslicelocation[i] = ds.ImagePositionPatient[2]\n \n\torder = np.argsort(slicelocation)\n slicelocation = slicelocation[order] # slicelocation is now sorted\n \n\tspacing = [float(slicelocation[1] - slicelocation[0]),float(sp[1]), float(sp[0])]\n\torigin = [float(slicelocation[0]),float(ds.ImagePositionPatient[1]),float(ds.ImagePositionPatient[0])]\n\tvolume = volume[order,:,:] # volume is now sorted\n\n\tif (\"RescaleSlope\" in ds):\tvolume = float(ds.RescaleSlope)*volume\n\tif (\"RescaleIntercept\" in ds):\tvolume = volume + float(ds.RescaleIntercept)\n\n\t# Dealing with image orientation\n print ' ct_swapY, ct_swapZ :', ct_swapY, ct_swapZ\n\tif(ct_swapY == True):\n volume = np.flip(volume,1) # flip volume, Y direction\n origin[1] = origin[1] + dim_y*spacing[1] \n if(ct_swapZ == True):\n volume = np.flip(volume,2) # flip volume, Z direction\n origin[2] = origin[2] + dim_z*spacing[2] \n if(ct_swapZ == True)and(ct_swapY == True): spacing[1], spacing[2] = spacing[2], spacing[1]\n\n\tSet_axes_lim_init()\n\tSet_scales()\n\tCT_open = True\n\tUpdate_all()\n\n\tprint(' file successfully opened!')", "def open_idf(self):\n\n self.save()\n\n filepath = self.idfname\n\n import os\n import platform\n import subprocess\n\n if platform.system() == \"Darwin\": # macOS\n subprocess.call((\"open\", filepath))\n elif platform.system() == \"Windows\": # Windows\n os.startfile(filepath)\n else: # linux variants\n subprocess.call((\"xdg-open\", filepath))", "def on_open_uv_editor():\n cmds.TextureViewWindow()", "def open(self):\n super(Nodzgraph, self).open(dockable=self.configuration.maya.docked,\n area=self.configuration.maya.dock_area,\n allowedArea=self.configuration.maya.allowed_dock_areas,\n floating=self.configuration.maya.floating,\n width=self.configuration.maya.width,\n height=self.configuration.maya.height\n )", "def open(self) -> None:", "def open(self) -> None:", "def open(self) -> None:", "def open(self):\r\n pass", "def open(self):\r\n pass", "def dicom_cli():", "def open( self ):\n pass", "def open(self):", "def open(self):\n raise NotImplementedError(\"Implement this method in child class\")", "def Open(self):\n return True", "def Open(self):\n return True", "def open(file):\n args = {\"file\": file}\n send_command(\"open\", args)", "def open(self):\n pass", "def 
open(self):\n pass", "def open(self):\n pass", "def open(self):\n pass", "def open(self):\n pass", "def open(self):\n pass", "def open(self):\n pass", "def open(self):\n self._isOpen = True", "def runOpenMM(parm, inpcrdFile, system, rad, K, Indices, solvate, out_dcd, out_csv, out_rst ):\n\n \n def newIntegrator():\n integrator = mm.LangevinIntegrator(\n 300.0 * u.kelvin,\n 10.0 / u.picosecond,\n 2.0 * u.femtosecond)\n return integrator\n\n def pmdStructureToOEMol(parm, resname):\n\n from oeommtools.utils import openmmTop_to_oemol\n mask = \"!(:%s)\" %resname\n structure_LIG = parmed.load_file( '../2gmx_wat.prmtop', xyz = '../equilibration/rst/step8.rst.125000' )\n structure_LIG.strip(mask)\n pos = structure_LIG.positions\n top = structure_LIG.topology\n molecule = openmmTop_to_oemol(top, pos, verbose=False)\n OEPerceiveBondOrders(molecule)\n OEAssignAromaticFlags(molecule)\n OEFindRingAtomsAndBonds(molecule)\n\n return molecule\n \n def getAtomIndices( structure, resname ):\n \"\"\"\n Get atom indices of a ligand from ParmEd Structure.\n Arguments\n ---------\n resname : str\n String specifying the resiue name of the ligand.\n structure: parmed.Structure\n ParmEd Structure object of the atoms to be moved.\n Returns\n -------\n atom_indices : list of ints\n list of atoms in the coordinate file matching lig_resname\n \"\"\"\n atom_indices_ligand = []\n topology = structure.topology\n for atom in topology.atoms():\n if str(resname) in atom.residue.name:\n atom_indices_ligand.append(atom.index)\n\n return atom_indices_ligand\n\n\n \"\"\"\n Rotate the torsion to an angle rad using openeye toolkits\n \"\"\" \n molecule = pmdStructureToOEMol( parm, \"LIG\" )\n atom_indices_ligand = getAtomIndices( parm, \"LIG\" )\n\n\n dihedral_atoms = [\"C10\", \"C9\", \"C3\", \"C2\" ]\n atom1 = molecule.GetAtom(OEHasAtomName(dihedral_atoms[0]))\n atom2 = molecule.GetAtom(OEHasAtomName(dihedral_atoms[1]))\n atom3 = molecule.GetAtom(OEHasAtomName(dihedral_atoms[2]))\n atom4 = molecule.GetAtom(OEHasAtomName(dihedral_atoms[3]))\n if OESetTorsion(molecule, atom1, atom2, atom3, atom4, rad ) == False :\n print(\"Torsional bond couldn't be rotated. 
Please enter correct atoms!\"); \n exit()\n\n # Update ligand positions in nc_sim\n updated_pos = molecule.GetCoords()\n\n for index, atomidx in enumerate(atom_indices_ligand): \n parm.positions[atomidx] = np.array(updated_pos[index])*u.nanometers\n\n \"\"\"\n harmonically restrain dihedral angle\n see units, http://docs.openmm.org/6.3.0/userguide/theory.html\n \"\"\"\n pi = np.pi\n harmonic = mm.CustomTorsionForce(\"k*min(dtheta, 2*pi-dtheta)^2; dtheta = abs(theta-theta0); pi = %.5f\" % pi);\n harmonic.addPerTorsionParameter(\"theta0\");\n harmonic.addPerTorsionParameter(\"k\");\n system.addForce(harmonic)\n harmonic.addTorsion(Indices[0], Indices[1], Indices[2], Indices[3], (rad, K))\n\n # Restraint non-moving part of the ligand\n restraintWt = 200 #kcal/mol/A2\n # define the custom force to restrain atoms to their starting positions\n force_restr = mm.CustomExternalForce('k_restr*periodicdistance(x, y, z, x0, y0, z0)^2')\n # Add the restraint weight as a global parameter in kcal/mol/A^2\n force_restr.addGlobalParameter(\"k_restr\", restraintWt*u.kilocalories_per_mole/u.angstroms**2)\n # Define the target xyz coords for the restraint as per-atom (per-particle) parameters\n force_restr.addPerParticleParameter(\"x0\")\n force_restr.addPerParticleParameter(\"y0\")\n force_restr.addPerParticleParameter(\"z0\")\n alch_list = ['C9', 'H92', 'H93', 'C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'H1', 'H2', 'H4', 'H5', 'H6']\n for idx, atom_crd in enumerate( parm.positions ):\n name=parm.atoms[idx].name;\n resname=parm.atoms[idx].residue.name;\n if resname == \"LIG\":\n if not name in alch_list:\n xyz = parm.positions[idx].in_units_of(u.nanometers)/u.nanometers\n force_restr.addParticle(idx, xyz)\n system.addForce( force_restr )\n\n # build simulaion\n platform = mm.Platform.getPlatformByName('CUDA')\n integ1 = newIntegrator()\n simulation = app.Simulation(parm.topology, system, integ1)\n simulation.context.setPositions( parm.positions )\n\n # Set Box dimensions\n inpcrd = app.AmberInpcrdFile( inpcrdFile );\n if inpcrd.boxVectors is not None:\n simulation.context.setPeriodicBoxVectors(*inpcrd.boxVectors)\n\n print('RESTARTING simulation from a previous State..........%s' %inpcrdFile)\n velocities = parm.velocities \n simulation.context.setVelocities( inpcrd.velocities ) \n\n # perform minimization\n print('Minimizing...')\n simulation.minimizeEnergy( tolerance = 0.5 * kilojoule/mole )\n \n # adding simulation reporters\n simulation.context.setVelocitiesToTemperature(300*u.kelvin)\n simulation.reporters.append(app.DCDReporter(out_dcd, 1000))\n simulation.reporters.append(app.StateDataReporter(csv_file, 1000, step=True, potentialEnergy=True, totalEnergy=True, volume=True,temperature=True, separator='\\t'))\n restrt = RestartReporter( out_rst, 10000000, parm.ptr('natom') );\n state = simulation.context.getState(getPositions=True, getEnergy=True, getVelocities=True, enforcePeriodicBox=True)\n restrt.report(simulation, state)\n\n\n print('Production run at NVT...')\n simulation.step(5000000) # 10 ns\n \n # saving last restart\n state = simulation.context.getState(getPositions=True, getEnergy=True, getVelocities=True, enforcePeriodicBox=True)\n restrt.report(simulation, state)\n return" ]
[ "0.612638", "0.6115121", "0.60250276", "0.6011934", "0.5911261", "0.57081985", "0.568981", "0.5678963", "0.5570185", "0.5564583", "0.5564583", "0.5564583", "0.5531796", "0.5531796", "0.54938924", "0.54809994", "0.5477402", "0.5396461", "0.5377517", "0.5377517", "0.5375009", "0.53636", "0.53636", "0.53636", "0.53636", "0.53636", "0.53636", "0.53636", "0.5300105", "0.5294562" ]
0.6395765
0
Test read and write ints.
def test_message_int(): result = True message = msg.Message() for i in range(num_it): message.appendInt(i) if message.length != msg.HEADER_SIZE + (i+1)*msg.intStruct.size: print("Size is ", message.length, " but should be ", msg.HEADER_SIZE + (i+1)*msg.intStruct.size) print("Error : message.appendInt") result = False message.resetCursor() for i in range(num_it): r = message.readInt() if r != i: print(r, " vs ", i) print("Error : message.read/appendInt") result = False return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read(self) -> int:\n ...", "def test_int_field():", "def read(self) -> int:", "def test_numbers_roundtrip():\n for num in (0, 1, 2, 178, 300, BIG_NUMBER):\n num2 = UnsignedInt.read(UnsignedInt.to_bytes(num))\n assert num2 == num", "def test_integer(self):\n esnA = ESN(N_in,N_out,random_state=1)\n esnB = ESN(N_in,N_out,random_state=1)\n self._compare(esnA,esnB,should_be=\"same\")", "def testInt(self):\n self.assertEquals(20, int(Color.RED))\n self.assertEquals(2, int(Color.ORANGE))", "def test_toInt(self):\r\n self.assertEqual(self.black.toInt(), 0)\r\n self.assertEqual(self.red.toInt(), 16711680)\r\n self.assertEqual(self.pink.toInt(), 6553600)", "def test_int_to_int(self):\n @converters.wrap\n def inner_test(param: int):\n \"\"\"Make sure the parameter was converted correctly.\"\"\"\n self.assertEqual(param, 15)\n inner_test(param=15)", "def check_for_int(check):", "def test_int(self):\n from random import randint\n from ctypes import byref, c_int\n # back up array.\n a_orig = self.a.copy()\n # run FORTRAN subroutine.\n tval = randint(0,10000000)\n self.args[0] = byref(c_int(tval))\n self.lib_c_ctypes.ctypes_test(*self.args)\n # revert in Python and test.\n self.a -= tval\n for i in range(len(self.a)):\n self.assertEqual(self.a[i], a_orig[i])", "def test_toint(number, expected, cond):\n assert toInt(number, cond=cond) == expected", "def test_read_count(self):\n self.assertEqual(1, self.alice_storage.read_count)\n self.assertEqual(1, self.bob_storage.read_count)\n self.assertEqual(0, self.carol_storage.read_count)\n self.assertEqual(0, self.anonymous_storage.read_count)", "def test_integer(self):\n conn = self.database.connection()\n cursor = conn.cursor()\n dialect = self.database.dialect()\n dbapi = self.database.dbapi()\n query = dialect.translate('DROP TABLE test_integer')\n try:\n cursor.execute(query)\n except dbapi.Error:\n conn.rollback()\n query = dialect.translate('CREATE TABLE test_integer ' \\\n '( value INTEGER NOT NULL )')\n cursor.execute(query)\n data = []\n query = 'INSERT INTO test_integer VALUES (%s)'\n for i in range(100):\n item = random.randrange(-sys.maxint, sys.maxint)\n data.append(item)\n cursor.execute(query, (item,))\n query = 'SELECT * FROM test_integer'\n cursor.execute(query)\n result = cursor.fetchall()\n for row in result:\n item = row[0]\n assert isinstance(item, int) or isinstance(item, long)\n assert item in data\n data.remove(item)\n query = dialect.translate('DELETE FROM test_integer')\n cursor.execute(query)\n query = dialect.translate('DROP TABLE test_integer')\n cursor.execute(query)\n conn.commit()", "def test_int_out_of_range(parallel, guess):\n imin = np.iinfo(int).min + 1\n imax = np.iinfo(int).max - 1\n huge = f\"{imax+2:d}\"\n\n text = f\"P M S\\n {imax:d} {imin:d} {huge:s}\"\n expected = Table([[imax], [imin], [huge]], names=(\"P\", \"M\", \"S\"))\n # NOTE: Warning behavior varies for the parameters being passed in.\n with pytest.warns() as w:\n table = ascii.read(\n text, format=\"basic\", guess=guess, fast_reader={\"parallel\": parallel}\n )\n if not parallel:\n assert len(w) == 1\n assert (\n \"OverflowError converting to IntType in column S, reverting to String\"\n in str(w[0].message)\n )\n assert_table_equal(table, expected)\n\n # Check with leading zeroes to make sure strtol does not read them as octal\n text = f\"P M S\\n000{imax:d} -0{-imin:d} 00{huge:s}\"\n expected = Table([[imax], [imin], [\"00\" + huge]], names=(\"P\", \"M\", \"S\"))\n with pytest.warns() as w:\n table = ascii.read(\n text, format=\"basic\", guess=guess, 
fast_reader={\"parallel\": parallel}\n )\n if not parallel:\n assert len(w) == 1\n assert (\n \"OverflowError converting to IntType in column S, reverting to String\"\n in str(w[0].message)\n )\n assert_table_equal(table, expected)", "def test_integers(self):\n for const in [\n SSL_ST_CONNECT,\n SSL_ST_ACCEPT,\n SSL_ST_MASK,\n SSL_CB_LOOP,\n SSL_CB_EXIT,\n SSL_CB_READ,\n SSL_CB_WRITE,\n SSL_CB_ALERT,\n SSL_CB_READ_ALERT,\n SSL_CB_WRITE_ALERT,\n SSL_CB_ACCEPT_LOOP,\n SSL_CB_ACCEPT_EXIT,\n SSL_CB_CONNECT_LOOP,\n SSL_CB_CONNECT_EXIT,\n SSL_CB_HANDSHAKE_START,\n SSL_CB_HANDSHAKE_DONE,\n ]:\n assert isinstance(const, int)\n\n # These constants don't exist on OpenSSL 1.1.0\n for const in [\n SSL_ST_INIT,\n SSL_ST_BEFORE,\n SSL_ST_OK,\n SSL_ST_RENEGOTIATE,\n ]:\n assert const is None or isinstance(const, int)", "def test_reading_counter(self):\n self._test_reading_counter_template()", "def test_roundtrip_signed_int():\n for num in (0, -0, -1, 2, -178, 300, -BIG_NUMBER, BIG_NUMBER):\n num2 = SignedInt.read(SignedInt.to_bytes(num))\n assert num2 == num", "def test_get_value_int(self):\n val = self.setting_int.get_value()\n self.assertIsInstance(val, int)\n self.assertEqual(val, 170)", "def test_get_value_int(self):\n val = self.setting_int.get_value()\n self.assertIsInstance(val, int)\n self.assertEqual(val, 170)", "def test_int(self):\n output, _err = self.executor.prepare('do-stuff', 'special', verbosity=5).batch()\n self.assertEqual(output, 'doing stuff very specially')", "def test_int(self):\n htype = h5t.py_create('i')\n self.assertIsInstance(htype, h5t.TypeIntegerID)", "def test_int(self, env: yaenv.Env):\n _val = env.int('INT_VAR')\n assert _val == 1 and type(_val) == int\n _val = env.int('MISSING', -2)\n assert _val == -2 and type(_val) == int\n with pytest.raises(yaenv.EnvError) as err:\n _ = env.int('LIST_VAR')\n assert 'Invalid integer' in str(err.value)\n assert env.int('MISSING') is None", "def getInt(self, int: int, int2: int) -> int:\n ...", "def test_create_valid_int(self):\n storage = FileStorage()\n tests = [9, 12, 10000]\n expected = [9, 12, 10000]\n\n for i in range(len(tests)):\n self.remove_all()\n with patch('sys.stdout', new=StringIO()) as f:\n self.console.onecmd(\n 'create BaseModel test_var={}'.format(tests[i]))\n attributes = list(storage.all().values())\n actual = attributes[0].test_var\n self.assertEqual(expected[i], actual)\n self.assertEqual(int, type(actual))", "def test_integer_index():\n\tlib.backup_and_restore(\n\t\tlambda context: create_indexes(lib.create_integer_index),\n\t\tNone,\n\t\tlambda context: check_indexes(lib.check_simple_index, 12345)\n\t)", "def test_devide_int(self):\n self.assertEqual(operations.devide(8,4), 2)", "def write(self, value: int, /) -> None:", "def test_safeGetInt(self):\n self.assertEqual(\n BMConfigParser().safeGetInt('nonexistent', 'nonexistent'), 0)\n self.assertEqual(\n BMConfigParser().safeGetInt('nonexistent', 'nonexistent', 42), 42)", "def test_bit_get_int(self):\n ops = [bitwise_operations.bit_get_int(self.five_255_bin, 0, 8, False)]\n\n _, _, result = self.as_connection.operate(self.test_key, ops)\n\n expected_result = 255\n assert result[\"255\"] == expected_result", "def test_op_one_int(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1, 4711 * 1024, dtype=int)\n offl_a = stream.bind(a)\n offl_a.one()\n offl_a.update_host()\n stream.sync()\n self.assertTrue((a == 1).all(),\n \"Array should be all one.\" + str(a))" ]
[ "0.62519395", "0.6250594", "0.62442267", "0.61574054", "0.5960592", "0.59363145", "0.5931189", "0.5928802", "0.59218436", "0.587367", "0.5865747", "0.58532387", "0.57997644", "0.5783584", "0.57745636", "0.5768972", "0.5754527", "0.57348996", "0.57348996", "0.57081896", "0.56948346", "0.56756854", "0.5665358", "0.56630194", "0.5621748", "0.56077176", "0.5563673", "0.5561611", "0.55376834", "0.55287766" ]
0.625918
0
Handles a leave game request. Deletes the user from the game.
def leave_game(players_cursor, states_cursor, user, room_id): leave_query = '''DELETE FROM players_table WHERE user = ? AND room_id = ?''' players_cursor.execute(leave_query, (user, room_id)) FRAMES.append(display_game(players_cursor, states_cursor, user, room_id))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def leave(msg: telebot.types.Message):\n if utils.in_menu(msg.from_user):\n bot.reply_to(\n msg,\n 'This command outside of game is useless.'\n )\n return\n\n game, user, opponent = utils.get_game_user_opponent(msg.from_user)\n if not game or not user:\n # todo log something\n return\n\n user.state = states.USER_IN_MENU\n user.losses += 1\n utils.update_user(user)\n bot.send_message(\n user.user_id,\n 'You surrendered.'\n )\n\n if opponent:\n opponent.state = states.USER_IN_MENU\n opponent.wins += 1\n utils.update_user(opponent)\n bot.send_message(\n opponent.user_id,\n 'Your opponent surrendered'\n )\n\n field = json.loads(game.field)\n sig = 1 if user == game.user1 else 2\n\n # changes users emojis to poop\n for i in range(len(field)):\n for j in range(len(field[i])):\n if field[i][j] == sig:\n field[i][j] = 4\n\n if opponent:\n utils.send_updated_field(bot, field, game, opponent)\n Game.delete_by_id(game.id)", "def leaveGame(game, player): # is also called in register player if THE UNPROBABLE happens (e.g. there was a crash and bobby can't come in again)\n\t#check if player is in game and game exists, if the player is the creator close the game\n\tgame_key = game.key()\n\tplayer_key = player.key()\n\n\tif game != None and player != None:\t\t\t\n\t\tif game.creator.key() == player.key():\n\t\t\t#TODO: close game\n\n\t\t\tplayer.currentGame = None\n\t\t\tplayer.put()\n\n\t\t\tgame.status = 2\n\t\t\tgame.players.remove(player.key())\n\t\t\tgame.playerCount -= 1\n\t\t\tgame.put()\n\n\t\t\tlogging.info('Creator %s left game %s, game stopped'%(player_key,game_key))\n\t\t\tvalue = \"done\"\n\t\telif player.key() in game.players:\n\t\t\tplayer.currentGame = None\n\t\t\tplayer.put()\n\n\t\t\tgame.players.remove(player.key())\n\t\t\tgame.playerCount -= 1\n\t\t\tgame.put()\n\n\t\t\tlogging.info('Player %s left game %s, game has now %s players left'%(player_key,game_key,game.playerCount))\n\n\t\t\t#TODO: deal with the horrible aftermath\n\t\t\t#maybe if only 2 left start showdown, give 2 minutes then set marker in between them\n\t\t\tvalue = \"done\"\n\t\telse:\n\t\t\tlogging.error('Attempt to leave game %s by player %s failed, not in list apparently and not creator'%(game_key,player_key))\t\t\t\n\t\t\tvalue = \"error\"\t\t\n\telse:\n\t\tlogging.error('Attempt to leave game %s by player %s failed, no game or player'%(game_key,player_key))\t\t\t\n\t\tvalue = \"error\"\n\n\treturn value", "def on_leave(self, event):\n self.pre_check(event)\n self.remove_player(event.guild.id)", "def _leave(self, *args):\n if not self.game:\n raise ServerException('not playing a game')\n self.game.leave(self)\n self.game = self.player = None", "def on_leave(data):\n username = request.sid\n room = data\n leave_room(room)\n logging.info(username + ' has left the room.')\n send(username + ' has left the room.', room=room)", "async def tod_leave(self, ctx, *args):\n try:\n self.players.remove(ctx.author)\n role = discord.utils.get(ctx.guild.roles, name=\"Player\")\n await ctx.author.remove_roles(role)\n except ValueError:\n pass\n message = f\"{ctx.author.mention} has been removed from the game!\"\n await ctx.send(message)", "async def leave_room(self, label):\n user = self.user\n room = await self.get_room(label)\n\n await self.channel_layer.group_send(\n room.group_name,\n {\n 'type': 'chat.leave',\n 'label': label,\n 'username': user.username,\n 'title': room.name,\n }\n )\n # Remove that we're in the room\n self.rooms.discard(label)\n\n # Remove client from the group so he no longer get room messages\n await 
self.channel_layer.group_discard(\n room.group_name,\n self.channel_name\n )\n\n await self.send_json(\n return_value(\n ACTION_LEAVE, room.label, TO_ME, MSG_LEAVE, NO_MESSAGE\n )\n )", "def leave(self, message, db_session):\n username = self.ts.get_user(message)\n user = db_session.query(db.User).filter(db.User.name == username).one_or_none()\n if not user:\n user = db.User(name=username)\n db_session.add(user)\n for tup in self.player_queue.queue:\n if tup[0] == username:\n self.player_queue.queue.remove(tup)\n self._add_to_whisper_queue(username, \"You've left the queue.\")\n user.times_played -= 1\n break\n else:\n self._add_to_whisper_queue(username, \"You're not in the queue and must join before leaving.\")", "async def leave(ctx):\n if ctx.message.channel.name.lower() not in tod_channels:\n return\n\n room = ctx.message.channel.name.lower()\n if room not in tod_games:\n await amor_manager.say(\"Truth Or Dare not in progress in {}\".format(room))\n else:\n player = ctx.message.author.name\n if player.lower() not in list(tod_games[room]['participants'].keys()):\n await amor_manager.say(\"{}, you cannot leave the game if you have not joined\".format(player))\n elif player == tod_games[room]['host']:\n await amor_manager.say(\"{}, you cannot leave the game you're the host\".format(player))\n else:\n del tod_games[room]['participants'][player.lower()]\n await amor_manager.say(\"{} has left Truth or Dare.\".format(player))", "def on_leave(data):\r\n\r\n username = data['username']\r\n room = data['room']\r\n leave_room(room)\r\n send({\"msg\": username + \" has left the room\"}, room=room)", "def leave_group():\n incoming = request.get_json()\n Participant.delete_participant_with_user_id_and_room_id(session['user_id'], incoming['room_id'])\n return jsonify(results = incoming['room_id'])", "def handle_leave_room(self, lobby_command, client_socket):\n user = self.clients[client_socket]['data'].decode('utf-8')\n words = lobby_command.split()\n roomname = words[1]\n print(f\"Handling leave room {roomname} for {user}\")\n for _room in self.rooms:\n if _room.name == roomname:\n print(\"Requested roomname found..\")\n if user not in _room.room_attrbts['members']:\n msg = f\"Client {user} is already NOT a member of room {_room.name}\"\n self.log_and_send(client_socket, msg)\n return\n else:\n _room.room_attrbts['members'].remove(user)\n msg = f\"User {user} successfully removed from room {roomname}\"\n self.log_and_send(client_socket, msg)\n return\n msg = f'Client {user} passed invalid room. 
Could not join room {roomname}'\n self.log_and_send(client_socket, msg)\n return", "def leave_farm(self, request, pk):\n farm = self.get_object()\n user = request.user\n farm.remove_member(user)\n return Response({}, status=status.HTTP_204_NO_CONTENT)", "def leave(self):\n self.game.dealer_leave(self)\n return self.game", "def leave(ctx, network):\n return _leave(ctx.obj['client'], network)", "def leave(self, *args, **kwargs):\n return self.bot.leave_chat(self.id, *args, **kwargs)", "def leave(self):\n self.remove(\n self.subreddit._reddit.config.username or self.subreddit._reddit.user.me()\n )", "def leave(self):\n self.game.leave(self)\n return self.game", "def cancel_game(self, request):\n game = get_by_urlsafe(request.urlsafe_key, Game)\n if game and not game.game_over:\n game.key.delete()\n return StringMessage(message='Game with key: {} deleted.'.\n format(request.urlsafe_key))\n elif game and game.game_over:\n raise endpoints.BadRequestException('Game is already over!')\n else:\n raise endpoints.NotFoundException('Game not found!')", "def cancel_game(self, request):\n game = get_by_urlsafe(request.urlsafe_game_key,Game)\n if not game:\n raise endpoints.NotFoundException('A Game with that key does not exist!')\n if game.game_over:\n raise endpoints.ForbiddenException('Game has ended.')\n else:\n game.key.delete()\n return StringMessage(message = 'Game Cancelled!')", "async def chat_leave(self, event):\n await self.send_json(\n return_value(\n ACTION_WENT_OFFLINE,\n event['label'],\n event['username'],\n MSG_LEAVE,\n NO_MESSAGE\n )\n )", "async def leave(ctx, *, check=\"\"):\r\n # if botv.isAdmin(ctx.message.author) and check == \"now, bot\":\r\n # if necessary, save checks can go here; check presently commented out because botv can\r\n # fail to initialize in testing\r\n await bot.say(\"Allan, please add dialogue!\")\r\n quit()", "def user_logged_out(self, sender, request, user, **kwargs):", "def on_client_exit(self, game) -> None:\n pass", "def delete_board(request):\n required_fields = ['user_id', 'game_id', 'token']\n\n # Check if the post request contain the required fields\n if set(required_fields) != set(list(request.data.keys())):\n return Response({'error': str('Missing required fields!')}, status=status.HTTP_400_BAD_REQUEST)\n\n # POST Request content\n data = request.data\n\n # check for not allowed characters\n if check_special_characters(str(data['user_id'])) or check_special_characters(str(data['game_id'])) \\\n or check_special_characters(str(data['token'])):\n return Response({'error': str('Unaccepted character passed!')},\n status=status.HTTP_400_BAD_REQUEST)\n\n # Here check if user_id matches the token with the database\n if not db.check_user(data['user_id'], data['token']):\n return Response({'error': str('UNAUTHORIZED')}, status=status.HTTP_401_UNAUTHORIZED)\n\n # Here delete the game board from user's saved profile\n if not db.delete_game(data['user_id'], data['game_id']):\n return Response({'error': str('Error when deleting the game!')}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n return Response({'status': 'success'})", "def leave(self):\n p = GameOverPopup(self)\n p.open()", "def leave_room(room):\n return request.namespace.leave_room(room)", "def leaveEvent(self, event):\n self.destroy()", "def cancel_game(self):\n ndb.delete_multi(UserGame.query(UserGame.game_key == self.key).fetch(keys_only=True)) \n\n k = self.key\n k.delete()", "def leave_loose_game(self):\n self.update_json_file()\n self.end = True\n self.root.destroy()\n GameOver()" ]
[ "0.7001719", "0.6419002", "0.63921964", "0.639108", "0.6370939", "0.633421", "0.63169086", "0.6268317", "0.6229235", "0.61546296", "0.6119499", "0.6118067", "0.60932064", "0.6045495", "0.6015096", "0.5974975", "0.59517586", "0.59031034", "0.59014386", "0.5897309", "0.5892574", "0.5889712", "0.58342934", "0.58237493", "0.5820593", "0.57754123", "0.57619804", "0.57610285", "0.5752704", "0.5738764" ]
0.66279423
1
Select Relationships associated with specified fact_id.
def select_by_fact_id(cls, fact_id): return db.session.query(cls).filter_by(fact_id=fact_id).all()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_relationships(person_id):\n try:\n conn = sqlite3.connect(settings.database_name)\n conn.row_factory = sqlite3.Row\n c = conn.cursor()\n c.execute(\"PRAGMA foreign_keys = ON\")\n c.execute(relationship_query, (person_id,)) # note a tuple is needed as a parameter value for SQLITE\n\n relation_list = []\n for row in c:\n _relation = Relationship()\n _relation.person_id = row[\"personid\"]\n _relation.person.first_name = row[\"firstname\"]\n _relation.person.last_name = row[\"lastname\"]\n _relation.person.middle_initial = row[\"middleinitial\"]\n _relation.related_person_id = row[\"related_personid\"]\n _relation.relationship_id = row[\"relationshipid\"]\n _relation.relationship_type = row[\"relationshiptype\"]\n _relation.relationship_type_description = row[\"key\"]\n relation_list.append(_relation)\n conn.close()\n return relation_list\n except:\n return []", "def has_relationship(id):\n\n relationship = p.toolkit.get_action(\"package_relationships_list\")(\n data_dict={\"id\": id}\n )\n if relationship:\n rel = bool(relationship)\n return rel", "def get_relationship(self, guid):\n results = None\n atlas_endpoint = self.endpoint_url + f\"/relationship/guid/{guid}\"\n\n getResponse = requests.get(\n atlas_endpoint,\n headers=self.authentication.get_authentication_headers()\n )\n\n results = self._handle_response(getResponse)\n\n return results", "def selected_relationships(self):\n return self._selected_relationships", "def relationships(self):", "def get_relationships_for_destination(self, destination_id):\n # Implemented from template for\n # osid.relationship.RelationshipLookupSession.get_relationships_for_destination\n # NOTE: This implementation currently ignores plenary and effective views\n collection = JSONClientValidated('relationship',\n collection='Relationship',\n runtime=self._runtime)\n result = collection.find(\n dict({'destinationId': str(destination_id)},\n **self._view_filter())).sort('_id', ASCENDING)\n return objects.RelationshipList(result, runtime=self._runtime)", "def select_by_foreign_keys(cls, subject_id=None, object_id=None, relationship_type_id=None):\n filter_clause = sa.and_(\n sa.and_(cls.subject_id == subject_id, cls.object_id == object_id),\n cls.relationship_type_id == relationship_type_id)\n return db.session.query(cls).filter(filter_clause).first()", "def user_relationships(id, related_collection_name):\n response = None\n if request.method == 'POST':\n response = User.create_relationships(id, related_collection_name, eval(request.data))\n elif request.method == 'PATCH':\n response = User.update_relationship(id, related_collection_name, json.loads(request.data))\n elif request.method == 'DELETE':\n response = User.disconnect_relationship(id, related_collection_name, eval(request.data))\n elif request.method == 'GET':\n response = User.get_relationship(request.args, id, related_collection_name)\n return response", "def get_relationship_query(self):\n # Implemented from template for\n # osid.resource.ResourceQuerySession.get_resource_query_template\n return queries.RelationshipQuery(runtime=self._runtime)", "def findAllInfectedRelationships(tx):\n query = (\n \"MATCH (n1:Person)-[r:COVID_EXPOSURE]->(n2:Person) \"\n \"RETURN ID(n1) , r , r.date , r.name , ID(n2);\"\n )\n results = tx.run(query).data()\n return results", "def get_relationship(self, relationship_id):\n # Implemented from template for\n # osid.resource.ResourceLookupSession.get_resource\n # NOTE: This implementation currently ignores plenary view\n collection = 
JSONClientValidated('relationship',\n collection='Relationship',\n runtime=self._runtime)\n result = collection.find_one(\n dict({'_id': ObjectId(self._get_id(relationship_id, 'relationship').get_identifier())},\n **self._view_filter()))\n return objects.Relationship(osid_object_map=result, runtime=self._runtime, proxy=self._proxy)", "def get_relationships_for_source(self, source_id):\n # Implemented from template for\n # osid.relationship.RelationshipLookupSession.get_relationships_for_source\n # NOTE: This implementation currently ignores plenary and effective views\n collection = JSONClientValidated('relationship',\n collection='Relationship',\n runtime=self._runtime)\n result = collection.find(\n dict({'sourceId': str(source_id)},\n **self._view_filter())).sort('_sort_id', ASCENDING)\n return objects.RelationshipList(result, runtime=self._runtime)", "def get_depend_package(id):\n\n relationships = []\n try:\n relationships = p.toolkit.get_action(\"package_relationships_list\")(\n data_dict={\"id\": id, \"rel\": \"dependency_of\"}\n )\n except Exception, e:\n return {}\n\n depend = []\n if relationships:\n for rel in relationships:\n try:\n access = p.toolkit.check_access(\n \"package_show\",\n context={\"user\": c.user},\n data_dict={\"id\": rel[\"object\"]},\n )\n dep = p.toolkit.get_action(\"package_show\")(\n data_dict={\"id\": rel[\"object\"]}\n )\n depend.append(dep)\n except:\n pass\n return depend", "def _get_fact(self, fact):\n for kbfact in self.facts:\n if fact == kbfact:\n return kbfact", "def _get_fact(self, fact):\n for kbfact in self.facts:\n if fact == kbfact:\n return kbfact", "def _get_fact(self, fact):\n for kbfact in self.facts:\n if fact == kbfact:\n return kbfact", "def get_doctor_include_related(id):\n doctor = Doctor.query.get(id)\n result = full_doctor_schema.dump(doctor)\n return jsonify(result.data)", "def get_relationships_by_genus_type_for_destination(self, destination_id, relationship_genus_type):\n # Implemented from template for\n # osid.relationship.RelationshipLookupSession.get_relationships_by_genus_type_for_destination\n # NOTE: This implementation currently ignores plenary and effective views\n collection = JSONClientValidated('relationship',\n collection='Relationship',\n runtime=self._runtime)\n result = collection.find(\n dict({'destinationId': str(destination_id),\n 'genusTypeId': str(relationship_genus_type)},\n **self._view_filter())).sort('_id', ASCENDING)\n return objects.RelationshipList(result, runtime=self._runtime)", "def get_rule(rule_id):\n\n rule = get_db().execute('SELECT i.*, c.name as category_name FROM ruleset i JOIN categories c ON i.category_id = c.id WHERE i.id = ?', (rule_id, )).fetchone()\n\n return rule", "def get_child_ids(id,conn):\n\n child_ids = ('WITH RECURSIVE children AS '\n '(SELECT subject_id '\n 'FROM cvterm_relationship '\n 'WHERE object_id = %s '\n 'UNION '\n 'SELECT cr.subject_id '\n 'FROM cvterm_relationship cr '\n 'INNER JOIN children ch ON ch.subject_id = cr.object_id) '\n 'SELECT * FROM children')\n ids = connect(child_ids,id,conn)\n list_of_ids = []\n for item in ids:\n list_of_ids.append(item[0])\n return(list_of_ids)", "def get_related(this_obj, other_obj, m2m=False):\n # is het niet raar dat je voor twee concrete objecten ophaalt naar welke van het ene type\n # verwezen wordt vanuit het andere type? 
Of is dat om de vorige/volgende te kunnen bepalen?\n # als ik kijk naar het gebruik in GetRelations dan is het tweede argument ook niet een object\n # maar een relatie (uit de fields verzameling)\n if m2m:\n fields = [x for x in other_obj._meta.many_to_many]\n else:\n fields = [x for x in other_obj._meta.get_fields() if x.name != 'project' and\n x.get_internal_type() == 'ForeignKey']\n for fld in fields:\n if fld.related_model == this_obj._meta.model:\n related_name = fld.related_query_name()\n break\n else:\n return None # not found\n try:\n return this_obj.__getattribute__(related_name).all()\n except UnboundLocalError:\n return None\n # zou je deze ook kunnen vervangen door een aanroep van get_relation en dan met de opgehaalde\n # naam de gerelateerde objecten ophalen en meteen de vorige en de volgende bepalen?\n # (heeft uiteraard konsekwenties voor de aanroepende code)\n # oorspronkelijk lijkt dat ook zo geweest te zijn, de functie heette toen get_relation en het\n # gedeelte dat nu nog zo heet was daarin hardgecodeerd\n # deze functie wordt alleen aangeroepen in een paar methoden van de hieronder opgenomen klasse\n # GetRelations, namelijk om de namen van relaties uit andere objecten naar het huidige te kunnen\n # bepalen.\n # Als je get_relation zoals die nu is gebruikt zou je dat onderscheid (van versus naar relaties)\n # met dezelfde functie kunnen afhandelen", "def _filter_related_fk(self, rel):\n field = rel.field\n if isinstance(field, models.ForeignKey):\n if self._join_allowed(rel.parent_model, rel.model, field):\n return rel", "def get_depend_def_by_step_id(self, step_id):\n try:\n result = self._session.query(StepEntity.name).\\\n filter(StepEntity.id == StepDependencyEntity.parent_id).\\\n filter(StepDependencyEntity.child_id == step_id).\\\n all()\n\n result_list = [\n row[0] for row in result\n ]\n\n except SQLAlchemyError as err:\n Log.an().error('sql exception [%s]', str(err))\n return False\n\n return result_list", "def get_facts(facts, situation, target):\n return [f[2] for f in facts if (f[0], f[1]) == (situation, target)]", "def test_select_by_concept_type(self, select_relationships):\n select_relationships.return_value = [Mock(subject='hello'), Mock(subject='kitty')]\n mock_concept_type = Mock(name='concept_type')\n\n result = FactQuery._select_by_concept_type(mock_concept_type)\n self.assertEqual(['hello', 'kitty'], result)\n select_relationships.assert_called_once_with('is', object_name=mock_concept_type)", "def get_links_package(id):\n\n relationships = []\n try:\n relationships = p.toolkit.get_action(\"package_relationships_list\")(\n data_dict={\"id\": id, \"rel\": \"linked_from\"}\n )\n except Exception, e:\n return {}\n\n links = []\n if relationships:\n for rel in relationships:\n try:\n access = p.toolkit.check_access(\n \"package_show\",\n context={\"user\": c.user},\n data_dict={\"id\": rel[\"object\"]},\n )\n link = p.toolkit.get_action(\"package_show\")(\n data_dict={\"id\": rel[\"object\"]}\n )\n links.append(link)\n except:\n pass\n return links", "def filter_relationships(self, srcif, routes):\n outroutes = []\n rel = self.relations[srcif]\n for route in routes:\n opp_rel = self.relations[route[PEER]]\n if (rel == CUST or opp_rel == CUST) or (rel == PROV and opp_rel == PROV):\n outroutes.append(route)\n return outroutes", "def findAllAppContactRelationships(tx):\n query = (\n \"MATCH (n1:Person)-[r:APP_CONTACT]->(n2:Person) \"\n \"RETURN ID(n1) , r , r.date , r.hour, ID(n2);\"\n )\n results = tx.run(query).data()\n return results", "def 
findAllGetVaccineRelationships(tx):\n query = (\n \"MATCH (n1:Person)-[r:GET_VACCINE]->(n2:Vaccine) \"\n \"RETURN ID(n1) , r , r.date , r.country , r.expirationDate , ID(n2);\"\n )\n results = tx.run(query).data()\n return results", "def test_filter_relationships_by_concept_type__object(self):\n # Set up mocks and test data\n concept_type = 'birds'\n mock_match_0 = Mock(name='mock_match_0',\n object=Mock(name='mock_subject_0',\n concept_name='mock_subject_0',\n concept_types=['birds', 'snakes']))\n\n mock_match_1 = Mock(name='mock_match_1',\n object=Mock(name='mock_subject_1',\n concept_name='mock_subject_1',\n concept_types=['snakes', 'turtles']))\n\n mock_match_2 = Mock(name='mock_match_2',\n object=Mock(name='mock_subject_2',\n concept_name='mock_subject_2',\n concept_types=['snakes', 'birds']))\n\n mock_matches = [mock_match_0, mock_match_1, mock_match_2]\n\n # Make call\n filtered_matches = FactQuery._filter_relationships_by_concept_type(\n mock_matches, concept_type, relationship_attr='object')\n\n # Verify results\n self.assertEqual([mock_match_0, mock_match_2], filtered_matches)" ]
[ "0.58382934", "0.514626", "0.50882876", "0.5030613", "0.5016771", "0.49949938", "0.4898808", "0.48690563", "0.48584178", "0.48120192", "0.47925648", "0.47674325", "0.47505498", "0.473037", "0.473037", "0.473037", "0.47198808", "0.47121876", "0.46776888", "0.46749067", "0.46717918", "0.46698582", "0.46666363", "0.46639878", "0.46553952", "0.4637488", "0.46335638", "0.4614692", "0.46069464", "0.45725232" ]
0.68101376
0
Select Relationship with specified subject, object and relationship type.
def select_by_foreign_keys(cls, subject_id=None, object_id=None, relationship_type_id=None): filter_clause = sa.and_( sa.and_(cls.subject_id == subject_id, cls.object_id == object_id), cls.relationship_type_id == relationship_type_id) return db.session.query(cls).filter(filter_clause).first()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def select_by_values(cls, relationship_type_name=None, relationship_number=None,\n subject_name=None, object_name=None):\n query = db.session.query(cls).\\\n join(RelationshipType).\\\n filter(RelationshipType.relationship_type_name==relationship_type_name)\n if relationship_number:\n query = query.filter(Relationship.count==relationship_number)\n if subject_name: \n subject_concept = sa_orm.aliased(Concept)\n query = query.\\\n join(subject_concept, Relationship.subject_id==subject_concept.concept_id).\\\n filter(subject_concept.concept_name==subject_name)\n if object_name:\n object_concept = sa_orm.aliased(Concept)\n query = query.\\\n join(object_concept, Relationship.object_id==object_concept.concept_id).\\\n filter(object_concept.concept_name==object_name)\n return query.all()", "def relation( self, obj, relType ):\n raise NotImplementedError(\"relation\")", "def test_filter_relationships_by_concept_type__object(self):\n # Set up mocks and test data\n concept_type = 'birds'\n mock_match_0 = Mock(name='mock_match_0',\n object=Mock(name='mock_subject_0',\n concept_name='mock_subject_0',\n concept_types=['birds', 'snakes']))\n\n mock_match_1 = Mock(name='mock_match_1',\n object=Mock(name='mock_subject_1',\n concept_name='mock_subject_1',\n concept_types=['snakes', 'turtles']))\n\n mock_match_2 = Mock(name='mock_match_2',\n object=Mock(name='mock_subject_2',\n concept_name='mock_subject_2',\n concept_types=['snakes', 'birds']))\n\n mock_matches = [mock_match_0, mock_match_1, mock_match_2]\n\n # Make call\n filtered_matches = FactQuery._filter_relationships_by_concept_type(\n mock_matches, concept_type, relationship_attr='object')\n\n # Verify results\n self.assertEqual([mock_match_0, mock_match_2], filtered_matches)", "def get_association(relation: str, subject_node: Node, object_node: Node, is_negated: bool, pmid: str = '',\n association_config: dict = None):\n if association_config is None:\n association_config = associations\n most_relevant_relation = list(get_biolink_association(subject_node, object_node, association_config).items())[0][0]\n biolink_relation = most_relevant_relation(id=0,\n subject=subject_node['id'],\n relation=relation,\n object=object_node['id'],\n negated=is_negated,\n publications=[pmid])\n return get_relationship_from_biolink(subject_node, biolink_relation, object_node)", "def test_select_by_concept_type(self, select_relationships):\n select_relationships.return_value = [Mock(subject='hello'), Mock(subject='kitty')]\n mock_concept_type = Mock(name='concept_type')\n\n result = FactQuery._select_by_concept_type(mock_concept_type)\n self.assertEqual(['hello', 'kitty'], result)\n select_relationships.assert_called_once_with('is', object_name=mock_concept_type)", "def test_filter_relationships_by_concept_type__subject(self):\n # Set up mocks and test data\n concept_type = 'birds'\n mock_match_0 = Mock(name='mock_match_0',\n subject=Mock(name='mock_subject_0',\n concept_name='mock_subject_0',\n concept_types=['birds', 'snakes']))\n\n mock_match_1 = Mock(name='mock_match_1',\n subject=Mock(name='mock_subject_1',\n concept_name='mock_subject_1',\n concept_types=['snakes', 'turtles']))\n\n mock_match_2 = Mock(name='mock_match_2',\n subject=Mock(name='mock_subject_2',\n concept_name='mock_subject_2',\n concept_types=['snakes', 'birds']))\n\n mock_matches = [mock_match_0, mock_match_1, mock_match_2]\n\n # Make call\n filtered_matches = FactQuery._filter_relationships_by_concept_type(\n mock_matches, concept_type, relationship_attr='subject')\n\n # Verify results\n 
self.assertEqual([mock_match_0, mock_match_2], filtered_matches)", "def get_relationship_type(\n r: Union[\"ModelRelationship\", t.RelationshipType, t.RelationshipName, str]\n) -> t.RelationshipType:\n relationship_type = r.type if isinstance(r, ModelRelationship) else r\n return t.RelationshipType(normalize_relationship_type(relationship_type))", "def get_relationships_by_record_type(self, relationship_record_type):\n # Implemented from template for\n # osid.resource.ResourceLookupSession.get_resources_by_record_type\n # STILL NEED TO IMPLEMENT!!!\n return objects.RelationshipList([])", "def get_relationship_from_biolink(biolink_subject: Node,\n biolink_association: Association,\n biolink_object: Node):\n properties = {key: value for key, value in biolink_association.__dict__.items() if key != 'id'}\n return Relationship(biolink_subject,\n get_pythonic_name(biolink_association.__class__.__name__),\n biolink_object,\n **properties)", "def get_biolink_association(subject_node: Node, object_node: Node, association_config: dict = None) -> dict:\n if association_config is None:\n association_config = associations\n subject_query = list(subject_node.labels)[0]\n object_query = list(object_node.labels)[0]\n association = {association: requirements for association, requirements in association_config.items()\n if subject_query in requirements[0]\n and object_query in requirements[1]}\n if len(association) is 0:\n association = {Association: ['*']}\n return association", "def filter_for_term_relationships(src, relationship_type, object_id, target=True):\n filters = [\n Filter(\"type\", \"=\", \"relationship\"),\n Filter(\"relationship_type\", \"=\", relationship_type),\n ]\n if target:\n filters.append(Filter(\"target_ref\", \"=\", object_id))\n else:\n filters.append(Filter(\"source_ref\", \"=\", object_id))\n\n results = src.query(filters)\n return remove_deprecated(results)", "def get_relationships_by_genus_type(self, relationship_genus_type):\n # Implemented from template for\n # osid.resource.ResourceLookupSession.get_resources_by_genus_type\n # NOTE: This implementation currently ignores plenary view\n collection = JSONClientValidated('relationship',\n collection='Relationship',\n runtime=self._runtime)\n result = collection.find(\n dict({'genusTypeId': str(relationship_genus_type)},\n **self._view_filter())).sort('_id', DESCENDING)\n return objects.RelationshipList(result, runtime=self._runtime, proxy=self._proxy)", "def by_type(self, type):\n return self.filter(related_type__title=type)", "def get_relationship(self, relationship_id):\n # Implemented from template for\n # osid.resource.ResourceLookupSession.get_resource\n # NOTE: This implementation currently ignores plenary view\n collection = JSONClientValidated('relationship',\n collection='Relationship',\n runtime=self._runtime)\n result = collection.find_one(\n dict({'_id': ObjectId(self._get_id(relationship_id, 'relationship').get_identifier())},\n **self._view_filter()))\n return objects.Relationship(osid_object_map=result, runtime=self._runtime, proxy=self._proxy)", "def get_ids_related_to(cls, object_type, related_type, related_ids=[]):\n\n if isinstance(related_ids, (int, long)):\n related_ids = [related_ids]\n\n if not related_ids:\n return db.session.query(Relationship.source_id).filter(sql.false())\n\n destination_ids = db.session.query(Relationship.destination_id).filter(\n and_(\n Relationship.destination_type == object_type,\n Relationship.source_type == related_type,\n Relationship.source_id.in_(related_ids),\n )\n )\n 
source_ids = db.session.query(Relationship.source_id).filter(\n and_(\n Relationship.source_type == object_type,\n Relationship.destination_type == related_type,\n Relationship.destination_id.in_(related_ids),\n )\n )\n\n queries = [destination_ids, source_ids]\n queries.extend(cls.get_extension_mappings(\n object_type, related_type, related_ids))\n queries.extend(cls.get_special_mappings(\n object_type, related_type, related_ids))\n\n return cls._array_union(queries)", "def test_get_relation_type(self):\n pass", "def relationship(*args, b: bool=True, relationshipData: Union[AnyStr, List[AnyStr], bool]=\"\",\n q=True, query=True, e=True, edit=True, **kwargs)->Union[None, Any]:\n pass", "def get(self, dto):\n assert dto.using in list(self.models.keys())\n Relation = self.models[dto.using]\n return self.session.query(Relation)\\\n .filter(Relation.purpose == dto.purpose)\\\n .filter(Relation.sender == dto.sender)\\\n .filter(Relation.recipient == dto.recipient)\\\n .first()", "def relationship(cls):\n return relationship.many_to_one(cls, 'relationship')", "def get_relation(srt, soort):\n result, multiple = None, None\n if srt != soort or soort in ('funcproc', 'techproc'):\n for relobj in my.rectypes[srt]._meta.get_fields():\n if relobj.related_model and corr_naam(relobj.related_model._meta.model_name) == soort:\n result = relobj.name\n multiple = False if relobj.get_internal_type() == 'ForeignKey' else True\n break\n return result, multiple", "def get_object(self, subject=None, predicate=None):\n\n # Get the result of the search\n results = self.rdf.objects(subject, predicate)\n as_list = list(results)\n\n # Don't raise exceptions, value test!\n if not as_list:\n return None\n\n return as_list[0]", "def get_relationships_by_genus_type_for_source(self, source_id, relationship_genus_type):\n # Implemented from template for\n # osid.relationship.RelationshipLookupSession.get_relationships_by_genus_type_for_source\n # NOTE: This implementation currently ignores plenary and effective views\n collection = JSONClientValidated('relationship',\n collection='Relationship',\n runtime=self._runtime)\n result = collection.find(\n dict({'sourceId': str(source_id),\n 'genusTypeId': str(relationship_genus_type)},\n **self._view_filter())).sort('_id', ASCENDING)\n return objects.RelationshipList(result, runtime=self._runtime)", "def get_relationships_by_genus_type_for_peers(self, source_id, destination_id, relationship_genus_type):\n # Implemented from template for\n # osid.relationship.RelationshipLookupSession.get_relationships_by_genus_type_for_peers\n # NOTE: This implementation currently ignores plenary and effective views\n collection = JSONClientValidated('relationship',\n collection='Relationship',\n runtime=self._runtime)\n result = collection.find(\n dict({'sourceId': str(source_id),\n 'destinationId': str(destination_id),\n 'genusTypeId': str(relationship_genus_type)},\n **self._view_filter())).sort('_id', ASCENDING)\n return objects.RelationshipList(result, runtime=self._runtime)", "def subjects(\n self,\n predicate: Optional[\"_PredicateType\"] = None,\n object: Optional[\"_ObjectType\"] = None,\n ) -> Generator[\"_SubjectType\", None, None]:\n for t, c in self.triples((None, predicate, object)):\n yield t[0]", "def subjects(\n self,\n predicate: Optional[\"_PredicateType\"] = None,\n object: Optional[\"_ObjectType\"] = None,\n ) -> Generator[\"_SubjectType\", None, None]:\n for t, c in self.triples((None, predicate, object)):\n yield t[0]", "async def determine_contact(rec, ref: Ref, 
type='parent'):\n if type not in ['parent', 'impacted']:\n raise ValueError(\"Type must be impacted or parent!\")\n\n LOG.debug(f\"Determing {type} for object id: %s -- %s-%s...\", rec.id,\n rec.Name, rec.Type)\n offset_min = rec.last_seen - 2.5\n\n if type == \"parent\":\n accpt_colors = ['Blue', 'Red'\n ] if rec.Color == 'Violet' else [rec.Color]\n\n # query_filter = (\n # ~(Object.type.startswith('Decoy'))\n # & ~(Object.c.type.startswith('Misc'))\n # & ~(Object.c.type.startswith('Projectile'))\n # & ~(Object.c.type.startswith('Weapon'))\n # &\n # ~(Object.c.type.startswith(\"Ground+Light+Human+Air+Parachutist\")))\n query_filter = \" (type not like ('%Decoy%')\"\\\n \" AND type not like ('%Misc%')\"\\\n \" AND type not like ('%Weapon%')\"\\\n \" AND type not like ('%Projectile%')\"\\\n \" AND type not like ('%Ground+Light+Human+Air+Parachutist%'))\"\n\n elif type == 'impacted':\n accpt_colors = ['Red'] if rec.Color == 'Blue' else ['Red']\n # query_filter = (Object.c.type.startswith('Air+'))\n query_filter = \" type like ('%Air+%')\"\n\n else:\n raise NotImplementedError\n\n color_query = f\"\"\" color in ('{\"','\".join(accpt_colors)}')\"\"\"\n id_query = f\" id != {rec.id} \"\n query = f\"\"\" SELECT id FROM object\n WHERE {query_filter} AND {color_query} AND {id_query}\n \"\"\"\n\n nearby_objs = await DB.fetch(query)\n\n closest = []\n for nearby in nearby_objs:\n near = ref.obj_store[nearby[0]]\n if ((near.last_seen <= offset_min\n and not (near.Type.startswith('Ground') and near.alive == 1))\n and (abs(near.alt - rec.alt) < 2000)\n and (abs(near.lat - rec.lat) <= 0.0005)\n and (abs(near.lon - rec.lon) <= 0.0005)):\n continue\n\n prox = compute_dist(rec.cart_coords, near.cart_coords)\n LOG.debug(\"Distance to object %s - %s is %s...\", near.Name, near.Type,\n str(prox))\n if not closest or (prox < closest[1]):\n closest = [near.id, prox, near.Name, near.Pilot, near.Type]\n\n if not closest:\n return None\n\n if closest[1] > 1000:\n LOG.warning(\n f\"Rejecting closest {type} for {rec.id}-{rec.Name}-{rec.Type}: \"\n \"%s %sm...%d checked!\", closest[4],\n str(closest[1]), len(nearby_objs))\n\n return None\n\n return closest", "def fetch_relation(self, address):\n if (self.from_id is not None) and (self.to_id is not None):\n new_neofj = NeoFJ(address=address)\n relations = new_neofj.get_two_node_relations(_id1=self.from_id, _id2=self.to_id, _f_relation=self.rel_type)\n relation = relations[0]\n self.rel_type = relation.type\n self.rel_dict = relation.properties", "def get_relationships_by_parent_genus_type(self, relationship_genus_type):\n # Implemented from template for\n # osid.resource.ResourceLookupSession.get_resources_by_parent_genus_type\n # STILL NEED TO IMPLEMENT!!!\n return objects.RelationshipList([])", "def get_relationships_by_genus_type_for_destination(self, destination_id, relationship_genus_type):\n # Implemented from template for\n # osid.relationship.RelationshipLookupSession.get_relationships_by_genus_type_for_destination\n # NOTE: This implementation currently ignores plenary and effective views\n collection = JSONClientValidated('relationship',\n collection='Relationship',\n runtime=self._runtime)\n result = collection.find(\n dict({'destinationId': str(destination_id),\n 'genusTypeId': str(relationship_genus_type)},\n **self._view_filter())).sort('_id', ASCENDING)\n return objects.RelationshipList(result, runtime=self._runtime)", "def delete_relationship(tx, node_value_1=None, node_value_2=None, node_type_1=None, node_type_2=None, relationship=None):\n if 
node_value_1 is None and node_type_1 is None:\n cql = \"MATCH ()-[u:\" + relationship + \"]-(w:\" + node_type_2 + \"{name:$node_value_2}) \" \\\n \"DELETE u;\"\n try:\n tx.run(cql, node_value_2=node_value_2)\n except Exception as e:\n print(str(e))\n elif node_value_2 is None and node_type_2 is None:\n cql = \"MATCH (s:\" + node_type_1 + \"{name:$node_value_1})-[u:\" + relationship + \"]-() \" \\\n \"DELETE u;\"\n try:\n tx.run(cql, node_value_1=node_value_1)\n except Exception as e:\n print(str(e))\n else:\n cql = \"MATCH (s:\" + node_type_1 + \"{name:$node_value_1})-[u:\" + relationship + \"]-(w:\" + node_type_2 + \"{name:$node_value_2}) \" \\\n \"DELETE u;\"\n try:\n tx.run(cql, node_value_1=node_value_1, node_value_2=node_value_2)\n except Exception as e:\n print(str(e))" ]
[ "0.6398576", "0.59592456", "0.58598846", "0.5763038", "0.5670848", "0.56461585", "0.5524951", "0.5501221", "0.5444976", "0.5378021", "0.53130275", "0.5282662", "0.5178828", "0.51488453", "0.5099441", "0.50218856", "0.501885", "0.49724635", "0.48888737", "0.4885239", "0.4870398", "0.47908905", "0.4773507", "0.4773079", "0.4773079", "0.47706643", "0.47632754", "0.475571", "0.4748589", "0.47369" ]
0.72244817
0
Select Relationships with specified relationship_type, count, subject, and object.
def select_by_values(cls, relationship_type_name=None, relationship_number=None,
                     subject_name=None, object_name=None):
    query = db.session.query(cls).\
        join(RelationshipType).\
        filter(RelationshipType.relationship_type_name==relationship_type_name)
    if relationship_number:
        query = query.filter(Relationship.count==relationship_number)
    if subject_name:
        subject_concept = sa_orm.aliased(Concept)
        query = query.\
            join(subject_concept, Relationship.subject_id==subject_concept.concept_id).\
            filter(subject_concept.concept_name==subject_name)
    if object_name:
        object_concept = sa_orm.aliased(Concept)
        query = query.\
            join(object_concept, Relationship.object_id==object_concept.concept_id).\
            filter(object_concept.concept_name==object_name)
    return query.all()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def select_by_foreign_keys(cls, subject_id=None, object_id=None, relationship_type_id=None):\n filter_clause = sa.and_(\n sa.and_(cls.subject_id == subject_id, cls.object_id == object_id),\n cls.relationship_type_id == relationship_type_id)\n return db.session.query(cls).filter(filter_clause).first()", "def test_filter_relationships_by_concept_type__object(self):\n # Set up mocks and test data\n concept_type = 'birds'\n mock_match_0 = Mock(name='mock_match_0',\n object=Mock(name='mock_subject_0',\n concept_name='mock_subject_0',\n concept_types=['birds', 'snakes']))\n\n mock_match_1 = Mock(name='mock_match_1',\n object=Mock(name='mock_subject_1',\n concept_name='mock_subject_1',\n concept_types=['snakes', 'turtles']))\n\n mock_match_2 = Mock(name='mock_match_2',\n object=Mock(name='mock_subject_2',\n concept_name='mock_subject_2',\n concept_types=['snakes', 'birds']))\n\n mock_matches = [mock_match_0, mock_match_1, mock_match_2]\n\n # Make call\n filtered_matches = FactQuery._filter_relationships_by_concept_type(\n mock_matches, concept_type, relationship_attr='object')\n\n # Verify results\n self.assertEqual([mock_match_0, mock_match_2], filtered_matches)", "def test_filter_relationships_by_concept_type__subject(self):\n # Set up mocks and test data\n concept_type = 'birds'\n mock_match_0 = Mock(name='mock_match_0',\n subject=Mock(name='mock_subject_0',\n concept_name='mock_subject_0',\n concept_types=['birds', 'snakes']))\n\n mock_match_1 = Mock(name='mock_match_1',\n subject=Mock(name='mock_subject_1',\n concept_name='mock_subject_1',\n concept_types=['snakes', 'turtles']))\n\n mock_match_2 = Mock(name='mock_match_2',\n subject=Mock(name='mock_subject_2',\n concept_name='mock_subject_2',\n concept_types=['snakes', 'birds']))\n\n mock_matches = [mock_match_0, mock_match_1, mock_match_2]\n\n # Make call\n filtered_matches = FactQuery._filter_relationships_by_concept_type(\n mock_matches, concept_type, relationship_attr='subject')\n\n # Verify results\n self.assertEqual([mock_match_0, mock_match_2], filtered_matches)", "def get_relationships_by_record_type(self, relationship_record_type):\n # Implemented from template for\n # osid.resource.ResourceLookupSession.get_resources_by_record_type\n # STILL NEED TO IMPLEMENT!!!\n return objects.RelationshipList([])", "def filter_for_term_relationships(src, relationship_type, object_id, target=True):\n filters = [\n Filter(\"type\", \"=\", \"relationship\"),\n Filter(\"relationship_type\", \"=\", relationship_type),\n ]\n if target:\n filters.append(Filter(\"target_ref\", \"=\", object_id))\n else:\n filters.append(Filter(\"source_ref\", \"=\", object_id))\n\n results = src.query(filters)\n return remove_deprecated(results)", "def get_relationships_by_genus_type(self, relationship_genus_type):\n # Implemented from template for\n # osid.resource.ResourceLookupSession.get_resources_by_genus_type\n # NOTE: This implementation currently ignores plenary view\n collection = JSONClientValidated('relationship',\n collection='Relationship',\n runtime=self._runtime)\n result = collection.find(\n dict({'genusTypeId': str(relationship_genus_type)},\n **self._view_filter())).sort('_id', DESCENDING)\n return objects.RelationshipList(result, runtime=self._runtime, proxy=self._proxy)", "def test_select_by_concept_type(self, select_relationships):\n select_relationships.return_value = [Mock(subject='hello'), Mock(subject='kitty')]\n mock_concept_type = Mock(name='concept_type')\n\n result = FactQuery._select_by_concept_type(mock_concept_type)\n 
self.assertEqual(['hello', 'kitty'], result)\n select_relationships.assert_called_once_with('is', object_name=mock_concept_type)", "def get_ids_related_to(cls, object_type, related_type, related_ids=[]):\n\n if isinstance(related_ids, (int, long)):\n related_ids = [related_ids]\n\n if not related_ids:\n return db.session.query(Relationship.source_id).filter(sql.false())\n\n destination_ids = db.session.query(Relationship.destination_id).filter(\n and_(\n Relationship.destination_type == object_type,\n Relationship.source_type == related_type,\n Relationship.source_id.in_(related_ids),\n )\n )\n source_ids = db.session.query(Relationship.source_id).filter(\n and_(\n Relationship.source_type == object_type,\n Relationship.destination_type == related_type,\n Relationship.destination_id.in_(related_ids),\n )\n )\n\n queries = [destination_ids, source_ids]\n queries.extend(cls.get_extension_mappings(\n object_type, related_type, related_ids))\n queries.extend(cls.get_special_mappings(\n object_type, related_type, related_ids))\n\n return cls._array_union(queries)", "def relationship_count(self, r_type=None, n_ids=()):\n if r_type is None and not n_ids:\n return len(self._relationships)\n elif not n_ids:\n return len(self._relationships_by_type.get(r_type, ()))\n else:\n return sum(1 for _ in self.relationships(r_type, n_ids))", "def by_type(self, type):\n return self.filter(related_type__title=type)", "def get_relationships_by_parent_genus_type(self, relationship_genus_type):\n # Implemented from template for\n # osid.resource.ResourceLookupSession.get_resources_by_parent_genus_type\n # STILL NEED TO IMPLEMENT!!!\n return objects.RelationshipList([])", "def test_select_by_concept_type__no_matches(self, select_relationships):\n select_relationships.return_value = []\n\n result = FactQuery._select_by_concept_type(Mock(name='concept_types'))\n self.assertEqual([], result)", "def _get_objects(self, object_type, **kwargs):\r\n return self.parent_connection._get_objects(object_type,\r\n sys_id=self.sys_id,\r\n **kwargs)", "def get_relationships_by_genus_type_for_destination(self, destination_id, relationship_genus_type):\n # Implemented from template for\n # osid.relationship.RelationshipLookupSession.get_relationships_by_genus_type_for_destination\n # NOTE: This implementation currently ignores plenary and effective views\n collection = JSONClientValidated('relationship',\n collection='Relationship',\n runtime=self._runtime)\n result = collection.find(\n dict({'destinationId': str(destination_id),\n 'genusTypeId': str(relationship_genus_type)},\n **self._view_filter())).sort('_id', ASCENDING)\n return objects.RelationshipList(result, runtime=self._runtime)", "def _get_objects(self, object_type, **kwargs):\r\n\r\n return self.parent_connection._get_objects(object_type,\r\n sys_id=self.sys_id,\r\n jbod_id=self.jbod_id,\r\n **kwargs)", "def get_queryset(self):\n\n return Relationship.objects.filter(\n Q(from_person=self.request.user.person) |\n Q(to_person=self.request.user.person))", "def relationship_types(self):\n return frozenset(self._relationships_by_type.keys())", "def get_relationships_by_genus_type_for_peers(self, source_id, destination_id, relationship_genus_type):\n # Implemented from template for\n # osid.relationship.RelationshipLookupSession.get_relationships_by_genus_type_for_peers\n # NOTE: This implementation currently ignores plenary and effective views\n collection = JSONClientValidated('relationship',\n collection='Relationship',\n runtime=self._runtime)\n result = 
collection.find(\n dict({'sourceId': str(source_id),\n 'destinationId': str(destination_id),\n 'genusTypeId': str(relationship_genus_type)},\n **self._view_filter())).sort('_id', ASCENDING)\n return objects.RelationshipList(result, runtime=self._runtime)", "def test_select_matching_relationships(self, select_by_values):\n # Set up mocks and test data\n select_by_values.return_value = ['one', 'two']\n test_relationship_type_name = 'eats'\n test_subject_name = 'otter'\n test_object_name = 'mussels'\n test_rel_number = 99\n \n # Make call\n matches = FactQuery._select_matching_relationships(test_relationship_type_name,\n subject_name=test_subject_name,\n object_name=test_object_name,\n relationship_number=test_rel_number)\n # Verify result\n self.assertEqual(['one', 'two'], matches)\n\n # Verify mocks\n select_by_values.assert_called_once_with(relationship_type_name=test_relationship_type_name,\n subject_name=test_subject_name,\n object_name=test_object_name,\n relationship_number=test_rel_number)", "def get_relationships_by_genus_type_for_source(self, source_id, relationship_genus_type):\n # Implemented from template for\n # osid.relationship.RelationshipLookupSession.get_relationships_by_genus_type_for_source\n # NOTE: This implementation currently ignores plenary and effective views\n collection = JSONClientValidated('relationship',\n collection='Relationship',\n runtime=self._runtime)\n result = collection.find(\n dict({'sourceId': str(source_id),\n 'genusTypeId': str(relationship_genus_type)},\n **self._view_filter())).sort('_id', ASCENDING)\n return objects.RelationshipList(result, runtime=self._runtime)", "def search_stix21_objects(rel_list, object_name, rel_type='any') -> list:\n searched_rel_list = list()\n for relationship in rel_list:\n if relationship[3] == rel_type or rel_type == 'any':\n if relationship[0] == object_name and relationship[0] == relationship[2]:\n searched_rel_list.append(relationship)\n else:\n for position in range(len(relationship)):\n if relationship[position] == object_name:\n searched_rel_list.append(relationship)\n return searched_rel_list", "def _get_objects(self, object_type, **kwargs):\r\n\r\n return self.parent_connection._get_objects(object_type,\r\n sys_id=self.sys_id,\r\n brick_id=self.brick_id,\r\n **kwargs)", "def get_relationship_query(self):\n # Implemented from template for\n # osid.resource.ResourceQuerySession.get_resource_query_template\n return queries.RelationshipQuery(runtime=self._runtime)", "def test_which_reptiles__species_subject(self, select_relationships, concept_is_species, \n filter_by_concept_type):\n # Set up mocks and test data\n parsed_query = Mock(name='parsed_query',\n text='which reptiles eat bugs',\n subject_name='reptiles',\n object_name='bugs',\n relationship_type_name='eat',\n relationship_number=3,\n relationship_negation=False)\n fact_query = FactQuery(parsed_query=parsed_query)\n\n mock_match_1 = Mock(name='match_1',\n subject=Mock(concept_name='subject_1'))\n mock_match_2 = Mock(name='match_2',\n subject=Mock(concept_name='subject_2'))\n select_relationships.return_value = [mock_match_1, mock_match_2]\n concept_is_species.side_effect = [False, True]\n filter_by_concept_type.return_value = [mock_match_1, mock_match_2]\n\n # Make call\n results = fact_query._which_animal_query()\n\n # Verify results\n self.assertEqual(set(['subject_1', 'subject_2']), set(results))\n\n # Verify mocks\n select_relationships.assert_called_once_with(\n 'eat', object_name='bugs', relationship_number=3)\n\n call_args_list = 
concept_is_species.call_args_list\n self.assertEqual(2, len(call_args_list))\n self.assertEqual(call('bugs'), call_args_list[0])\n self.assertEqual(call('reptiles'), call_args_list[1])\n\n filter_by_concept_type.assert_called_once_with(\n [mock_match_1, mock_match_2], 'reptiles', relationship_attr='subject')", "def get_relationships_by_query(self, relationship_query):\n # Implemented from template for\n # osid.resource.ResourceQuerySession.get_resources_by_query\n and_list = list()\n or_list = list()\n for term in relationship_query._query_terms:\n if '$in' in relationship_query._query_terms[term] and '$nin' in relationship_query._query_terms[term]:\n and_list.append(\n {'$or': [{term: {'$in': relationship_query._query_terms[term]['$in']}},\n {term: {'$nin': relationship_query._query_terms[term]['$nin']}}]})\n else:\n and_list.append({term: relationship_query._query_terms[term]})\n for term in relationship_query._keyword_terms:\n or_list.append({term: relationship_query._keyword_terms[term]})\n if or_list:\n and_list.append({'$or': or_list})\n view_filter = self._view_filter()\n if view_filter:\n and_list.append(view_filter)\n if and_list:\n query_terms = {'$and': and_list}\n collection = JSONClientValidated('relationship',\n collection='Relationship',\n runtime=self._runtime)\n result = collection.find(query_terms).sort('_id', DESCENDING)\n else:\n result = []\n return objects.RelationshipList(result, runtime=self._runtime, proxy=self._proxy)", "def test_find_relation_types(self):\n pass", "def test_animal_attribute_query__species_subject(self, select_relationships, \n concept_is_species, filter_by_concept_type):\n # Set up mocks and test data\n mock_1 = Mock(name='mock_1')\n mock_2 = Mock(name='mock_2')\n mock_3 = Mock(name='mock_3')\n select_relationships.side_effect = [[], [mock_1, mock_2, mock_3]]\n concept_is_species.return_value = True\n filter_by_concept_type.return_value = [mock_1, mock_2]\n\n parsed_query = Mock(name='parsed_query',\n text='do birds have wings',\n subject_name='birds',\n object_name='wings',\n relationship_type_name='have',\n relationship_number=2)\n fact_query = FactQuery(parsed_query=parsed_query)\n\n # Make call\n result = fact_query._animal_attribute_query()\n \n # Verify results\n self.assertEqual('yes', result)\n\n # Verify mocks\n call_args_list = select_relationships.call_args_list\n self.assertEqual(2, len(call_args_list))\n expected_calls = [\n call('have', subject_name='birds', object_name='wings', relationship_number=2),\n call('have', object_name='wings', relationship_number=2)]\n self.assertEqual(expected_calls, call_args_list)", "def relation( self, obj, relType ):\n raise NotImplementedError(\"relation\")", "def relationships(self, r_type=None, n_ids=()):\n if r_type is None:\n r_sets = []\n else:\n r_sets = [self._relationships_by_type.get(r_type, frozenset())]\n if not n_ids or (hasattr(n_ids, \"__iter__\") and all(n_id is None for n_id in n_ids)):\n pass\n elif isinstance(n_ids, Sequence):\n for n_index, n_id in enumerate_nodes(n_ids):\n if n_id is not None:\n r_sets.append({r_id for r_id, i in self._relationships_by_node.get(n_id, ())\n if i == n_index})\n elif isinstance(n_ids, Set):\n for n_id in n_ids:\n if n_id is not None:\n r_sets.append({r_id for r_id, i in self._relationships_by_node.get(n_id, ())})\n else:\n raise TypeError(\"Nodes must be supplied as a Sequence or a Set\")\n if r_sets:\n return iter(reduce(and_operator, r_sets))\n else:\n return iter(self._relationships)", "def test_get_relationship_templates(self):\n pass" ]
[ "0.6642475", "0.6234097", "0.61171645", "0.5905707", "0.573527", "0.5697212", "0.558768", "0.55415577", "0.5488156", "0.5187914", "0.5047478", "0.50362355", "0.5031117", "0.5009696", "0.5007061", "0.4995968", "0.4954581", "0.49293298", "0.49109417", "0.48943478", "0.4888402", "0.48587754", "0.484546", "0.48172846", "0.48087224", "0.48063368", "0.47886306", "0.47740555", "0.47653723", "0.47555318" ]
0.7105433
0
Validate requests decorator with Cerberus
def validate_request_cerberus(schema):
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            body_json = request.get_json()
            current_app.logger.info(body_json)
            v = Validator(schema, require_all=True)
            v.allow_unknown = True  # TODO: allow request params other then the ones defined on the schema level
            if not v.validate(body_json):
                valid_params_list = ', '.join(schema.keys())
                return response_fail(f"You must call with all request params: {valid_params_list}")
            return func(*args, **kwargs)
        return wrapper
    return decorator
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def valid_request(func):\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n try:\n return func(*args, **kwargs)\n except BadRequest as e:\n raise InvalidRequest(description='request parameters, queries or body format are invalid.',\n code=e.code, message=e.data.get('message'))\n\n return wrapper", "def validate_twilio_request(func):\n @wraps(func)\n def decorated_function(request, *args, **kwargs):\n # Create an instance of the RequestValidator class\n validator = RequestValidator(os.environ.get('TWILIO_AUTH_TOKEN'))\n\n # Validate the request using its URL, POST data,\n # and X-TWILIO-SIGNATURE header\n request_valid = validator.validate(\n request.build_absolute_uri(),\n request.POST,\n request.META.get('HTTP_X_TWILIO_SIGNATURE', ''))\n\n # Continue processing the request if it's valid, return a 403 error if\n # it's not\n if request_valid:\n return func(request, *args, **kwargs)\n else:\n return HttpResponseForbidden()\n return decorated_function", "def check_request(request_schema):\n def decorator(f):\n @functools.wraps(f)\n def wrapper(self, addr, request):\n data, err = request_schema.load(request)\n if err:\n return Header.ERROR, Error.WRONG_REQUEST\n else:\n return f(self, addr, data)\n\n return wrapper\n return decorator", "def validate_request(f):\n\n @wraps(f)\n def wrap(self, **kwargs):\n\n data = {}\n is_error, errmsg, req = DomainConstraintView._get_req_data(kwargs)\n if is_error:\n return errmsg\n\n try:\n for key in req:\n if key == 'convalidated':\n data[key] = True if (req[key] == 'true' or req[key] is\n True) else False\n else:\n data[key] = req[key]\n\n except Exception as e:\n return internal_server_error(errormsg=str(e))\n\n self.request = data\n return f(self, **kwargs)\n\n return wrap", "def check_request(views_func):\n @wraps(views_func)\n def wrapper(*args, **kwargs):\n try:\n return views_func(*args, **kwargs)\n except (KeyError, ValueError) as ex:\n return HttpResponseBadRequest(str(ex))\n return wrapper", "def check_csrf_token(func):\n def new_fn(self, req):\n if 'csrf_token' not in req.params:\n return exc.HTTPForbidden(\"You must provide a CSRF token\")\n\n csrf_token = req.params['csrf_token']\n if not security.valid_csrf_token(csrf_secret, csrf_token):\n return exc.HTTPForbidden(\"Invalid CSRF token\")\n\n return func(self, req)\n\n new_fn.exposed = True\n return new_fn", "def validate_json(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n\n try:\n request.json\n except:\n return bad_request()\n\n return func(*args, **kwargs)\n\n return wrapper", "def request_is_valid(request):\n return 'method' in request", "def validator(data):\n\n request_validator = cerberus.Validator(SCHEMA)\n if request_validator.validate(data):\n return True\n else:\n return request_validator.errors", "def validate_schema(schema):\n def decorator(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n\n try:\n validate(request.json, schema)\n except:\n return bad_request()\n\n return func(*args, **kwargs)\n\n return wrapper\n\n return decorator", "def validate(**vkargs):\r\n depr('Use route wildcard filters instead.')\r\n def decorator(func):\r\n @functools.wraps(func)\r\n def wrapper(*args, **kargs):\r\n for key, value in vkargs.iteritems():\r\n if key not in kargs:\r\n abort(403, 'Missing parameter: %s' % key)\r\n try:\r\n kargs[key] = value(kargs[key])\r\n except ValueError:\r\n abort(403, 'Wrong parameter format for: %s' % key)\r\n return func(*args, **kargs)\r\n return wrapper\r\n return decorator", "def checkIsValid(f):\n\n @wraps(f)\n def wrapper(self, *args, 
**kwargs):\n if self.validator.isValid:\n return f(self, *args, **kwargs)\n else:\n error = self.validator._exceptionClass('Called: {} method before data validated'.format(f.__name__))\n self.validator._errors[f.__name__] = error\n if self.validator._errorHandler is not None:\n self.validator._errorHandler(error, self.getValidationContext())\n return\n\n return wrapper", "def validate(schema):\n def decorator(func):\n def wrapper(self, req, resp, *args, **kwargs):\n try:\n raw_json = req.stream.read()\n obj = json.loads(raw_json.decode('utf-8'))\n obj['req_id'] = req.context.get('request_id')\n except Exception:\n raise falcon.HTTPBadRequest(\n title='Invalid data',\n description='Could not properly parse the provided data as JSON',\n code='001'\n )\n\n try:\n jsonschema.validate(obj, schema)\n except jsonschema.ValidationError as e:\n raise falcon.HTTPBadRequest(\n title='Failed data validation',\n description=e.message,\n code='002'\n )\n\n return func(self, req, resp, *args, parsed=obj, **kwargs)\n return wrapper\n return decorator", "def post_required(func):\n def post_wrapper(request,*args,**kwds):\n res = http.ResponseBuilder()\n if request.method != 'POST':\n return res.error(\"post is required\").build_json()\n return func(request,*args,**kwds)\n return post_wrapper", "def schema_validation(schema):\n def decorator(function):\n @wraps(function)\n def wrapper(*args, **kwargs):\n data = {}\n if request.method in ['POST', 'PATCH', 'PUT']:\n data = request.get_json(force=True)\n elif request.method in ['GET', 'DELETE']:\n data = request.args.to_dict()\n\n v = Validator(schema)\n v.allow_unknown = True\n if v.validate(data):\n return function(*args, **kwargs)\n else:\n return jsonify({'errors': v.errors}), 400\n\n return wrapper\n return decorator", "def do_validate(self, request, _object):\n\n pass", "def check(self):\n invalid = []\n\n if not self.route:\n invalid.append(('route', 'missing'))\n elif not self.route[1] in ['GET', 'POST', 'PUT']:\n invalid.append(('route', 'invalid method: %s' % self.route[1]))\n\n has_2xx = False\n for rcode in self.return_codes:\n code = rcode[0]\n if code >= 200 and code < 300:\n has_2xx = True\n break\n if not has_2xx:\n invalid.append(('return_codes', 'Missing succes return code doc'))\n\n if self.client_auth is None:\n invalid.append(\n ('client_auth', 'Please provide client auth requirement'))\n\n if self.user_auth is None:\n invalid.append(\n ('user_auth', 'Please provide user auth requirement'))\n\n if invalid:\n msgs = []\n for error in invalid:\n msgs.append(\"%s: %s\" % error)\n raise ValueError(\n \"APIFunc for %s is invalid: %s\"\n % (self.viewfunc.__name__,\n ', '.join(msgs)))", "def user_required(f):\n def decorator(*args, **kwargs):\n if \"user\" not in g:\n abort(401)\n return f(*args, **kwargs)\n return decorator", "def check_honeypot(func=None, field_name=None):\n def inner(request, *args, **kwargs):\n response = verify_honeypot_value(request, field_name)\n if response:\n return response\n else:\n return func(request, *args, **kwargs)\n inner = wraps(func)(inner)\n\n if func is None:\n def decorator(func):\n return inner\n return decorator\n return inner", "def require_arguments(required):\n\n def decorator(func):\n def wrapper(request):\n request_params = get_dict_from_request(request)\n for param in required:\n if param not in request_params:\n return APIMissingArgumentResponse(error_msg=param)\n return func(request)\n\n return wrapper\n\n return decorator", "def token_required(func):\n def func_wrapper(self, *args, **kwargs):\n 
auth_token = self.request.headers.get('X-Auth-Token',\n self.request.get('token', ''))\n namespace = self.request.route_kwargs.get('namespace', '')\n try:\n token = base64.urlsafe_b64decode(str(auth_token))\n except TypeError:\n self.abort(412, 'Please update your token')\n try:\n token = auth_models.AuthToken.query(\n auth_models.AuthToken.token == token\n ).get()\n except datastore_errors.BadValueError:\n self.abort(401, 'Incorrect token')\n try:\n payload = jwt.decode(token.token, config.JWT_SECRET,\n algorithms=config.JWT_HASH_ALGORITHM)\n except (jwt.DecodeError, AttributeError):\n return self.abort(401)\n if payload['namespace'] != namespace:\n return self.abort(412, 'Token payload is incorrect.')\n return func(self, *args, **kwargs)\n return func_wrapper", "def validate_user_data(func):\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n user_data = request.get_json()\n if not user_data:\n return {\"message\": \"bad request\"}, 400\n elif \"first_name\" not in user_data or \"last_name\" not in user_data \\\n or \"email\" not in user_data or \"password\" not in user_data:\n return {\"message\": \"bad request, give the required data\"}, 400\n elif user_data[\"first_name\"] == \"\" or user_data[\"last_name\"] == \"\" \\\n or user_data[\"email\"] == \"\" or user_data[\"password\"] == \"\":\n return {\"message\": \"bad request , enter all the required data\"}, 400\n elif user_data[\"first_name\"] == \" \" or user_data[\"last_name\"] == \" \" \\\n or user_data[\"email\"] == \" \" or user_data[\"password\"] == \" \":\n return {\"message\": \"bad request , enter all the required data\"}, 400\n elif \"@\" not in user_data[\"email\"] or \".\" not in user_data[\"email\"]:\n return {\"message\": \"invalid email provided\"}, 400\n return func(*args, **kwargs)\n return wrapper", "def __validate():\n # TODO: implement", "def requires_post(func):\n def decorator(request, *args, **kwargs):\n if DEBUG or request.method == 'POST':\n return func(request, *args, **kwargs)\n return HttpResponseNotAllowed(['POST'])\n return decorator", "def check_input(inputs: Optional[InputType] = None, **kwargs) -> None:\n\n if inputs is None:\n # empty inputs is considered as valid\n return\n\n if hasattr(inputs, '__call__'):\n # it is a function\n inputs = inputs()\n\n kwargs['data'] = inputs\n kwargs['exec_endpoint'] = '/'\n\n if inspect.isasyncgenfunction(inputs) or inspect.isasyncgen(inputs):\n raise ValidationError(\n 'checking the validity of an async generator is not implemented yet'\n )\n\n try:\n from ..request import request_generator\n\n r = next(request_generator(**kwargs))\n if not isinstance(r, Request):\n raise TypeError(f'{typename(r)} is not a valid Request')\n except Exception as ex:\n default_logger.error(f'inputs is not valid!')\n raise BadClientInput from ex", "def validate_request(response):\n openapi_spec = get_openapi_spec()\n\n request = TornadoOpenAPIRequest(response.request, openapi_spec)\n if V30RequestValidator:\n result = V30RequestValidator(openapi_spec).validate(request)\n else:\n result = openapi_request_validator.validate(openapi_spec, request)\n result.raise_for_errors()\n\n response = TornadoOpenAPIResponse(response)\n if V30ResponseValidator:\n result2 = V30ResponseValidator(openapi_spec).validate(request, response)\n else:\n result2 = openapi_response_validator.validate(openapi_spec, request, response)\n result2.raise_for_errors()", "def validate():", "def request_fields(*req_args):\n\tdef decorator(f):\n\t\t@wraps(f)\n\t\tdef decorated(*args, **kwargs):\n\t\t\tif not g.req: return 
json_response(dict(description='JSON object must be passed as HTTP body with this request'), 422)\n\t\t\tmissing = []\n\t\t\tfor arg in req_args:\n\t\t\t\tif not g.req.has_key(arg): missing.append(arg)\n\t\t\tif missing: return json_response(dict(description='Mandatory request fields missing', missing_fields=missing), 422)\n\t\t\treturn f(*args, **kwargs)\n\t\treturn decorated\n\treturn decorator", "def http_var_required(parameter_name):\n def wrap(func):\n def decorator(request, *args, **kwargs):\n if not (parameter_name in request.POST or parameter_name in request.GET):\n return HttpResponseBadRequest('Please define GET or POST parameter '+parameter_name)\n return func(request, *args, **kwargs)\n return decorator\n return wrap", "def valid(self, *args, **kwargs) -> Any:\n pass" ]
[ "0.68247503", "0.6648503", "0.6610281", "0.65511626", "0.64839965", "0.5961608", "0.59245205", "0.5921186", "0.589062", "0.5860312", "0.5833157", "0.5833084", "0.5802698", "0.57723325", "0.5756725", "0.5740091", "0.57035977", "0.5687878", "0.5684185", "0.5676626", "0.5673376", "0.5663634", "0.56463146", "0.5627601", "0.56071466", "0.5602063", "0.5601931", "0.55951035", "0.5592164", "0.55883795" ]
0.70384467
0
Plots the graph. If the nodes have a position, the nodes will be placed there. Otherwise, they will be placed in a random but elegant manner.
def plot_graph(self) -> None:
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_graph(self) -> None:\n\n nodes_on_graph = self.dw_graph.get_all_v()\n for k, v in nodes_on_graph.items():\n if v.position is None:\n x_rand = random.uniform(0.5, self.dw_graph.v_size())\n y_rand = random.uniform(0.5, self.dw_graph.v_size())\n v.position = (x_rand, y_rand)\n x_vals = []\n y_vals = []\n n = list(nodes_on_graph.keys())\n for k, v in nodes_on_graph.items(): # draw nodes\n x_vals.append(v.position[0])\n y_vals.append(v.position[1])\n\n fig, ax = plt.subplots()\n plt.plot(x_vals, y_vals, 'ro', markersize=5, data=\"d\")\n\n for p, txt in enumerate(n):\n ax.annotate(n[p], (x_vals[p]+0.00003, y_vals[p]), color='g')\n\n for n in nodes_on_graph:\n n1 = self.dw_graph.get_nodes(n)\n x = n1.position[0]\n y = n1.position[1]\n for r in self.dw_graph.all_out_edges_of_node(n):\n dx = self.dw_graph.get_nodes(r).position[0]\n dy = self.dw_graph.get_nodes(r).position[1]\n ax.quiver(x, y, dx-x, dy-y, angles='xy', scale_units='xy', scale=1)\n #plt.arrow(x, y, dx - x, dy - y, head_width=0.0009, width=0.00005, length_includes_head=True)\n\n\n plt.xlabel(\"x axis \")\n plt.ylabel(\"y axis \")\n plt.title(\"The title of the graph\")\n plt.show()", "def plot_nodes(self, node_list):\n points = Marker()\n #visualizations points and lines..\n points.header.frame_id = \"map\"\n points.header.stamp = rospy.get_rostime()\n points.ns = \"markers\"\n points.id = 0\n points.type = points.POINTS\n points.action = points.ADD\n points.pose.orientation.w = 1.0\n points.scale.x = 2*self.rviz_tuning_plt\n points.scale.y = 2*self.rviz_tuning_plt\n points.color.r = 0.0\n points.color.g = 1.0\n points.color.b = 0.0\n points.color.a = 1.0\n points.lifetime = rospy.Duration()\n\n for node in node_list:\n p1 = Point()\n p1.x = node.x\n p1.y = node.y\n p1.z = 0.01\n points.points.append(p1)\n \n self.pub_nodes.publish(points)", "def draw_nodes(self):\n pass", "def plot_graph(self):\n g = self.get_graph()\n plt.title(\"Our graph:\" + g.__str__())\n plt.xlabel(\"X\")\n plt.ylabel(\"-<\") # I should flip 'Y' letter so I decided to write it by a tricky way. 
:)\n for src, node in g.get_all_v().items():\n # Print the node point\n if node.location is None:\n pos = self.get_random_location() # get a elegant location\n node.location = GeoLocation(pos)\n plt.plot(node.location.x, node.location.y, marker='o', markerfacecolor='red', markersize=3, color='yellow')\n plt.text(node.location.x, node.location.y, str(node.key))\n # Print the edge line\n for dest in g.all_out_edges_of_node(src).keys():\n x1 = g.get_all_v()[src].location.x\n y1 = g.get_all_v()[src].location.y\n if g.get_all_v()[dest].location is None:\n pos = self.get_random_location()\n g.get_all_v()[dest].location = GeoLocation(pos)\n g.get_all_v()[dest].location = GeoLocation(pos)\n x2 = g.get_all_v()[dest].location.x\n y2 = g.get_all_v()[dest].location.y\n plt.arrow(x1, y1, x2 - x1, y2 - y1, width=0.00001, linewidth=0.05)\n plt.show()", "def plot_nodes(self,x_shift,y_shift):\n\n if not self.nodes: return # Bounce if option not selected\n\n self.ax.scatter(self.node_crds[:,0]+x_shift*self.pbc[0],self.node_crds[:,1]+y_shift*self.pbc[1],\n marker=\"o\",s=self.ms,c=self.mc,zorder=1)\n\n # for i,c in enumerate(self.node_crds):\n # self.ax.text(c[0],c[1],i,size=8)", "def plot(self):\n layout = self.graph.layout(\"kk\")\n bbox = igraph.BoundingBox(600, 600)\n figure = igraph.Plot(bbox=bbox, background=\"white\")\n bbox = bbox.contract(100)\n figure.add(self.graph, layout = layout, bbox=bbox)\n figure.show()", "def plot_graph(self):\n plt.axis(\"off\")\n pos = nx.kamada_kawai_layout(self.graph)\n return nx.draw_networkx(self.graph, pos=pos, node_size=400)", "def draw(self):\n\t\tnx_graph = self.parse_graph()\n\t\tpos = nx.spring_layout(nx_graph, k=0.15, iterations=20) # to spread out the nodes\n\n\t\tnx.draw(nx_graph, pos, edge_color=\"black\", width=1, linewidths=1, node_size=500, node_color=\"pink\", alpha=0.9, with_labels=True)\n\n\t\tedge_labels = {(edge[0], edge[1]):edge[2] for edge in self.edges}\n\t\tnx.draw_networkx_edge_labels(nx_graph, pos, edge_labels=edge_labels, font_color='red')\n\n\t\tplt.show()", "def draw_graph(graph, node_positions):\n nx.draw_networkx_nodes(graph, node_positions, node_color=set_colors(graph),\n node_size=50)\n nx.draw_networkx_edges(graph, node_positions, width=0.3, alpha=0.5)", "def plot_graph(self, input_graph, NX_GRAPHS):\n self.dgl_graph = input_graph\n self.NX_GRAPHS = NX_GRAPHS\n \n self.get_nodes()\n color_monomer = self.get_colors()\n \n print(dict(zip(range(len(self.nodes_list)), self.nodes_list)))\n print('Key Monomer is', self.nodes_list[np.argmax(self.node_weights)])\n \n fig, ax = plt.subplots()\n nx.draw_networkx(\n dgl.to_networkx(self.dgl_graph),\n arrows=False,\n node_size = 300*10**self.node_weights,\n node_color = [color_monomer[node] for node in self.nodes_list],\n font_size = 18,\n font_color = 'w',\n font_weight = 'bold',)\n\n plt.axis('off')\n ax.set_xlim([1.2*x for x in ax.get_xlim()])\n ax.set_ylim([1.2*y for y in ax.get_ylim()])\n plt.show()", "def paint(self):\n x = []\n y = []\n plt.figure(figsize=(10, 5), facecolor=\"silver\")\n ax = plt.axes()\n for node in self.graph.nodes.values():\n x.append(node.get_pos()[0])\n y.append(node.get_pos()[1])\n ax.scatter(x, y, color=\"black\", s=50)\n xl = ax.get_xlim()[1] - ax.get_xlim()[0]\n yl = ax.get_ylim()[1] - ax.get_ylim()[0]\n for nd in self.graph.nodes.values():\n for ed in self.graph.all_out_edges_of_node(Node.get_key(nd)).keys():\n desti: Node = self.graph.get_node(ed)\n destx = desti.get_pos()[0] - nd.get_pos()[0]\n desty = desti.get_pos()[1] - nd.get_pos()[1]\n 
ax.arrow(nd.get_pos()[0], nd.get_pos()[1], destx, desty, head_width=xl * 0.007,\n length_includes_head=True,\n head_length=yl * 0.02, width=xl * 0.0001 * yl, color='grey')\n plt.title(\"Your graph!\")\n plt.show()", "def draw_graph(self):\r\n G=nx.Graph()\r\n \r\n list_location1 = []\r\n list_location2 = []\r\n list_location3 = []\r\n list_location4 = []\r\n \r\n for citizen in self.citizens:\r\n G.add_node(citizen.id)\r\n if citizen.location == 1:\r\n list_location1.append(citizen.id)\r\n elif citizen.location == 2:\r\n list_location2.append(citizen.id)\r\n elif citizen.location == 3:\r\n list_location3.append(citizen.id)\r\n else: \r\n list_location4.append(citizen.id)\r\n\r\n for citizen in self.citizens:\r\n for friend in citizen.friends:\r\n G.add_edge(citizen.id,friend.id)\r\n\r\n pos = nx.random_layout(G)\r\n nx.draw_networkx_nodes(G,pos,node_size=60,nodelist=list_location1, node_color='r')\r\n nx.draw_networkx_nodes(G,pos,node_size=60,nodelist=list_location2, node_color='g')\r\n nx.draw_networkx_nodes(G,pos,node_size=60,nodelist=list_location3, node_color='b')\r\n nx.draw_networkx_nodes(G,pos,node_size=60,nodelist=list_location4, node_color='y')\r\n nx.draw_networkx_edges(G,pos, width=1)\r\n\r\n plt.show()", "def plot_graph(self, graph, subplot=False, axes=None):\n if subplot:\n plt.sca(axes[1, 1])\n axes[1, 1].axis('off')\n else:\n plt.figure(figsize=(5, 5))\n if len(graph.nodes) == 4:\n pos = {(0, 0): [0, 1], (0, 1): [1, 1], (1, 0): [0, 0], (1, 1): [1, 0]}\n else:\n pos = nx.circular_layout(graph)\n nx.draw_networkx_nodes(\n graph, pos, node_size=1800, node_color='w', edgecolors='k')\n nx.draw_networkx_edges(\n graph,\n pos,\n node_size=1800,\n edge_color='k',\n arrowstyle='->',\n arrowsize=10,\n width=3)\n nx.draw_networkx_labels(self.G, pos, {x: x for x in self.V}, font_size=14)", "def plot_network(genome):\n g = genome.n\n # width = g.graph[\"size\"]\n # height = g.graph[\"size\"]\n\n # fig = plt.figure(figsize=(width,height))\n fig = plt.figure()\n fig.patch.set_facecolor('white')\n ax = fig.add_subplot(111, aspect='equal')\n # ax.set_axis_off()\n\n # collision_coords = find_collisions(genome)\n # das_coords = find_das_extended(genome)\n # slp_coords = find_slp(genome)\n slp_nodes = find_attacker_path(genome.n)\n\n # Plot the parent-child tree\n for n in g.nodes_iter():\n if g.node[n][\"parent\"] is not None:\n _line(g.node[n][\"coord\"], g.node[g.node[n][\"parent\"]][\"coord\"], zorder=0, color='k')\n\n for n in g.nodes_iter():\n coord = g.node[n][\"coord\"]\n shape = _circles\n colour = 'b'\n s = 0.4\n if n in slp_nodes:\n shape = _hexagons\n colour = 'y'\n s = 0.45\n if n == g.graph[\"source\"]:\n shape = _squares\n colour = 'g'\n if n == g.graph[\"sink\"]:\n shape = _octogons\n colour = 'k'\n s = 0.45\n shape(coord[0], coord[1], s, fc=\"white\", ec=colour)\n if(len(str(g.node[n][\"slot\"])) == 1):\n ax.text(coord[0]-0.15, coord[1]+0.15, str(g.node[n][\"slot\"]))\n elif(len(str(g.node[n][\"slot\"])) == 2):\n ax.text(coord[0]-0.25, coord[1]+0.15, str(g.node[n][\"slot\"]))\n elif(len(str(g.node[n][\"slot\"])) == 3):\n ax.text(coord[0]-0.4, coord[1]+0.15, str(g.node[n][\"slot\"]))\n else:\n ax.text(coord[0]-0.5, coord[1]+0.15, str(g.node[n][\"slot\"]))\n\n\n plt.gca().invert_yaxis()\n fig.show()", "def plotGraph(self, title = \"Multi Layer Perceptron (MLP)\"):\n graph, pos, colorMap = self.getGraph()\n\n fig = plt.figure()\n fig.canvas.set_window_title(\"Neural Network\")\n plt.plot()\n nx.draw_networkx_nodes(graph,pos, node_color = colorMap)\n 
nx.draw_networkx_edges(graph,pos)\n plt.axis('off')\n plt.title(title)\n #plt.savefig(\"autoencoder.svg\", transparent = True)\n plt.show()", "def plot_graph():\n name = request.args.get('instance')\n name = str(name)\n distance = request.args.get('distance')\n path = request.args.get('path')\n if name == 'Custom':\n coords = request.args.get('coords')\n coords = str(coords)\n nodes = custom_nodes(coords)\n else:\n nodes = create_nodes(name)\n fig = Figure()\n axis = fig.add_subplot(1, 1, 1)\n\n axis.set_title(name + \" - Distance: \"+ str(distance))\n path = str(path).split(',')\n path = [int(i) for i in path]\n for i in range(len(path) - 1):\n\n start_node = nodes[path[i]]\n x1, y1 = start_node.x, start_node.y\n axis.scatter(x1, y1, c = 'b', label = str(path[i]))\n axis.text(x1,y1, str(path[i]))\n end_node = nodes[path[i+1]]\n x2, y2 = end_node.x, end_node.y\n axis.plot([x1,x2], [y1, y2])\n\n last_node = nodes[path[len(path)-1]]\n x1, y1 = last_node.x, last_node.y\n axis.text(x1,y1, str(path[len(path)-1]))\n\n begin_node = nodes[path[0]]\n x2, y2 = begin_node.x, begin_node.y\n axis.scatter(x1, y1, c = 'b', label = str(path[len(path)-1]))\n axis.plot([x1,x2], [y1, y2])\n\n output = io.BytesIO()\n FigureCanvas(fig).print_png(output)\n return Response(output.getvalue(), mimetype=\"image/png\")", "def plot_nodes(self,ax=None,mask=None,values=None,sizes=20,labeler=None,clip=None,\n **kwargs):\n ax=ax or plt.gca()\n \n if mask is None:\n mask=~self.nodes['deleted']\n\n if clip is not None: # convert clip to mask\n mask=mask & self.node_clip_mask(clip)\n\n if values is not None:\n values=values[mask]\n kwargs['c']=values\n\n if labeler is not None:\n if labeler=='id':\n labeler=lambda n,rec: str(n)\n \n # weirdness to account for mask being indices vs. bitmask\n for n in np.arange(self.Nnodes())[mask]: # np.nonzero(mask)[0]:\n ax.text(self.nodes['x'][n,0],\n self.nodes['x'][n,1],\n labeler(n,self.nodes[n]))\n\n coll=ax.scatter(self.nodes['x'][mask][:,0],\n self.nodes['x'][mask][:,1],\n sizes,\n **kwargs)\n request_square(ax)\n return coll", "def draw_points():\n\n for node in self._nodes:\n\n x = node_properties[\"node_x\"][node]\n y = node_properties[\"node_y\"][node]\n ax.scatter(\n x,\n y,\n zorder=10,\n edgecolors=\"k\",\n linewidths=0.5,\n **self.get_node_data(node),\n )\n\n for label in self._nodes:\n\n x = node_properties[\"label_x\"][label]\n y = node_properties[\"label_y\"][label]\n rotation = node_properties[\"rotation\"][label]\n ha = node_properties[\"ha\"][label]\n\n attr = {**dict(backgroundcolor=\"white\"), **text_attr}\n ax.text(\n x,\n y,\n textwrap.shorten(text=label, width=TEXTLEN),\n rotation=rotation,\n ha=ha,\n va=\"center\",\n rotation_mode=\"anchor\",\n bbox=dict(\n facecolor=\"w\",\n alpha=1.0,\n edgecolor=\"gray\",\n boxstyle=\"round,pad=0.5\",\n ),\n zorder=11,\n **attr,\n )", "def create_graph(self):\n robot_pix = int(math.ceil(self.robot.size / self.resolution))\n ii = 0\n jj = 0\n for i in range(0, self.height, robot_pix):\n jj = 0\n for j in range(0, self.width, robot_pix):\n block = self.occ_grid[i:i+robot_pix, j:j+robot_pix].flatten()\n avg = np.mean(block)\n robot_block = self.tesselation_image[i:i+robot_pix, j:j+robot_pix].flatten()\n n_occur = np.bincount(robot_block)\n block_id = np.argmax(n_occur)\n \n p = Pose()\n p.position.x = self.resolution * j + self.resolution / 2.0 + self.origin.position.x\n p.position.y = self.height * self.resolution - (self.resolution * i + self.resolution / 2.0) + self.origin.position.y\n node = Node(ii, jj, p)\n idx = 
np.where(block > 20)\n if block_id == self.robot.robot_id:\n if 0 <= avg <= 20:\n print(\"Node in path\", node)\n node.valid = True\n else:\n node.valid = False\n elif block_id == 0:\n node.valid = False\n else:\n node.belongs = False\n self.nodes[ii,jj] = node\n jj += 1\n ii += 1\n\n\n height, width = self.nodes.shape\n print(\"Node shape: \", self.nodes.shape)\n for i in range(height):\n for j in range(width):\n min_i = max(0, i-1)\n max_i = min(height - 1, i+1) + 1\n min_j = max(0, j-1)\n max_j = min(width - 1, j+1) + 1\n\n node = self.nodes[i,j]\n neighbors = self.nodes[min_i:max_i, min_j:max_j].flatten()\n for n in neighbors:\n if not n or not node:\n print(\"None %d-%d\"%(i,j))\n continue\n if n != node:\n if n.valid:\n print(\"Neighbor appended\")\n self.nodes[i,j].neighbors.append(n)\n else:\n self.nodes[i,j].obstacle_neighbors.append(n)\n print(\"Graph is created!\")", "def plot_nodes(self, filename, **kwargs):\n\n g = graph.create_nx_graph(self.es, filename=filename, **kwargs)\n\n return g", "def fullgraphplot(time_lower,time_upper):\n\n edges_list,node_list,title_list = graphformation(time_lower,time_upper)\n node_size = []\n for i in range(len(node_list)):\n node_size.append(5)\n g = Network(\n height=\"750px\",\n width=\"100%\",\n bgcolor=\"#222222\",\n font_color=\"white\")\n g.add_nodes(node_list,label=node_list,title=title_list, size= node_size)\n g.add_edges(edges_list)\n g.show(\"nx.html\")\n return", "def _draw_nodes(ax: mpl.axes.Subplot, graph: nx.classes.Graph,\n pos: dict, draw_labels: False) -> dict:\n degree = np.array([deg for node, deg in graph.degree], dtype=float)\n degree /= degree.sum()\n\n flare_kwargs = {'alpha' : 0.2,\n 'edgecolor': (0, 0, 0, 1),\n 'facecolor': None}\n\n node_kwargs = {'alpha' : 0.8,\n 'edgecolor': (0, 0, 0, 1),\n 'facecolor': None}\n\n nodes = {}\n node_params = zip(pos.items())\n\n for i, (label, xy) in enumerate(pos.items()):\n size = graph.nodes[label]['size']\n fsize = graph.nodes[label]['fsize']\n flare_kwargs['facecolor'] = 'C{}'.format(i)\n flare = patches.Circle(xy, fsize, **flare_kwargs)\n\n node_kwargs['facecolor'] = 'C{}'.format(i)\n node = patches.Circle(xy, size, **node_kwargs)\n\n ax.add_patch(flare)\n ax.add_patch(node)\n if draw_labels:\n font_style = {'size':15, 'weight':'bold'}\n text_kwargs = {'color': (0, 0, 0, .8),\n 'verticalalignment': 'center',\n 'horizontalalignment': 'center',\n 'fontdict': font_style}\n ax.text(*xy, i+1, **text_kwargs)\n\n nodes[label] = node\n return nodes", "def display_graph(variables, relations):\n graph = as_networkx_graph(variables, relations)\n\n # Do not crash if matplotlib is not installed\n try:\n import matplotlib.pyplot as plt\n\n nx.draw_networkx(graph, with_labels=True)\n # nx.draw_random(graph)\n # nx.draw_circular(graph)\n # nx.draw_spectral(graph)\n plt.show()\n except ImportError:\n print(\"ERROR: cannot display graph, matplotlib is not installed\")", "def plot_graph(G):\r\n pos = nx.random_layout(G)\r\n nx.draw(G, pos)\r\n edge_labels = dict([((u, v, ), d['label']) for u, v, d in\r\n G.edges(data=True)])\r\n nx.draw_networkx_edge_labels(G, pos, edge_labels=edge_labels)\r\n nx.draw_networkx_labels(G, pos, labels={i:i for i in G.nodes()},\r\n font_size=16)\r\n plt.show()", "def drawGraph(G, novel_title):\n # Drawing with network x\n page_rank = nx.pagerank(G)\n \n pos = nx.nx_pydot.graphviz_layout(G)\n plt.figure(figsize=(15,10))\n\n font = {'fontsize' : 14}\n plt.title('Character Network for: ' + novel_title, font)\n \n label_pos = {}\n for i in pos:\n label_pos[i] = (pos[i][0] 
, pos[i][1] - (math.exp(page_rank[i]) * 12))\n \n labels = nx.draw_networkx_labels(G, label_pos, font_weight = 'bold', font_size = 9)\n nodes = nx.draw_networkx_nodes(G, pos, \n node_size = [2000 * page_rank[i] for i in list(nx.nodes(G))],\n node_color = range(len(nx.pagerank(G))),\n cmap = plt.cm.Spectral)\n \n nodes.set_edgecolor('black')\n \n nx.draw_networkx_edges(G, pos, edge_color = 'grey', alpha = .70)\n plt.axis('off')\n plt.savefig('test.png')\n plt.show()", "def show_plot(self):\r\n\t\tself.generate_plot()\r\n\t\tplt.show()", "def showGraph(self):\r\n self.graph_button['state'] = 'disabled'\r\n # Draw connection Graph\r\n self.axGraph.set_visible(True)\r\n nx.draw(self.G, ax=self.axGraph, with_labels=True)\r\n self.canvasPlot.draw()\r\n self.canvasPlot.flush_events()", "def visualize(G, color=None, figsize=(5, 5)):\n plt.figure(figsize=figsize)\n plt.xticks([])\n plt.yticks([])\n nx.draw_networkx(G,\n pos=nx.spring_layout(G, seed=42),\n with_labels=True,\n node_color=color,\n cmap=\"Set2\")\n plt.show();", "def plot_edges(self, node_list):\n tree = MarkerArray()\n id = 1\n for node in self.node_list:\n if node.parent:\n # edge between nodes\n path = Marker()\n path.header.frame_id = \"map\"\n path.header.stamp = rospy.get_rostime()\n path.ns = \"markers\"\n path.id = id\n id += 1\n path.type = path.LINE_STRIP\n path.action = path.ADD\n path.scale.x = self.rviz_tuning_plt\n path.color.a = 1.0\n\n path.color.r = 1.0\n path.color.g = 0.7\n path.color.b = 0.0\n\n path.lifetime = rospy.Duration()\n path.pose.orientation.w = 1.0\n\n p1 = Point()\n p1.x = node.parent.x\n p1.y = node.parent.y\n p1.z = 0.02\n path.points.append(p1)\n\n p2 = Point()\n p2.x = node.x\n p2.y = node.y\n p2.z = 0.02\n path.points.append(p2)\n \n tree.markers.append(path)\n\n self.pub_edges.publish(tree)", "def generate_plot(self):\r\n\t\tx, y = zip(*[p.p for p in self.universe])\r\n\t\tself.ax.cla()\r\n\t\tself.ax.plot(x, y, '.')\r\n\t\tself.ax.set_title('Universe at time: %d' % self.universe.time)\r\n\t\tself.ax.set_xlim([P_MU-4*P_STD, P_MU+4*P_STD])\r\n\t\tself.ax.set_ylim([P_MU-4*P_STD, P_MU+4*P_STD])" ]
[ "0.7539952", "0.69758826", "0.6917775", "0.6841799", "0.6772024", "0.66144824", "0.66071594", "0.65934306", "0.65229076", "0.65064275", "0.6494702", "0.6480308", "0.6477845", "0.6361443", "0.63394064", "0.6281064", "0.62662697", "0.6260735", "0.6240371", "0.62216246", "0.6214042", "0.6182424", "0.6180613", "0.6179722", "0.6165587", "0.61483055", "0.61130697", "0.6102516", "0.6086002", "0.6077428" ]
0.7134664
1
Formats comparison as a strings
def format_comparison(objs): def formatter(comp): if not isinstance(comp, tuple): return str(comp) output = [] return "\n".join([comp.type] + [" "+errmessage for errmessage in output]) results = map(formatter,objs) return "\n".join(results) #obj1,obj2 = comp ### Sections #for i,s1,s2 in diffs: # if s1 and s2: # output.append(f"Section {i} does not match:") # result = compare_sections(s1,s2) # output.extend(almethods.linepadder(result)) # else: # if s1: # output.append(f"Door 2 missing Section {i}") # else: # output.append(f"Door 1 missing Section {i}")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def comparison(self) -> str:\n return self._values.get('comparison')", "def generate_comparison_output_string(comparisons: List[Dict[str, Any]]) -> str:\n result_dict = generate_comparison_dict(comparisons)\n result_string = json.dumps(result_dict, sort_keys=True, indent=4)\n return result_string", "def format_condition(self, key, val1, val2):\n if val1 is not None and val2 is not None:\n condition = '{:.2f} < {:s} < {:.2f}'.format(val1, key, val2)\n elif val2 is None:\n condition = '{:s} == {:s}'.format(key, str(val1))\n return condition", "def comparison(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"comparison\")", "def for_comparison(self):\n\t\tif len(self.values) < 5:\n\t\t\treturn unicode(self)\n\t\telse:\n\t\t\treturn u'-'.join(self._string_values(increment=1))", "def __sub_comparison_ops(file_contents: str) -> str:\n\n return re.sub(r'(?:IS\\s+)?EQUALS?(?:\\s+TO)?', '=', file_contents)", "def _strHard(self):\n if self.checkGreaterThanThreshold:\n operator += \">\"\n else:\n operator += \"<\"\n return \"(Hard) %s %f Enabled: %s\" %(operator, self.threshold, str(self.enabled))", "def get_comparison(self, start, end):\n\n return 'https://{}/{}/{}/compare/{}...{}'.format(HOST_GITHUB, \\\n self.repo, self.product, start, end) + '\\n'", "def get_name(self):\n return str(self.comparison_type)", "def _repr_(self):\n s = 'An inequality '\n have_A = not self.A().is_zero()\n if have_A:\n s += repr(self.A()) + ' x '\n if self.b()>=0:\n if have_A:\n s += '+'\n else:\n s += '-'\n if have_A:\n s += ' '\n s += repr(abs(self.b())) + ' >= 0'\n return s", "def _repr_(self):\n s = 'An inequality '\n have_A = not self.A().is_zero()\n if have_A:\n s += repr(self.A()) + ' x '\n if self.b()>=0:\n if have_A:\n s += '+'\n else:\n s += '-'\n if have_A:\n s += ' '\n s += repr(abs(self.b())) + ' >= 0'\n return s", "def __str__(self):\n return \"{} != {} ({})\".format(self.var1.name,\n self.var2.name,\n self.satisfied())", "def __str__(self):\n r = []\n for item in sorted(self._data.keys()):\n correct, incorrect = self._data[item][True], self._data[item][False]\n acc = correct / (correct + incorrect)\n s = f\"{item:4} | Accuracy: {acc:.2f}% (diff {'+' if acc-item >=0 else ''}{acc-item:.2f}%) | correct: {correct:2}, incorrect: {incorrect:2}\" \n r.append(s)\n\n return \"\\n\".join(r)", "def _to_string(self):\n self.results.print_results()\n self.results.print_comparison()", "def __str__(self):\n\n return 'IF {0} THEN {1}'.format(', '.join([str(fv) for fv in self.fvals]),\n str(self.label))", "def print_test_comparison(test_name, expected, result):\n line = \"\\n\"\n line += \"-\" * 60 + \"\\n\"\n line += \"{}\\n\".format(test_name)\n line += \"-\" * 60 + \"\\n\"\n line += \"-\" * 26 + \"EXPECTED\" + \"-\" * 26 + \"\\n\"\n line += \"{}\\n\".format(expected)\n line += \"-\" * 28 + \"END\" + \"-\" * 29 + \"\\n\"\n line += \"-\" * 27 + \"RESULT\" + \"-\" * 27 + \"\\n\"\n line += \"{}\\n\".format(result)\n line += \"-\" * 28 + \"END\" + \"-\" * 29 + \"\\n\"\n line += \"\\n\"\n return line", "def __str__(self):\n\n return '{0} {1} {2}'.format(self.feat, '==' if self.pos else '!=',\n self.val)", "def comparison(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"comparison\")", "def comparison(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"comparison\")", "def _equals(cols, new_table, new_cols):\n return ', '.join('{}={}.{}'.format(cols[i], new_table, new_cols[i]) for i in range(len(cols)))", "def __repr__(self) -> str:\n result = \"Equal\" if self.got is None 
else \"Unequal\"\n return f\"<TestResultFile {self.test_id},{self.regression_test_id},{self.regression_test_output_id}: {result}>\"", "def print_comparison(node, comparisons, search_string):\n\n # dostane sa na dummy kluc\n if node is None:\n print(\"\\nPorovnanie\", str(comparisons + 1) + \":\",\n colored(\"\\n -- DUMMY kluc: \" + search_string + \" --\", \"green\", attrs=['bold']),\n \"\\n Hladany retazec:\", colored(search_string, \"green\", attrs=['bold']),\n \"\\n Zhoda:\", colored(True, \"green\", attrs=['bold']),\n \"\\n\\n*******************\")\n\n # medzivysledok\n else:\n color = \"green\" if node.value == search_string else \"red\"\n print(\"\\nPorovnanie\", str(comparisons) + \":\",\n \"\\n Aktualny kluc:\", colored(node.value, color, attrs=['bold']),\n \"\\n Hladany retazec:\", colored(search_string, color, attrs=['bold']),\n \"\\n Zhoda:\", colored(node.value == search_string, color, attrs=['bold']),\n \"\\n\\n*******************\")", "def format_errordict(self, errordict):\n errormsg = f'Comparison between {self.ad1.filename} and {self.ad2.filename}'\n for k, v in errordict.items():\n errormsg += f'\\nComparison failure in {k}'\n errormsg += '\\n' + ('-' * (22 + len(k))) + '\\n'\n errormsg += '\\n '.join(v)\n return errormsg", "def __str__(self):\n\t\treturn \"{min} ~ {max}\".format(min=str(self.min), max=str(self.max))", "def __str__(self):\n return '\\n'+'\\n'.join([\"%-15s: %s\" % (qq(w), str(v)) for w, v in sorted(self.value.items())]) + '\\0'", "def compare(self, other: Optional['PDFState']) -> str:\n ret_value = ''\n if (\n other is None or self.font_family != other.font_family or\n self.font_mode != other.font_mode or self.size != other.size\n ):\n ret_value += ' /{} {} Tf'.format(self.font.ref, round(self.size, 3))\n if other is None or self.color != other.color:\n ret_value += ' ' + str(self.color)\n if other is None or self.rise != other.rise:\n ret_value += ' {} Ts'.format(round(self.rise, 3))\n\n return ret_value", "def print_comparison(name, dates, times, orig_data, comp_data):\n\n # Output comparison of data\n print(' ORIGINAL COMPUTED')\n print(f' DATE TIME {name.upper():>9} {name.upper():>9} DIFFERENCE')\n print('------- ------ --------- --------- ----------')\n zip_data = zip(dates, times, orig_data, comp_data)\n for date, time, orig, comp in zip_data:\n diff = orig - comp\n print(f'{date} {time:>6} {orig:9.6f} {comp:9.6f} {diff:10.6f}')", "def get_compare_value_texts(self):\n return self.compare_value_texts", "def __cmp__(self, other):\n return cmp(repr(self), repr(other))", "def format(self) -> str:" ]
[ "0.7065585", "0.6774839", "0.66851884", "0.66676676", "0.6445696", "0.64411914", "0.64020616", "0.63991475", "0.6312643", "0.6112813", "0.6112813", "0.610843", "0.6023886", "0.6010248", "0.59334326", "0.5931873", "0.59163105", "0.5899037", "0.5899037", "0.5896669", "0.5889875", "0.58821243", "0.58778167", "0.5857649", "0.58547544", "0.58478045", "0.5822179", "0.58094436", "0.58093524", "0.5797635" ]
0.69769394
1
Catches a difference when one or both of the objects are None (since it is handled the same across methods)
def none_comparison(func): @functools.wraps(func) def inner(obj1,obj2): if obj1 is not None and obj2 is not None: return func(obj1, obj2) if obj1 is None and obj2 is None: return [] if obj1 is not None and obj2 is None: return Difference(f"Second {obj1.__class__.__name__} is None",(obj1,None)) return Difference(f"First {obj2.__class__.__name__} is None",(None,obj2)) return inner
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _check_none(self) -> PossibleResult[T]:\n if self.constructor == type(None):\n if not self.obj is None:\n raise DeserializeError(\n type(None), self.obj, self.new_depth, self.key\n )\n return self.obj # type: ignore\n return NO_RESULT", "def compare_with_none():\n value = {};\n if value is not None:\n print(\"value is not none\")\n else:\n print(\"value is none\")", "def assert_is_not_none(self, obj):\n if obj is None:\n raise AssertionError('unexpectedly None')", "def is_none(obj):\n return obj is None", "def __eq__(self, other: Any) -> bool:\n return isinstance(other, Nothing)", "def _merge_sanity_check(self, other):\n if self._fields is not None and (\n set(self.query.values_select) != set(other.query.values_select)\n or set(self.query.extra_select) != set(other.query.extra_select)\n or set(self.query.annotation_select) != set(other.query.annotation_select)\n ):\n raise TypeError(\n \"Merging '%s' classes must involve the same values in each case.\"\n % self.__class__.__name__\n )", "def test_none(self):\n esnA = ESN(N_in,N_out,random_state=None)\n esnB = ESN(N_in,N_out,random_state=None)\n self._compare(esnA,esnB,should_be=\"different\")", "def test_none(self):\n base1 = Base(None)\n base2 = Base(None)\n base3 = Base(None)\n self.assertEqual(base1.id, base3.id - 2)", "def test_do_check_event_type(self):\n self.assertEqual(self.a.get_type(), None)\n self.assertEqual(self.b.get_type(), None)\n self.assertTrue(self.a.do_check_event_type(self.a))\n\n self.a = +self.a\n self.assertFalse(self.a.do_check_event_type(self.b))", "def assert_type_or_none(obj, classes):\n if obj is not None:\n assert_type(obj, classes)", "def is_not_none(e):\n return e is not None", "def assert_is_none(self, obj):\n if obj is not None:\n raise AssertionError('%s is not None' % (str(obj),))", "def get_none1(self):\n pass", "def __ne__(self, other):\n if other != None:\n return self != other\n else:\n return True", "def __ne__(self, other):\n if other != None:\n return self != other\n else:\n return True", "def none(self):", "def testNoneAssignment(self):\n class MyMessage(messages.Message):\n\n my_field = messages.StringField(1)\n\n m1 = MyMessage()\n m2 = MyMessage()\n m2.my_field = None\n self.assertEquals(m1, m2)", "def assertIsNotNone(self, obj, msg=None):\r\n if obj is None:\r\n standardMsg = 'unexpectedly None'\r\n self.fail(self._formatMessage(msg, standardMsg))", "def interferes(self, other):\n return True", "def return_none() -> None:\n pass", "def test_no_rhs(self):\n from sosbeacon.utils import get_latest_datetime\n\n lhs = object()\n rhs = None\n\n result = get_latest_datetime(lhs, rhs)\n\n self.assertIs(lhs, result)", "def _check_union(self) -> PossibleResult[T]:\n if _is_union(self.constructor):\n args = get_args(self.constructor)\n is_optional = len(args) == 2 and type(None) in args\n is_optional_property = len(args) == 2 and Undefined in args\n if is_optional and self.obj is None:\n return None # type: ignore\n if is_optional_property and self.obj is UNDEFINED:\n return UNDEFINED # type: ignore\n for argument in args:\n convert_primitives = self.convert_primitives and (\n (is_optional and argument != type(None))\n or (is_optional_property and argument != Undefined)\n )\n try:\n return Deserialize(\n obj=self.obj,\n constructor=argument,\n depth=self.new_depth,\n convert_primitives=convert_primitives,\n ).run()\n except DeserializeError:\n pass\n raise DeserializeError(\n self.constructor, self.obj, self.new_depth, self.key\n )\n return NO_RESULT", "def __ne__(self, other):\r\n\t\treturn 
(self.type != other.type or self.value != other.value)", "def test_product_nullables(self):\n self.assertIsNone(self.product3.main_image)\n self.assertIsNone(self.product3.protein)\n self.assertIsNone(self.product3.fat)\n self.assertIsNone(self.product3.carbs)\n self.assertIsNone(self.product3.calories)", "def nulltest():", "def test_equal_on_not_equal_value(self):\n a = objects.OpaqueObject(self.bytes_a, enums.OpaqueDataType.NONE)\n b = objects.OpaqueObject(self.bytes_b, enums.OpaqueDataType.NONE)\n self.assertFalse(a == b)\n self.assertFalse(b == a)", "def __neq__(self, other): \n return not self == other", "def assertIsNone(self, obj, msg=None):\r\n if obj is not None:\r\n standardMsg = '%s is not None' % (safe_repr(obj),)\r\n self.fail(self._formatMessage(msg, standardMsg))", "def is_equal(o1: object, o2: object) -> bool:\n if o1 is None and o2 is None:\n return True\n if o1 is None:\n return False\n return o1 == o2", "def test_none_input(self):\n eq_(None, output())" ]
[ "0.6248695", "0.5997152", "0.59885675", "0.59798694", "0.5903614", "0.5886935", "0.58787954", "0.5814186", "0.58113503", "0.57685584", "0.5712491", "0.56863844", "0.56762654", "0.56702787", "0.56702787", "0.5659557", "0.563092", "0.5625755", "0.5596324", "0.55960506", "0.5590203", "0.5568005", "0.55187255", "0.55032694", "0.54724413", "0.54694337", "0.5465812", "0.5464359", "0.54436016", "0.54423535" ]
0.7289079
0
Compares Attributes between 2 objects via getattr, returning the attribute values as a tuple if they do not match
def attr_comparison(obj1,obj2,attrs): return [Difference(f"{obj1.__class__.__name__}.{attr}",(result1,result2)) for attr in attrs if (result1 := getattr(obj1,attr)) != (result2 := getattr(obj2,attr))]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def attrs_to_tuple(obj):\n return tuple(getattr(obj, a) for a in attrs)", "def compare(current_formation):\n\n attribute_tuple = ()\n for attr in attributes:\n\n if attr in current_formation:\n attribute_tuple += (current_formation[attr],)\n elif attr in ['position_all']:\n position_list = list(current_formation['positions'].keys())\n attribute_tuple += (position_list,)\n else:\n print \"Invalid Attribute: %s\" % attr\n\n return attribute_tuple", "def _compare_attributes(self, first: Node, second: Node) -> bool:\n # If opsets of nodes are different, then nodes have different attributes.\n fst_opset = first.get_opset()\n snd_opset = second.get_opset()\n if fst_opset != snd_opset:\n return False\n\n if fst_opset not in ['opset1', 'opset4']:\n fst_name = first.soft_get('name', first.id)\n snd_name = second.soft_get('name', second.id)\n raise Error('Unsupported opset {} for nodes with names {} and {}'.format(fst_opset, fst_name, snd_name))\n\n if fst_opset == 'opset1':\n return self._compare_attributes_of_interpolate1(first, second)\n else:\n return self._compare_attributes_of_interpolate4(first, second)", "def compare(a, b, attrs, f):\n for attr in attrs:\n if not f(getattr(a, attr), getattr(b, attr)):\n return False\n return True", "def _attributes(self, ext1, ext2):\n errorlist = []\n for attr in ['data', 'mask', 'variance', 'OBJMASK', 'OBJCAT']:\n attr1 = getattr(ext1, attr, None)\n attr2 = getattr(ext2, attr, None)\n if (attr1 is None) ^ (attr2 is None):\n errorlist.append(f'Attribute error for {attr}: '\n f'{attr1 is not None} v {attr2 is not None}')\n elif attr1 is not None:\n if isinstance(attr1, Table):\n if len(attr1) != len(attr2):\n errorlist.append(f'attr lengths differ: '\n f'{len(attr1)} v {len(attr2)}')\n else: # everything else is pixel-like\n if attr1.dtype.name != attr2.dtype.name:\n errorlist.append(f'Datatype mismatch for {attr}: '\n f'{attr1.dtype} v {attr2.dtype}')\n if attr1.shape != attr2.shape:\n errorlist.append(f'Shape mismatch for {attr}: '\n f'{attr1.shape} v {attr2.shape}')\n if 'int' in attr1.dtype.name:\n try:\n assert_most_equal(attr1, attr2, max_miss=self.max_miss)\n except AssertionError as e:\n errorlist.append(f'Inequality for {attr}: '+str(e))\n else:\n try:\n assert_most_close(attr1, attr2, max_miss=self.max_miss,\n rtol=self.rtol, atol=self.atol)\n except AssertionError as e:\n errorlist.append(f'Mismatch for {attr}: '+str(e))\n return errorlist", "def sub_comparison(obj1,obj2,translate):\n return [Difference(f\"{obj1.__class__.__name__} > {meth.__name__}\",result) for (meth,attr) in translate if (result := meth(getattr(obj1,attr),getattr(obj2,attr))) is not None]", "def getter_attributes_test(name, from_xml, from_dict, result):\n assert getattr(from_xml, name) == result\n assert getattr(from_dict, name) == result", "def attrs_eq(received, **expected):\n for k, v in expected.iteritems():\n eq_(v, getattr(received, k))", "def assert_attributes_equal(self, video, attrs):\r\n for key, value in attrs.items():\r\n self.assertEquals(getattr(video, key), value)", "def cmpAttributeValues(self, dcObj, ignoreOrder=True, **kwargs):\n rL = []\n floatRelTolerance = kwargs.get(\"floatRelTolerance\", 1.0e-05)\n floatAbsTolerance = kwargs.get(\"floatAbsTolerance\", 1.0e-04)\n try:\n sa = set(self.getAttributeList())\n sb = set(dcObj.getAttributeList())\n atNameComList = list(sa & sb)\n #\n lenEq = self.getRowCount() == dcObj.getRowCount()\n if not lenEq:\n return [(atName, False) for atName in atNameComList]\n #\n for atName in atNameComList:\n dataType, _ = 
self.__getAttributeInfo(atName)\n if dataType in [\"string\", \"integer\"]:\n if ignoreOrder:\n same = sorted(self.getAttributeValueList(atName)) == sorted(dcObj.getAttributeValueList(atName))\n else:\n same = self.getAttributeValueList(atName) == dcObj.getAttributeValueList(atName)\n elif dataType in [\"float\"]:\n aVL = self.getAttributeValueList(atName)\n bVL = dcObj.getAttributeValueList(atName)\n if ignoreOrder:\n for aV, bV in zip(sorted(aVL), sorted(bVL)):\n same = self.__isClose(aV, bV, relTol=floatRelTolerance, absTol=floatAbsTolerance)\n if not same:\n break\n else:\n for aV, bV in zip(aVL, bVL):\n same = self.__isClose(aV, bV, relTol=floatRelTolerance, absTol=floatAbsTolerance)\n if not same:\n logger.info(\"%s %s (rel=%r) (abs=%r) %r (%r)\", self.getName(), atName, aV * floatRelTolerance, floatAbsTolerance, aV, abs(aV - bV))\n break\n rL.append((atName, same))\n #\n return rL\n except Exception as e:\n if self._raiseExceptions:\n raise e\n return rL", "def _PairUpAttributes(attributes):\n names = sorted(set(attr.id for attr in attributes))\n getters = {}\n setters = {}\n for attr in attributes:\n if attr.is_fc_getter:\n getters[attr.id] = attr\n elif attr.is_fc_setter and 'Replaceable' not in attr.ext_attrs:\n setters[attr.id] = attr\n return [(getters.get(id), setters.get(id)) for id in names]", "def with_cmp(attrs):\n def attrs_to_tuple(obj):\n \"\"\"\n Create a tuple of all values of *obj*'s *attrs*.\n \"\"\"\n return tuple(getattr(obj, a) for a in attrs)\n\n def eq(self, other):\n if isinstance(other, self.__class__):\n return attrs_to_tuple(self) == attrs_to_tuple(other)\n else:\n return NotImplemented\n\n def ne(self, other):\n result = eq(self, other)\n if result is NotImplemented:\n return NotImplemented\n else:\n return not result\n\n def lt(self, other):\n if isinstance(other, self.__class__):\n return attrs_to_tuple(self) < attrs_to_tuple(other)\n else:\n return NotImplemented\n\n def le(self, other):\n if isinstance(other, self.__class__):\n return attrs_to_tuple(self) <= attrs_to_tuple(other)\n else:\n return NotImplemented\n\n def gt(self, other):\n if isinstance(other, self.__class__):\n return attrs_to_tuple(self) > attrs_to_tuple(other)\n else:\n return NotImplemented\n\n def ge(self, other):\n if isinstance(other, self.__class__):\n return attrs_to_tuple(self) >= attrs_to_tuple(other)\n else:\n return NotImplemented\n\n def hash_(self):\n return hash(attrs_to_tuple(self))\n\n def wrap(cl):\n cl.__eq__ = eq\n cl.__ne__ = ne\n cl.__lt__ = lt\n cl.__le__ = le\n cl.__gt__ = gt\n cl.__ge__ = ge\n cl.__hash__ = hash_\n\n return cl\n return wrap", "def attr_is_equal(first_obj, second_obj, attr):\n import numpy as np\n\n # Avoid comparing None's.\n return attr_has_same_shape(first_obj, second_obj, attr) and np.array_equal(\n getattr(first_obj, attr), getattr(second_obj, attr)\n )", "def compare_values(self, other, value):\n if value in self.__dir__() and value in other.__dir__():\n return float(self.__getattribute__(value)) > float(other.__getattribute__(value))\n else:\n return \"Can't compare values\"", "def check_values(layer1, layer2, attr):\n attr1 = getattr(layer1, attr, None)\n attr2 = getattr(layer2, attr, None)\n if not attr1:\n return not attr2\n return numpy.allclose(attr1.eval(), attr2.eval())", "def test_mix_positional_with_attribute_access():\n data = \"{0.__class__.__name__}: {0}\".format(42)\n data2 = \"{0[0]}: {0}\".format([1])\n return (data, data2)", "def parse_attributes(self, ds_to_check=None):\n\n if ds_to_check is None:\n ds_to_check = 
self.ds\n\n print(\"Parsing attributes.\")\n for i in ds_to_check.attrs.keys():\n if i in self._attrs.keys():\n print(\"{} is both a property of the object and an attribute of the dataset\".format(i))\n if ds_to_check.attrs[i] == self._attrs[i]:\n print(\" ... and they are equal\")\n else:\n print(\" ... and they NOT are equal!!!\")\n\n ds_to_check.attrs = self._attrs", "def compare(obj_a, obj_b):\n\n return tuple_to_string(obj_a) == tuple_to_string(obj_b)", "def match_attribute_names(*arrays):\n rep = arrays[0].sdbtype.full_rep\n result = [arrays[0]]\n for a in arrays[1:]:\n renames = []\n reserved = list(a.att_names) # reserved att names\n for r in a.sdbtype.full_rep:\n nm = r[0]\n if _att_match(rep, r):\n reserved.append(nm)\n continue\n newname = _find_rename(rep, r, reserved)\n if newname is None:\n raise ValueError(\"Cannot rename %s in %s\" % (nm, a))\n renames.extend((nm, newname))\n reserved.append(newname)\n if renames:\n a = a.attribute_rename(a, *renames)\n result.append(a)\n return tuple(result)", "def equality_check(a, b):\n\n def check_item(x, y, attr):\n if isinstance(x, hoomd.operation._HOOMDGetSetAttrBase):\n equality_check(x, y)\n return\n if isinstance(x, Mapping):\n for k, v in x.items():\n assert k in y, f\"For attr {attr}, key difference {k}\"\n check_item(v, y[k], \".\".join((attr, str(k))))\n return\n if not isinstance(x, str) and hasattr(x, \"__len__\"):\n assert len(x) == len(y)\n for i, (v_x, v_y) in enumerate(zip(x, y)):\n check_item(v_x, v_y, attr + f\"[{i}]\")\n return\n if isinstance(x, float):\n assert numpy.isclose(x, y), f\"attr '{attr}' not equal:\"\n return\n assert x == y, f\"attr '{attr}' not equal:\"\n\n if not isinstance(a, hoomd.operation._HOOMDGetSetAttrBase):\n return a == b\n assert type(a) == type(b)\n\n _check_obj_attr_compatibility(a, b)\n\n for attr in a.__dict__:\n if attr in a._skip_for_equality:\n continue\n\n if attr == \"_param_dict\":\n param_keys = a._param_dict.keys()\n b_param_keys = b._param_dict.keys()\n # Check key equality\n assert param_keys == b_param_keys, \"Incompatible param_dict keys:\"\n # Check item equality\n for key in param_keys:\n check_item(a._param_dict[key], b._param_dict[key], key)\n continue\n\n if attr == \"_typeparam_dict\":\n keys = a._typeparam_dict.keys()\n b_keys = b._typeparam_dict.keys()\n # Check key equality\n assert keys == b_keys, \"Incompatible _typeparam_dict:\"\n # Check item equality\n for key in keys:\n for type_, value in a._typeparam_dict[key].items():\n check_item(value, b._typeparam_dict[key][type_], \".\".join(\n (key, str(type_))))\n continue\n\n check_item(a.__dict__[attr], b.__dict__[attr], attr)", "def get_attributes(self, attributes, default=''):\n if isinstance(attributes, str):\n attributes = [attributes]\n\n attrs = [getattr(self, attr, default) for attr in attributes]\n\n if len(attrs) == 1:\n return attrs[0]\n\n return tuple(attrs)", "def getattrnames(instance):\n return tuple(sorted([attr_name for attr_name in vars(instance).keys()\n if not attr_name.startswith('_')]))", "def test_attributes_equal(self):\n test1 = self.Test({ 'id': 2, 'name': 'Poop Head' })\n test2 = self.Test({ 'id': 2, 'name': 'Poop Head' })\n self.assertEqual(test1, test2)", "def _compare_attributes_of_interpolate1(self, first: Node, second: Node) -> bool:\n # If some of attributes 'mode', 'align_corners', 'antialias', 'pads_begin', 'pads_end' are different,\n # then attributes of nodes are not identical.\n op = Interpolate(graph=first.graph, attrs={})\n for attr in ['mode', 'align_corners', 'antialias', 
'pads_begin', 'pads_end']:\n if first.soft_get(attr, default=op.attrs[attr]) != second.soft_get(attr, default=op.attrs[attr]):\n return False\n return True", "def assertNodesEqual(self, first, second):\n def get_attrs(l):\n result = []\n for n in l:\n result.append((n.service, n.address, n.version, n.properties))\n return result\n self.assertEqual(get_attrs(first), get_attrs(second))", "def _compare_ioc_properties(old: Dict[str, IOC], new: Dict[str, IOC]):\n new_iocs = set()\n changed_iocs = set()\n removed_iocs = set()\n\n _attributes = [\"macros\", \"pvs\", \"pvsets\", \"simlevel\", \"restart\", \"autostart\"]\n\n for ioc_name in new.keys():\n if ioc_name not in old.keys():\n # If not in previously then add it to new iocs\n new_iocs.add(ioc_name)\n elif any(getattr(old[ioc_name], attr) != getattr(new[ioc_name], attr) for attr in _attributes):\n # If any attributes have changed, add to changed iocs\n changed_iocs.add(ioc_name)\n\n for ioc_name in old.keys():\n if ioc_name not in new:\n removed_iocs.add(ioc_name)\n\n return new_iocs, changed_iocs, removed_iocs", "def _exact_compare(tree1, tree2):\n attrs = ['name', 'length', 'support']\n for n1, n2 in zip(tree1.postorder(), tree2.postorder()):\n for attr in attrs:\n if getattr(n1, attr, None) != getattr(n2, attr, None):\n return False\n return True", "def __eq__(self, other):\n\t\treturn all((getattr(self, attr, None) == getattr(other, attr, None) for attr in self.attrs))", "def _mergeAttributes(this, other, attrName):\n attr1 = getattr(this, attrName)\n attr2 = getattr(other, attrName)\n if attr1 is not None and attr2 is not None:\n raise AttributeError(\n \"Cannot merge {} and {}, the attribute `{}` has been assigned on both\"\n \"instances.\".format(this, other, attrName)\n )\n return attr1 if attr1 is not None else attr2", "def changed_attrs(old_version, new_version, interesting_attrs):\n # Use an OrderedDict so that we preserve the order from interesting_attrs\n changed = OrderedDict()\n for attr in interesting_attrs:\n if attr in old_version and attr not in new_version:\n changed[attr] = [old_version[attr], None]\n elif attr in new_version and attr not in old_version:\n changed[attr] = [None, new_version[attr]]\n elif old_version[attr] != new_version[attr]:\n changed[attr] = [old_version[attr], new_version[attr]]\n return changed" ]
[ "0.6573749", "0.64668816", "0.6218231", "0.61928684", "0.61631376", "0.6161874", "0.61611706", "0.61574703", "0.5891995", "0.58485514", "0.5843817", "0.5834111", "0.57660866", "0.57439977", "0.5732433", "0.5712868", "0.56471366", "0.5642409", "0.5622416", "0.5610146", "0.55761474", "0.55702764", "0.55648893", "0.5556978", "0.5552826", "0.5535728", "0.5497517", "0.5476264", "0.54568684", "0.5446643" ]
0.73285353
0
Given a list of tuples comparised of (subcomparison method, attr name for comparison), returns any Difference tuple retunred by each method using the given attr of obj1 and obj2 as arguments (if that method is not None)
def sub_comparison(obj1,obj2,translate): return [Difference(f"{obj1.__class__.__name__} > {meth.__name__}",result) for (meth,attr) in translate if (result := meth(getattr(obj1,attr),getattr(obj2,attr))) is not None]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def attr_comparison(obj1,obj2,attrs):\n return [Difference(f\"{obj1.__class__.__name__}.{attr}\",(result1,result2)) for attr in attrs if (result1 := getattr(obj1,attr)) != (result2 := getattr(obj2,attr))]", "def with_cmp(attrs):\n def attrs_to_tuple(obj):\n \"\"\"\n Create a tuple of all values of *obj*'s *attrs*.\n \"\"\"\n return tuple(getattr(obj, a) for a in attrs)\n\n def eq(self, other):\n if isinstance(other, self.__class__):\n return attrs_to_tuple(self) == attrs_to_tuple(other)\n else:\n return NotImplemented\n\n def ne(self, other):\n result = eq(self, other)\n if result is NotImplemented:\n return NotImplemented\n else:\n return not result\n\n def lt(self, other):\n if isinstance(other, self.__class__):\n return attrs_to_tuple(self) < attrs_to_tuple(other)\n else:\n return NotImplemented\n\n def le(self, other):\n if isinstance(other, self.__class__):\n return attrs_to_tuple(self) <= attrs_to_tuple(other)\n else:\n return NotImplemented\n\n def gt(self, other):\n if isinstance(other, self.__class__):\n return attrs_to_tuple(self) > attrs_to_tuple(other)\n else:\n return NotImplemented\n\n def ge(self, other):\n if isinstance(other, self.__class__):\n return attrs_to_tuple(self) >= attrs_to_tuple(other)\n else:\n return NotImplemented\n\n def hash_(self):\n return hash(attrs_to_tuple(self))\n\n def wrap(cl):\n cl.__eq__ = eq\n cl.__ne__ = ne\n cl.__lt__ = lt\n cl.__le__ = le\n cl.__gt__ = gt\n cl.__ge__ = ge\n cl.__hash__ = hash_\n\n return cl\n return wrap", "def deep_cmp(obj1, obj2):\n pass", "def diffs(self):\n diffs = []\n # XXX i know, we are using the ZODB, so sorry for the cheesy eval()\n # uhm, some logic is not right here as well, we need to look at keys\n # in both the before and after sets :(\n if not self.before or not self.after:\n return []\n before = eval(self.before)\n # pfft!\n if not before:\n return []\n after = eval(self.after)\n for k,v in before.items():\n if k in ['objectClass','userPassword']:\n continue\n try:\n if k == 'uniqueMember':\n added, removed = uniqueMemberDiff(\n v, after['uniqueMember'] )\n diffs.append( {'attribute' : k,\n 'added' : added,\n 'removed' : removed,\n }\n )\n elif str(v) != str(after[k]):\n diffs.append( { 'attribute' : k,\n 'before' : before[k],\n 'after' : after[k] }\n )\n except KeyError:\n pass\n return diffs", "def diff(*args):\n return reduce(lambda x, y: x - y, args)", "def BackfillComparisons (cls):\n\n def applyconvert (cls, derived):\n for (opn, opx) in derived:\n opx.__name__ = opn\n opx.__doc__ = getattr(int, opn).__doc__\n setattr(cls, opn, opx)\n\n applyconvert(cls, (\n ('__gt__', lambda self, other: not (self.__lt__(other) or self.__eq__(other))),\n ('__le__', lambda self, other: self.__lt__(other) or self.__eq__(other)),\n ('__ge__', lambda self, other: not self.__lt__(other))\n ))\n applyconvert(cls, (\n ('__ne__', lambda self, other: not self.__eq__(other)),\n ))\n return cls", "def DataDiff(source, target, compare_list_as_value=True, depth=0, no_difference_value=None):\n # Ensure recursion doesnt go out of control\n if depth > 150:\n raise Exception('DataDiff recurlsion depth has hit limit (50), aborting.')\n\n # If we are not working with 2 different containers we can inspect, then do a simple check\n if type(source) not in (list, tuple, dict) or type(target) not in (list, tuple, dict):\n # If the types are different, the data is different (and cant be compared more)\n if type(source) != type(target):\n return (source, target)\n # Else, theyre the same types, if the values are different\n elif source != 
target:\n return (source, target)\n # Else, theyre the same types and value\n else:\n # This should only happen if this is a fresh DataDiff() call, depth==0\n if depth == 0:\n return (no_difference_value, no_difference_value)\n else:\n raise Exception('This should never happen, having a mismatching value different in anywhere but depth=0')\n\n\n if type(source) in (list, tuple):\n source_diff = []\n elif type(source) == dict:\n source_diff = {}\n else:\n raise Exception('Unhandled source_diff data type: %s' % type(source))\n\n if type(target) in (list, tuple):\n target_diff = []\n elif type(target) == dict:\n target_diff = {}\n else:\n raise Exception('Unhandled target_diff data type: %s' % type(target))\n\n # Check for incompatible types, and just return them both as theyre totally different\n if type(source_diff) != type(target_diff):\n return (source, target)\n\n # If we're handling a Dictionary compare\n if type(source_diff) == dict:\n # Process the source keys first\n for key in source.keys():\n _CompareDictValue(key, source, target, source_diff, target_diff, compare_list_as_value, no_difference_value, depth)\n\n # Process the target keys next, skipping any source keys we already processed\n for key in target.keys():\n # Skip any keys we already processed in source\n if key in source:\n continue\n\n # Reverse target/source, so that the reverse comparison/set is done\n _CompareDictValue(key, target, source, target_diff, source_diff, compare_list_as_value, no_difference_value, depth)\n\n # Else, if we're handling a List compare\n elif type(source_diff) == list:\n # If lists must be compared in total because the order of a list is important\n if compare_list_as_value:\n if source != target:\n return (list(source), list(target))\n\n # Else, compare each element of the list\n else:\n for count in range(0, len(source)):\n if count >= len(target):\n source_diff.append(source[count])\n elif source[count] != target[count]:\n source_diff.append(source[count])\n target_diff.append(target[count])\n\n # If the target has more elements than the source, add the rest \n if len(target) > len(source):\n target_diff += target[-(len(source) - len(target)):]\n\n else:\n raise Exception('Unspecified type handler for data: %s. Only dict and list/tuple types are accepted.')\n\n return (source_diff, target_diff)", "def _compare_and_set_attributes(curr, dag, muts, phen, comparison):\n params = dag.node[curr]\n\n # Get the children of this node\n children = dag.successors(curr)\n\n assert len(children) == 2, \"Tree node with #children != 2.\"\n\n x_params = dag.node[children[0]]\n y_params = dag.node[children[1]]\n x_key = x_params['dataset']\n y_key = y_params['dataset']\n value = None\n\n if x_key is None:\n if y_key is None:\n # Neither child has a dataset.\n params['dataset'] = None\n else:\n # Y has a dataset, but not X.\n params['genes'] = y_params['genes']\n params['dataset'] = y_key\n params['function'] = compare.ds_y\n params['value'] = y_params['value']\n else:\n if y_key is None:\n # X has a dataset, but not Y.\n params['genes'] = x_params['genes']\n params['dataset'] = x_key\n params['function'] = compare.ds_x\n params['value'] = x_params['value']\n else:\n # Both have datasets. 
This is the normal case.\n params['genes'] = x_params['genes'] + y_params['genes']\n function, dataset, value, *etc = compare.best_combination(\n muts[x_key], muts[y_key], phen, comparison)\n params['function'] = function\n params['dataset'] = curr\n muts[curr] = dataset\n params['value'] = value\n\n return value", "def _iterativediff(t1, t2, subdir):\n if t1 is None:\n t1 = {}\n if t2 is None:\n t2 = {}\n\n for e1 in t1:\n realname = subdir + pycompat.fsencode(e1.name)\n\n if e1.type == pygit2.GIT_OBJ_TREE:\n try:\n e2 = t2[e1.name]\n if e2.type != pygit2.GIT_OBJ_TREE:\n e2 = None\n except KeyError:\n e2 = None\n\n stack.append((realname + b'/', e1, e2))\n else:\n n1, fl1 = self.find(realname)\n\n try:\n e2 = t2[e1.name]\n n2, fl2 = other.find(realname)\n except KeyError:\n e2 = None\n n2, fl2 = (None, b'')\n\n if e2 is not None and e2.type == pygit2.GIT_OBJ_TREE:\n stack.append((realname + b'/', None, e2))\n\n if not match(realname):\n continue\n\n if n1 != n2 or fl1 != fl2:\n result[realname] = ((n1, fl1), (n2, fl2))\n elif clean:\n result[realname] = None\n\n for e2 in t2:\n if e2.name in t1:\n continue\n\n realname = subdir + pycompat.fsencode(e2.name)\n\n if e2.type == pygit2.GIT_OBJ_TREE:\n stack.append((realname + b'/', None, e2))\n elif match(realname):\n n2, fl2 = other.find(realname)\n result[realname] = ((None, b''), (n2, fl2))", "def compare(a, b, attrs, f):\n for attr in attrs:\n if not f(getattr(a, attr), getattr(b, attr)):\n return False\n return True", "def cmpAttributeValues(self, dcObj, ignoreOrder=True, **kwargs):\n rL = []\n floatRelTolerance = kwargs.get(\"floatRelTolerance\", 1.0e-05)\n floatAbsTolerance = kwargs.get(\"floatAbsTolerance\", 1.0e-04)\n try:\n sa = set(self.getAttributeList())\n sb = set(dcObj.getAttributeList())\n atNameComList = list(sa & sb)\n #\n lenEq = self.getRowCount() == dcObj.getRowCount()\n if not lenEq:\n return [(atName, False) for atName in atNameComList]\n #\n for atName in atNameComList:\n dataType, _ = self.__getAttributeInfo(atName)\n if dataType in [\"string\", \"integer\"]:\n if ignoreOrder:\n same = sorted(self.getAttributeValueList(atName)) == sorted(dcObj.getAttributeValueList(atName))\n else:\n same = self.getAttributeValueList(atName) == dcObj.getAttributeValueList(atName)\n elif dataType in [\"float\"]:\n aVL = self.getAttributeValueList(atName)\n bVL = dcObj.getAttributeValueList(atName)\n if ignoreOrder:\n for aV, bV in zip(sorted(aVL), sorted(bVL)):\n same = self.__isClose(aV, bV, relTol=floatRelTolerance, absTol=floatAbsTolerance)\n if not same:\n break\n else:\n for aV, bV in zip(aVL, bVL):\n same = self.__isClose(aV, bV, relTol=floatRelTolerance, absTol=floatAbsTolerance)\n if not same:\n logger.info(\"%s %s (rel=%r) (abs=%r) %r (%r)\", self.getName(), atName, aV * floatRelTolerance, floatAbsTolerance, aV, abs(aV - bV))\n break\n rL.append((atName, same))\n #\n return rL\n except Exception as e:\n if self._raiseExceptions:\n raise e\n return rL", "def complete_comparisons(cls):\n assert cls.__lt__ is not object.__lt__, \"{0} must define < and ideally ==\".format(cls.__name__)\n if cls.__eq__ is object.__eq__:\n cls.__eq__ = lambda self, other: not (cls.__lt__(self, other) or cls.__lt__(other, self))\n cls.__ne__ = lambda self, other: not cls.__eq__(self, other)\n cls.__gt__ = lambda self, other: cls.__lt__(other, self)\n cls.__le__ = lambda self, other: not cls.__lt__(other, self)\n cls.__ge__ = lambda self, other: not cls.__lt__(self, other)\n return cls", "def createFromTwoTuples(cls, tuple1, tuple2, **kwargs):\n return 
cls([c2 - c1 for (c1, c2) in zip(tuple1, tuple2)], **kwargs)", "def none_comparison(func):\n @functools.wraps(func)\n def inner(obj1,obj2):\n if obj1 is not None and obj2 is not None:\n return func(obj1, obj2)\n if obj1 is None and obj2 is None:\n return []\n if obj1 is not None and obj2 is None:\n return Difference(f\"Second {obj1.__class__.__name__} is None\",(obj1,None))\n return Difference(f\"First {obj2.__class__.__name__} is None\",(None,obj2))\n return inner", "def _compare_list(self, name, actual, expect):\n raise NotImplementedError(\"base class, not implement!\")", "def comparisons(self, ctx: Context) -> Iterator[AnnotatedExpression]:\n for type, expr_group in ctx.groupby_type():\n if type in (bool, Callable):\n continue\n for operator in self.compare_operators:\n for left, right in combinations(expr_group, 2):\n yield AnnotatedExpression(\n ast.Compare(\n left=left.expr, ops=[operator()], comparators=[right.expr]\n ),\n TypeAnnotation(bool),\n )", "def _compare_scalars(self, old, new, name=None):\n # Explicitly excluded arguments\n if old != new:\n return {'---': old, '+++': new}\n else:\n return None", "def get_comparison_data(self, context):\n if context is None:\n operand1 = [x for x in self[0].select()]\n operand2 = [x for x in self[1].select()]\n else:\n operand1 = [x for x in self[0].select(context.copy())]\n operand2 = [x for x in self[1].select(context.copy())]\n\n if self.parser.compatibility_mode:\n # Boolean comparison if one of the results is a single boolean value (1.)\n try:\n if isinstance(operand1[0], bool):\n if len(operand1) == 1:\n return [(operand1[0], self.boolean_value(operand2))]\n if isinstance(operand2[0], bool):\n if len(operand2) == 1:\n return [(self.boolean_value(operand1), operand2[0])]\n except IndexError:\n return []\n\n # Converts to float for lesser-greater operators (3.)\n if self.symbol in ('<', '<=', '>', '>='):\n return [\n (float(self.data_value(value1)), float(self.data_value(value2)))\n for value1 in operand1 for value2 in operand2\n ]\n\n return [(self.data_value(value1), self.data_value(value2))\n for value1 in operand1 for value2 in operand2]", "def diff(before: list, after: list) -> (list, list):\n additions = [item for item in after if item not in before]\n removals = [item for item in before if item not in after]\n return additions, removals", "def _compare_elements(self, old, new):\n res = None\n # We want to go through the tree post-order\n if isinstance(old, dict):\n res_dict = self.compare_dicts(old, new)\n if (len(res_dict) > 0):\n res = res_dict\n # Now we are on the same level\n # different types, new value is new\n elif (type(old) != type(new)):\n res = {'---': old, '+++': new}\n # recursive arrays\n # we can be sure now, that both new and old are\n # of the same type\n elif (isinstance(old, list)):\n res_arr = self._compare_arrays(old, new)\n if (len(res_arr) > 0):\n res = res_arr\n # the only thing remaining are scalars\n else:\n scalar_diff = self._compare_scalars(old, new)\n if scalar_diff is not None:\n res = scalar_diff\n\n return res", "def comparison(op):\n def comp(*args):\n if args:\n item = args[0]\n for o in args[1:]:\n if op(item, o):\n item = o\n else:\n return Boolean(False)\n return Boolean(True)\n else:\n return Boolean(True)\n return comp", "def format_comparison(objs):\n def formatter(comp):\n if not isinstance(comp, tuple):\n return str(comp)\n output = []\n return \"\\n\".join([comp.type] + [\" \"+errmessage for errmessage in output])\n\n results = map(formatter,objs)\n return \"\\n\".join(results)\n \n 
#obj1,obj2 = comp\n\n\n ### Sections\n #for i,s1,s2 in diffs:\n # if s1 and s2:\n # output.append(f\"Section {i} does not match:\")\n # result = compare_sections(s1,s2)\n # output.extend(almethods.linepadder(result))\n # else:\n # if s1:\n # output.append(f\"Door 2 missing Section {i}\")\n # else:\n # output.append(f\"Door 1 missing Section {i}\")", "def _PairUpAttributes(attributes):\n names = sorted(set(attr.id for attr in attributes))\n getters = {}\n setters = {}\n for attr in attributes:\n if attr.is_fc_getter:\n getters[attr.id] = attr\n elif attr.is_fc_setter and 'Replaceable' not in attr.ext_attrs:\n setters[attr.id] = attr\n return [(getters.get(id), setters.get(id)) for id in names]", "def test_comparison_overrides(self):\n\n # adding these methods directly to each class to avoid decoration\n # by the testlib decorators.\n class H1(object):\n def __init__(self, value='abc'):\n self.value = value\n def __nonzero__(self):\n return False\n def __hash__(self):\n return hash(self.value)\n def __eq__(self, other):\n if isinstance(other, type(self)):\n return self.value == other.value\n return False\n class H2(object):\n def __init__(self, value='abc'):\n self.value = value\n def __nonzero__(self):\n return False\n def __hash__(self):\n return hash(self.value)\n def __eq__(self, other):\n if isinstance(other, type(self)):\n return self.value == other.value\n return False\n class H3(object):\n def __init__(self, value='abc'):\n self.value = value\n def __nonzero__(self):\n return False\n def __hash__(self):\n return hash(self.value)\n def __eq__(self, other):\n if isinstance(other, type(self)):\n return self.value == other.value\n return False\n class H6(object):\n def __init__(self, value='abc'):\n self.value = value\n def __nonzero__(self):\n return False\n def __hash__(self):\n return hash(self.value)\n def __eq__(self, other):\n if isinstance(other, type(self)):\n return self.value == other.value\n return False\n\n \n mapper(H1, t1, properties={\n 'h2s': relation(H2, backref='h1'),\n 'h3s': relation(H3, secondary=t4, backref='h1s'),\n 'h1s': relation(H1, secondary=t5, backref='parent_h1'),\n 't6a': relation(H6, backref='h1a',\n primaryjoin=t1.c.id==t6.c.ht1a_id),\n 't6b': relation(H6, backref='h1b',\n primaryjoin=t1.c.id==t6.c.ht1b_id),\n })\n mapper(H2, t2)\n mapper(H3, t3)\n mapper(H6, t6)\n\n s = create_session()\n for i in range(3):\n h1 = H1()\n s.save(h1)\n\n h1.h2s.append(H2())\n h1.h3s.extend([H3(), H3()])\n h1.h1s.append(H1())\n\n s.flush()\n self.assertEquals(t1.count().scalar(), 4)\n\n h6 = H6()\n h6.h1a = h1\n h6.h1b = h1\n\n h6 = H6()\n h6.h1a = h1\n h6.h1b = x = H1()\n assert x in s\n\n h6.h1b.h2s.append(H2())\n\n s.flush()\n\n h1.h2s.extend([H2(), H2()])\n s.flush()\n\n h1s = s.query(H1).options(eagerload('h2s')).all()\n self.assertEqual(len(h1s), 5)\n\n self.assert_unordered_result(h1s, H1,\n {'h2s': []},\n {'h2s': []},\n {'h2s': (H2, [{'value': 'abc'},\n {'value': 'abc'},\n {'value': 'abc'}])},\n {'h2s': []},\n {'h2s': (H2, [{'value': 'abc'}])})\n\n h1s = s.query(H1).options(eagerload('h3s')).all()\n\n self.assertEqual(len(h1s), 5)\n h1s = s.query(H1).options(eagerload_all('t6a.h1b'),\n eagerload('h2s'),\n eagerload_all('h3s.h1s')).all()\n self.assertEqual(len(h1s), 5)", "def difference(*colls):\n\n # Get all the leaf paths for each collection: make each path a tuple\n leaf_paths_by_coll = list(map(lambda c: list(map(tuple, get_all_leaf_paths(c))), colls))\n\n # Find the union of all leaf paths: merge all the paths and keep only the unique paths\n union_leaf_paths = 
list(distinct(concat(*leaf_paths_by_coll)))\n\n # Get the values corresponding to these leaf paths in every collection: if a leaf path doesn't exist, assumes None\n values_by_coll = list(map(lambda lp: list(map(lambda coll: tz.get_in(lp, coll), colls)), union_leaf_paths))\n\n # Filter out the leaf paths that have identical values across the collections\n keep_leaf_paths = list(map(0, filter(lambda t: not allequal(t[1]), zip(union_leaf_paths, values_by_coll))))\n keep_values = list(map(1, filter(lambda t: not allequal(t[1]), zip(union_leaf_paths, values_by_coll))))\n\n # Rearrange to construct a list of dictionaries -- one per original collection.\n # Each of these dictionaries maps a 'kept' leaf path to its corresponding\n # value in the collection\n differences = list(map(lambda vals: dict(zip(keep_leaf_paths, vals)), list(zip(*keep_values))))\n\n return differences", "def compare(*fields, **kwargs):\n\tfrom sqlalchemy import select, cast, Date\n\tfrom sqlalchemy.orm import object_session\n\tfrom sqlalchemy.sql import column\n\t\n\tsession = object_session(fields[0])\n\tt = column('t')\n\tR = Record.__table__\n\tdef sel(field):\n\t\tT = session.query(Record).filter(Record.field_id==field.id).first().__table__\n\t\treturn select([t,column('x')]).select_from(R.join(T)).where(R.c.field_id==field.id).alias()\n\n\tfor i,f in enumerate(fields):\n\t\tif i==0:\n\t\t\ts0 = sel(f)\n\t\t\ts = select([s0.c.t,s0.c.x]).order_by(t)\n\t\telse:\n\t\t\ts1 = sel(f)\n\t\t\tif kwargs.get('datecast',False):\n\t\t\t\ts = s.column(s1.c.x).where(cast(s0.c.t,Date)==cast(s1.c.t,Date))\n\t\t\telse:\n\t\t\t\ts = s.column(s1.c.x).where(s0.c.t==s1.c.t)\n\t\t\tif kwargs.get('diff',False):\n\t\t\t\ts = s.where(func.round(s0.c.x)!=func.round(s1.c.x))\n\tif kwargs.get('plot',False):\n\t\tif len(fields)!=2:\n\t\t\tprint \"Works only with exactly 2 fields as input.\"\n\t\t\treturn None\n\t\timport matplotlib.pyplot as plt\n\t\ta = float(fields[0].mult) * fields[0].units.convert(fields[1].units)\n\t\tb = float(fields[1].mult) * fields[1].units.convert(fields[0].units)\n\t\tl = Session.execute(s).fetchall()\n\t\tfig = plt.figure(figsize=(6.2,6))\n\t\tplt.scatter([float(r[1])*a for r in l],[float(r[2])*b for r in l])\n\t\tplt.xlabel(fields[0].name+' '+str(fields[0].station_id))\n\t\tplt.ylabel(fields[1].name+' '+str(fields[1].station_id))\n\t\ttry: \n\t\t\tx = kwargs['xlim']\n\t\t\ty = x\n\t\texcept:\n\t\t\tx = plt.xlim()\n\t\t\ty = plt.ylim()\n\t\tplt.plot(x,x)\n\t\tfig.axes[0].set_xlim(x)\n\t\tfig.axes[0].set_ylim(y)\n\t\tfig.show()\n\telse:\n\t\treturn session.execute(s).fetchall()", "def difference(A, B, *C):\n return setutils(\"difference\", A, B, *C)", "def do_list_merge(li1, li2=None, attr=None, unique_fn=None, set_fn=set):\n if not li1 and not li2:\n return []\n elif li2 and not li1:\n li1, li2 = li2, li1\n\n new_list = li1[:]\n\n if li2 is None:\n pass\n\n elif attr is None and unique_fn is None:\n new_list.extend(li2)\n\n else:\n if attr is not None:\n if isinstance(attr, basestring):\n def unique_fn(d):\n return d[attr]\n\n if unique_fn is not None:\n unique_fn = GlobalFns(unique_fn)\n\n comparables_1 = {unique_fn(el): idx for idx, el in enumerate(li1)}\n if len(set_fn(comparables_1)) < len(comparables_1):\n raise ValueError(\"li1 is not unique wrt. unique_fn\")\n\n comparables_2 = [unique_fn(el) for el in li2]\n if len(set_fn(comparables_2)) < len(comparables_2):\n raise ValueError(\"li2 is not unique wrt. 
unique_fn\")\n\n for idx2, cmp_2 in enumerate(comparables_2):\n el2 = li2[idx2]\n if cmp_2 in comparables_1:\n idx1 = comparables_1[cmp_2]\n new_list[idx1] = el2\n else:\n new_list.append(el2)\n\n return new_list", "def compare_values(\n cls: Type[Object_T],\n ours: Optional[Object_T],\n theirs: Optional[Object_T],\n *,\n our_schema: s_schema.Schema,\n their_schema: s_schema.Schema,\n context: ComparisonContext,\n compcoef: float,\n ) -> float:\n similarity = 1.0\n\n if ours is not None and theirs is not None:\n if type(ours) is not type(theirs):\n similarity /= 1.4\n else:\n our_name = context.get_obj_name(our_schema, ours)\n their_name = theirs.get_name(their_schema)\n if our_name != their_name:\n similarity /= 1.2\n else:\n # If the new and old versions share a reference to\n # an object that is being deleted, then we must\n # delete this object as well.\n if (type(ours), our_name) in context.deletions:\n return 0.0\n\n elif ours is not None or theirs is not None:\n # one is None but not both\n similarity /= 1.2\n\n if similarity < 1.0:\n return compcoef\n else:\n return 1.0", "def GenerateDiff(self, args):\n raise NotImplementedError(\n \"abstract method -- subclass %s must override\" % self.__class__)" ]
[ "0.6840443", "0.57526386", "0.5622923", "0.54995364", "0.54768133", "0.5458876", "0.5385492", "0.5371001", "0.5325865", "0.5241565", "0.51938784", "0.5181397", "0.51578325", "0.51564384", "0.5137712", "0.50981116", "0.50660944", "0.50567687", "0.50435317", "0.5015092", "0.5006091", "0.50037897", "0.49938032", "0.49924865", "0.49335596", "0.4925117", "0.49073595", "0.4897069", "0.48746058", "0.48631263" ]
0.7408652
0
Postmortem, using a custom debug function if passed
def post_mortem(*args, debug_fn: Optional[Callable] = None, **kwargs) -> None: if debug_fn is None: import pdb debug_fn = pdb.post_mortem debug_fn()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def debug():", "def debug_run(self):\n raise NotImplementedError", "def xpm(Pdb=Pdb):\n info = sys.exc_info()\n print(traceback.format_exc())\n post_mortem(info[2], Pdb)", "def after_step(context, step):\n if context.config.userdata.getbool(\"debug\") and step.status == \"failed\":\n spost_mortem(step.exc_traceback)", "def debug(self, *args, **kwargs):", "def debug(state: bool, /) -> None:", "def debug() -> bool:", "def do_debug(self, arg):\n orig_trace = sys.gettrace()\n if orig_trace:\n sys.settrace(None)\n globals = self.curframe.f_globals\n locals = self.curframe_locals\n Config = self.ConfigFactory\n\n class PdbppWithConfig(self.__class__):\n def __init__(self_withcfg, *args, **kwargs):\n kwargs.setdefault(\"Config\", Config)\n super(PdbppWithConfig, self_withcfg).__init__(*args, **kwargs)\n\n # Backport of fix for bpo-31078 (not yet merged).\n self_withcfg.use_rawinput = self.use_rawinput\n\n local.GLOBAL_PDB = self_withcfg\n local.GLOBAL_PDB._use_global_pdb_for_class = self.__class__\n\n prev_pdb = local.GLOBAL_PDB\n p = PdbppWithConfig(self.completekey, self.stdin, self.stdout)\n p._prompt = \"({}) \".format(self._prompt.strip())\n self.message(\"ENTERING RECURSIVE DEBUGGER\")\n self._flush_sticky_messages()\n try:\n with self._custom_completer():\n sys.call_tracing(p.run, (arg, globals, locals))\n except Exception:\n exc_info = sys.exc_info()[:2]\n self.error(traceback.format_exception_only(*exc_info)[-1].strip())\n finally:\n local.GLOBAL_PDB = prev_pdb\n self.message(\"LEAVING RECURSIVE DEBUGGER\")\n\n if orig_trace:\n sys.settrace(orig_trace)\n self.lastcmd = p.lastcmd", "def debug():\n assert current_app.debug == False, \"Don't panic! You're here by request of debug()\"", "def __debug(msg):\n\n pass", "def pm(conn):\n #pdb.post_mortem(conn.root.getconn()._last_traceback)\n redir = redirected_stdio(conn)\n try:\n conn.modules.pdb.post_mortem(conn.root.getconn()._last_traceback)\n finally:\n redir.restore()", "def debug_option(args, run):\n run.debug = True", "def debug(self, *args: Any, **kwargs) -> None:\n ...", "def debug(msg):\n #print(msg)\n pass\n #end debug", "def debug(self):\n self._debug = True\n self.run()\n self._debug = False", "def gdb_breakpoint():\n _gdb_python_call_gen('gdb_breakpoint')()", "def _debug_trace():\n from PyQt4.QtCore import pyqtRemoveInputHook\n from pdb import set_trace\n pyqtRemoveInputHook()\n set_trace()", "def debug_on(*skip_exceptions):\n if not skip_exceptions:\n skip_exceptions = ()\n\n def decorator(f):\n global DEBUG\n\n @wraps(f)\n def wrapper(*args, **kwargs):\n if not DEBUG:\n return f(*args, **kwargs)\n try:\n return f(*args, **kwargs)\n except Exception as e:\n for skip in skip_exceptions:\n if isinstance(e, skip):\n raise e\n print '\\n'\n for line in traceback.format_tb(sys.exc_info()[2]):\n print line\n print str(e.__class__.__name__) + ': ' + str(e) + '\\n'\n pdb.post_mortem(sys.exc_info()[2])\n raise e\n return wrapper\n\n return decorator", "def debugging_tests():\n logging.warning(\"Running debugging tests...\")\n pass", "def debug(self, message):\r\n pass", "def pdb(item, item2=None):\n import pdb # noqa\n pdb.set_trace() # noqa", "def debug():\n # written before I knew about the pdb module\n caller = currentframe().f_back\n method_name = caller.f_code.co_name\n line_no = getframeinfo(caller).lineno\n print(method_name + \": line \" + str(line_no))\n code.interact(local=dict(globals(), **caller.f_locals))", "def debug(msg):", "def debuggable(f):\n \n debugging = f.func_globals.get(\"DEBUGGING\",False)\n if debugging: 
return f\n\n f.func_code = __transform_codeobjects__(f.func_code,__debuggable__)\n return f", "def debug(self):\r\n debug = _DebugResult()\r\n self._wrapped_run(debug, True)\r\n self._tearDownPreviousClass(None, debug)\r\n self._handleModuleTearDown(debug)", "def report_debug(self, rc):\n pass", "def fault_debug(value: bool = False) -> None:", "def debugger(self, force=False):\r\n from IPython.utils.warn import error\r\n if not (force or self.call_pdb):\r\n return\r\n\r\n if not hasattr(sys, 'last_traceback'):\r\n error('No traceback has been produced, nothing to debug.')\r\n return\r\n\r\n from pudb import pm\r\n\r\n with self.readline_no_record:\r\n pm()", "def pdb_view(request):\n import pdb; pdb.set_trace()\n return HttpResponse(\"This works.\")", "def debug(msg):\n if(CONFIG['debug']):\n logIt(msg)" ]
[ "0.7278622", "0.69504553", "0.68875915", "0.6863419", "0.6848077", "0.6839908", "0.6770106", "0.66839147", "0.66665906", "0.66077083", "0.660491", "0.6530755", "0.6507357", "0.6416765", "0.63856596", "0.63856345", "0.6353304", "0.63263094", "0.6322992", "0.6319444", "0.63086915", "0.6280563", "0.6278247", "0.6272527", "0.6190115", "0.61791235", "0.6174057", "0.61706305", "0.61599326", "0.6112147" ]
0.8282989
0
Simple forward step with crossentropy loss.
def _cross_entropy_forward_step(batch, model): timers = get_timers() # Get the batch. timers('batch-generator', log_level=2).start() try: batch_ = next(batch) except BaseException: batch_ = batch tokens, types, labels, attention_mask = process_batch(batch_) timers('batch-generator').stop() # Forward model. output_tensor = model(tokens, attention_mask, tokentype_ids=types) return output_tensor, partial(cross_entropy_loss_func, labels)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def forward_train(self, preds_T: torch.Tensor) -> torch.Tensor:\n fake_label = preds_T.data.max(1)[1]\n return F.cross_entropy(preds_T, fake_label)", "def train_step(self, X_batch: np.ndarray, Y_batch: np.ndarray):\n\n # Almost the same as previous task, calculates the cross entropy loss for multiple classes using the softmax loss equation provided in the assignment.\n targets = Y_batch\n outputs = self.model.forward(X_batch)\n self.model.backward(X_batch, outputs, targets)\n \n self.model.w += -self.learning_rate*self.model.grad\n \n loss = cross_entropy_loss(targets, outputs)\n return loss", "def _forward(self):\n\n tf.summary.image(\"image\", tensor=tf.reshape(self.x, (self.batch_size, 28, 28, 1)), max_outputs=10)\n x = self.x\n\n # x = layers.dropout(self.x, keep_prob=0.7)\n # with tf.variable_scope(\"layer1\") as scope:\n h = tf.nn.relu(layers.fully_connected(x, num_outputs=self.input_size // 2, activation_fn=None))\n # tf.summary.histogram(\"moving_mean1\", tf.get_variable(scope + \"moving_mean\"))\n # with tf.variable_scope(\"layer2\") as scope:\n # h = tf.nn.relu(layers.fully_connected(h, num_outputs=32, activation_fn=None))\n # tf.summary.histogram(\"moving_mean2\", tf.get_variable(\"moving_mean\"))\n # with tf.variable_scope(\"layer3\") as scope:\n self.logits = layers.fully_connected(h, num_outputs=10, activation_fn=None)\n # tf.summary.histogram(\"moving_mean3\", tf.get_variable(\"moving_mean\"))\n\n self.probability = tf.nn.softmax(self.logits)\n self.prediction = tf.argmax(self.probability, axis=1)", "def forward_train(self, *args, **kwargs):\n pass", "def forward(self, x):\n x = self.first_deconv(x)\n x = self.first_batch_norm(x)\n x = F.leaky_relu(x)\n\n x = self.second_deconv(x)\n x = self.second_batch_norm(x)\n x = F.leaky_relu(x)\n\n x = self.third_deconv(x)\n x = self.third_batch_norm(x)\n x = F.leaky_relu(x)\n\n x = self.fourth_deconv(x)\n x = self.fourth_batch_norm(x)\n\n x = self.fifth_deconv(x)\n x = self.fifth_batch_norm(x)\n\n x = self.sixth_deconv(x)\n x = self.sixth_batch_norm(x)\n\n x = self.seventh_deconv(x)\n\n # sigmoid_out = nn.functional.sigmoid(x)\n tanh_out = nn.functional.tanh(x)\n\n out = (tanh_out + 1) * 255 / 2\n\n # print 'out.shape =', out.shape\n\n return out", "def train_step(input, target, model, loss_fn, optimizer, **unused):\r\n model.train()\r\n output = model(input)\r\n loss = loss_fn(output, target)\r\n optimizer.backward(loss)\r\n optimizer.step()", "def forward(self, x):\n x = self.feature_extractor(x)\n batch_size, hidden = x.size()\n\n x = self.layer_1(x)\n x = torch.relu(x)\n x = self.layer_2(x)\n x = torch.relu(x)\n x = self.layer_3(x)\n\n x = torch.log_softmax(x, dim=1)\n return x", "def forward(self, X, training=False):\n pass", "def forward(self, x):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n for l in range(len(self.layers)):\n if l == 0:\n z = self.layers[l].forward(x)\n else:\n z = self.layers[l].forward(a)\n a = self.activations[l].forward(z)\n\n # output from softmax layer\n out = a\n\n ########################\n # END OF YOUR CODE #\n #######################\n\n return out", "def training_step(self, x):\n self.train()\n rec_error, feat, y = self.forward(x)\n # Reconstruction Loss\n rec_loss = torch.mean(rec_error)\n loss = rec_loss\n\n self.zero_grad()\n loss.backward()\n self.optimizer.step()\n self.eval()\n print('Rec Loss: {}'.format(rec_loss.cpu().data))\n print()\n return loss, feat, y", "def crossentropy_fn(args: StepFunctionArgs) -> SingleScorePerStepTensor:\n return 
-torch.log2(probability_fn(args))", "def forward(self, x):\r\n y = self.en_fc1(x)\r\n y = F.relu(y)\r\n y = self.en_fc2(y)\r\n y = F.relu(y)\r\n y = self.en_fc3(y)\r\n y = F.relu(y)\r\n\r\n mean = self.en_mu(y)\r\n stddev_p = self.en_log(y)\r\n \r\n n = x.shape[0]\r\n z = torch.randn(n,self.latent_dim)\r\n std = torch.exp(stddev_p/2.0)\r\n z = z.mul(std) + mean\r\n \r\n xhat = self.de_fc1(z)\r\n xhat = F.relu(xhat)\r\n xhat = self.de_fc2(xhat)\r\n xhat = F.relu(xhat)\r\n xhat = self.de_fc3(xhat)\r\n xhat = F.sigmoid(xhat)\r\n \r\n return y,mean,stddev_p,z,xhat", "def forward(self, input, target):\n target = target.squeeze_()\n return self.ratio * F.cross_entropy(input, target, weight=self.weight, ignore_index=self.ignore_index, reduction=self.reduction)", "def training_step(self, train_batch, batch_idx):\n x, y = train_batch\n logits = self.forward(x)\n loss = self.cross_entropy_loss(logits, y)\n logs = {\"train_loss\": loss}\n return {\"loss\": loss, \"log\": logs}", "def training_step(self, x):\n self.train() # Sets network to train mode\n rec_error, feat, y = self.forward(x)\n # Reconstruction Loss\n rec_loss = torch.mean(rec_error)\n loss = rec_loss\n\n self.zero_grad()\n loss.backward()\n self.optimizer.step()\n self.eval() # Sets network to evaluation mode\n print('Rec Loss: {}'.format(rec_loss.cpu().data))\n print()\n return loss, feat, y", "def train_step(x_batch, y_batch):\n feed_dict = {\n cnn.input_x: x_batch,\n cnn.input_y: y_batch,\n cnn.dropout_keep_prob: FLAGS.dropout_keep_prob,\n }\n _, step, loss = sess.run([train_op, global_step, cnn.loss], feed_dict)", "def training_step(self, input_tensor: torch.Tensor, target_tensor: torch.Tensor):\n self.forward(input_tensor)\n self.backprop(target_tensor)", "def forward(self, x):\n h = self.linear1(x)\n h = torch.nn.functional.relu(h)\n h = self.linear2(h)\n h = torch.nn.functional.relu(h)\n h = self.linear3(h)\n y_pred = torch.sigmoid(h)\n return y_pred", "def step(self, inputs=None, targets=None):\n if not self.training:\n self.train_mode()\n\n outputs, loss = self.forward(\n inputs=inputs,\n targets=targets\n )\n\n self.update(\n loss=loss,\n inputs=inputs,\n targets=targets,\n outputs=outputs\n )\n\n return outputs, loss", "def forward(self, observation: Tensor) -> Tensor:\n pass", "def train_step(x_batch, y_batch):\n feed_dict = {\n cnn.input_x: x_batch,\n cnn.input_y: y_batch,\n cnn.dropout_keep_prob: opts[\"dropout_keep_prob\"]\n }\n _, step, loss, accuracy = sess.run(\n [train_op, global_step, cnn.loss, cnn.accuracy],\n feed_dict)", "def train_step(x_batch, y_batch):\n\n feed_dict = {\n cnn.input_x: x_batch,\n cnn.input_y: y_batch,\n cnn.dropout_keep_prob: FLAGS.dropout_keep_prob\n }\n _, step, summaries, loss, accuracy = sess.run(\n [train_op, global_step, train_summary_op, cnn.loss,\n cnn.accuracy],\n feed_dict)\n time_str = datetime.datetime.now().isoformat()\n logger.info(\"{}: step {}, loss {:g}, acc {:g}\".format(\n time_str, step, loss, accuracy))\n train_summary_writer.add_summary(summaries, step)", "def forward(self, x):\n h = self.linear1(x)\n h = self.linear2(h)\n y_pred = torch.sigmoid(h)\n return y_pred", "def train_step(x_batch, y_batch):\n feed_dict = {\n cnn.x: x_batch,\n cnn.y_: y_batch,\n step_time_placeholder : last_step_time,\n cnn.keep_prob : FLAGS.keep_prob\n }\n _, step, summaries, loss, accuracy = sess.run(\n [train_op, global_step, train_summary_op, cnn.cross_entropy, cnn.accuracy],\n feed_dict)\n time_str = datetime.datetime.now().isoformat()\n print(\"{}: step {}, loss {:g}, acc 
{:g}\".format(time_str, step, loss, accuracy))\n train_summary_writer.add_summary(summaries, step)\n train_summary_writer.flush()", "def forward(self, x):\n h_relu = self.linear1(x).clamp(min=0)\n y_pred = self.linear2(h_relu)\n return y_pred", "def train_step(x_batch, y_batch):\n feed_dict = {\n cnn.input_x: x_batch,\n cnn.input_y: y_batch,\n cnn.dropout_keep_prob: FLAGS.dropout_keep_prob\n }\n _, step, loss, accuracy = sess.run(\n [train_op, global_step, cnn.loss, cnn.accuracy],\n feed_dict)", "def forward_tensor(self, x):\n pass", "def train_step(x_batch, y_batch, x_batch_lex):\n feed_dict = {\n cnn.input_x: x_batch,\n cnn.input_y: y_batch,\n # lexicon\n cnn.input_x_lexicon: x_batch_lex,\n cnn.dropout_keep_prob: FLAGS.dropout_keep_prob\n }\n _, step, summaries, loss, accuracy, neg_r, neg_p, f1_neg, f1_pos, avg_f1 = sess.run(\n [train_op, global_step, train_summary_op, cnn.loss, cnn.accuracy,\n cnn.neg_r, cnn.neg_p, cnn.f1_neg, cnn.f1_pos, cnn.avg_f1],\n feed_dict)\n time_str = datetime.datetime.now().isoformat()\n # print(\"{}: step {}, loss {:g}, acc {:g}\".format(time_str, step, loss, accuracy))\n #print(\"{}: step {}, loss {:g}, acc {:g}, neg_r {:g} neg_p {:g} f1_neg {:g}, f1_pos {:g}, f1 {:g}\".\n # format(time_str, step, loss, accuracy, neg_r, neg_p, f1_neg, f1_pos, avg_f1))\n train_summary_writer.add_summary(summaries, step)", "def forwardpass_train(self, X):\n # hidden_1\n h1_input = np.dot(X, self.W1) + self.b1\n h1_output = functions.relu(h1_input)\n # hidden_2\n h2_input = np.dot(h1_output, self.W2) + self.b2\n h2_output = functions.relu(h2_input)\n # output\n o_input = np.dot(h2_output, self.W3) + self.b3\n final_output = functions.softmax(o_input)\n return h1_input, h1_output, h2_input, h2_output, final_output", "def train_step(x_batch, y_batch):\r\n feed_dict = {\r\n cnn.input_x: x_batch,\r\n cnn.input_y: y_batch,\r\n cnn.dropout_keep_prob: FLAGS.dropout_keep_prob\r\n }\r\n\r\n _, step, summaries, loss, accuracy, predictions,y_actual = sess.run(\r\n [train_op, global_step, train_summary_op, cnn.loss, cnn.accuracy, cnn.predictions,cnn.y],\r\n feed_dict)\r\n\r\n time_str = datetime.datetime.now().isoformat()\r\n # print(\"train_f1_score:\", f1_score(y_actual, predictions, average=None))\r\n # print (predictions)\r\n # print(y_actual)\r\n print(\"{}: step {}, loss {:g}, acc {:g}\".format(time_str, step, loss, accuracy))\r\n return accuracy\r\n\r\n train_summary_writer.add_summary(summaries, step)" ]
[ "0.7079546", "0.7007037", "0.6882113", "0.67776626", "0.6773821", "0.6759933", "0.6734602", "0.67251396", "0.66867614", "0.66795677", "0.6665437", "0.66561353", "0.6633398", "0.66322726", "0.6624705", "0.6619393", "0.6619258", "0.6606617", "0.65844935", "0.65608215", "0.6553423", "0.65445954", "0.65419996", "0.6540261", "0.653904", "0.6534821", "0.65156615", "0.6515253", "0.65108985", "0.6510812" ]
0.7801202
0
Build a looped dataloader with infinite size.
def _build_infinite_size_dataloader(dataloader):
    iterator = dataloader.__iter__()
    while True:
        try:
            yield iterator.__next__()
        except StopIteration:
            iterator = dataloader.__iter__()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, len):\n self.data = []\n i = 0\n while i < len:\n i += 1\n self.data.append(self.Data())\n self.length = len", "def __iter__(self):\n\t\tfor i, data in enumerate(self.dataloader):\n\t\t\tif i * self.opt.batch_size >= self.opt.max_dataset_size:\n\t\t\t\tbreak\n\t\t\tyield data", "def make_data(self, limit: int):", "def one_shot_iterator(dataloader):\n while True:\n for data in dataloader:\n yield data", "def __iter__(self):\n for i, data in enumerate(self.dataloader):\n if i * self.opt.batch_size >= self.opt.max_dataset_size:\n break\n yield data", "def __init__(self, size: int = 100):\n self.data = [None] * size\n self.size = size", "def dynamic(seq: List[int]):\n return Data._create_dataset(seq, pad=False)", "def create_dataloader(data):\r\n input_ids = torch.LongTensor([sent['input_ids'] for sent in data])\r\n input_mask = torch.LongTensor([sent['input_mask'] for sent in data])\r\n segment_ids = torch.LongTensor([sent['segment_ids'] for sent in data])\r\n label_ids = torch.LongTensor([sent['label_ids'] for sent in data])\r\n\r\n dataset = TensorDataset(input_ids, input_mask, segment_ids, label_ids)\r\n\r\n train_sampler = RandomSampler(dataset)\r\n dataloader = DataLoader(dataset, sampler=train_sampler, batch_size=BATCH_SIZE)\r\n\r\n return dataloader", "def dataloader(self):\n\n # load / split data\n train_data = self.data.get_train_data()\n if self.args.use_dev:\n train_data, dev_data = self.data.split_data(train_data)\n test_data = self.data.get_test_data()\n\n #print(train_data[0])\n #print(dev_data[0])\n #print(test_data[0])\n\n # build dataset\n train_dataset = self.loader.build_dataset(\n train_data, \n self.args.train_max_seq_len)\n train_loader = self.loader.build_dataloader(\n train_dataset, 'train')\n\n test_dataset = self.loader.build_dataset(\n test_data,\n self.args.eval_max_seq_len)\n test_loader = self.loader.build_dataloader(\n test_dataset, 'test')\n\n if self.args.use_dev:\n dev_dataset = self.loader.build_dataset(\n dev_data,\n self.args.eval_max_seq_len)\n dev_loader = self.loader.build_dataloader(\n dev_dataset, 'dev')\n return train_loader, dev_loader, test_loader\n else:\n return train_loader, test_loader", "def data_generator(delta=1, batch_size=32):\n while True:\n yield generate_samples(delta=delta, n=batch_size)", "def training_start(self, dataloader):\n self.datasize = len(dataloader)", "def data_loader(\n self, batch_size: int = 1, iter_steps: int = 0, batch_as_list: bool = True\n ) -> DataLoader:\n data = self.data\n datasets = []\n\n for _, dat in data.items():\n datasets.append(dat.dataset())\n\n if len(datasets) < 1:\n raise FileNotFoundError(\n \"no datasets available for this model to create a loader from\"\n )\n\n return DataLoader(\n *datasets,\n batch_size=batch_size,\n iter_steps=iter_steps,\n batch_as_list=batch_as_list,\n )", "def open_dataset(dataset_path, batch_size, img_shape, infinite=True):\n dataset = generate_paths()\n\n dataset_gen = dataset_generator(\n dataset,\n batch_size=batch_size, infinite=infinite,\n img_shape=img_shape\n )\n steps = len(dataset) // batch_size\n return dataset_gen, steps", "def train_dataloader(self) -> DataLoader:\n return DataLoader(\n self.train_dataset,\n batch_size=self.hparams.batch_size,\n shuffle=True,\n num_workers=multiprocessing.cpu_count(),\n )", "def build_dataloader(cfg, augmentor=None, mode='train', dataset=None, rank=None,\n dataset_class=VolumeDataset, dataset_options={}, cf=collate_fn_train):\n assert mode in ['train', 'val', 'test']\n print('Mode: ', mode)\n\n if mode == 
'train':\n batch_size = cfg.SOLVER.SAMPLES_PER_BATCH\n elif mode == 'val':\n batch_size = cfg.SOLVER.SAMPLES_PER_BATCH * 4\n else:\n cf = collate_fn_test # update the collate function\n batch_size = cfg.INFERENCE.SAMPLES_PER_BATCH * cfg.SYSTEM.NUM_GPUS\n\n if dataset is None: # no pre-defined dataset instance\n if cfg.MODEL.TARGET_OPT_MULTISEG_SPLIT is not None:\n dataset_class = VolumeDatasetMultiSeg\n dataset = get_dataset(cfg, augmentor, mode, rank, dataset_class, dataset_options)\n\n sampler = None\n num_workers = cfg.SYSTEM.NUM_CPUS\n if cfg.SYSTEM.DISTRIBUTED:\n num_workers = cfg.SYSTEM.NUM_CPUS // cfg.SYSTEM.NUM_GPUS\n if cfg.DATASET.DISTRIBUTED == False:\n sampler = torch.utils.data.distributed.DistributedSampler(dataset)\n\n # In PyTorch, each worker will create a copy of the Dataset, so if the data\n # is preload the data, the memory usage should increase a lot.\n # https://discuss.pytorch.org/t/define-iterator-on-dataloader-is-very-slow/52238/2\n img_loader = torch.utils.data.DataLoader(\n dataset, batch_size=batch_size, shuffle=False, collate_fn=cf,\n sampler=sampler, num_workers=num_workers, pin_memory=True)\n\n return img_loader", "def RandomDataloader(num_batches,\n batch_size,\n seq_width,\n min_len,\n max_len):\n for batch_num in range(num_batches):\n\n # All batches have the same sequence length\n seq_len = random.randint(min_len, max_len)\n seq = np.random.binomial(1, 0.5, (seq_len, batch_size, seq_width))\n seq = torch.from_numpy(seq)\n\n # The input includes an additional channel used for the delimiter\n inp = torch.zeros(seq_len + 1, batch_size, seq_width + 1)\n inp[:seq_len, :, :seq_width] = seq\n inp[seq_len, :, seq_width] = 1.0 # delimiter in our control channel\n outp = seq.clone()\n\n yield inp.float(), outp.float()", "def generate_dataset():\n num_list = 10\n return [generate_list() for _ in range(num_list)]", "def some_payloaded_data(length=1000000, size=32, var=0):\n for datum in some_simple_data(length):\n yield DataWithPayload(datum, some_payload(size, var))", "def _buffered_func(dataset, size):\n\n class _EndSignal(object):\n pass\n\n end = _EndSignal()\n\n def _read_worker(r, q):\n for d in r:\n q.put(d)\n q.put(end)\n\n def _data_reader():\n r = dataset()\n q = multiprocessing.Queue(maxsize=size)\n t = multiprocessing.Process(\n target=_read_worker, args=(\n r,\n q, ))\n t.daemon = True\n t.start()\n e = q.get()\n while e != end:\n yield e\n e = q.get()\n\n return _data_reader", "def load(self, handler, name, size, \n batch_size=None, shuffle=False, \n sample_transform=None, batch_transform=None):\n if sample_transform is None:\n sample_transform = self.sample_transform\n if batch_transform is None:\n batch_transform = self.batch_transform\n dataset = DatasetIterator(name, size, handler, \n shuffle=shuffle,\n transform=sample_transform)\n if batch_size is None:\n return dataset\n batches = BatchIterator(dataset, \n batch_size=batch_size, \n transform=batch_transform)\n return batches", "def _get_dataloader(samples, batch_size):\n print(\"Cogiendo dataloader\")\n return DataLoader(samples, shuffle=True, batch_size=batch_size)", "def build(self, block_size):", "def dynamic_batch(data, max_frames_in_batch=12000):\n buf = []\n longest_frames = 0\n for sample in data:\n assert \"feat\" in sample\n assert isinstance(sample[\"feat\"], torch.Tensor)\n new_sample_frames = sample[\"feat\"].size(0)\n longest_frames = max(longest_frames, new_sample_frames)\n frames_after_padding = longest_frames * (len(buf) + 1)\n if frames_after_padding > max_frames_in_batch:\n 
yield buf\n buf = [sample]\n longest_frames = new_sample_frames\n else:\n buf.append(sample)\n if len(buf) > 0:\n yield buf", "def __init__(self, size = 0):\n self.data = []\n self.size = size", "def train_dynamic(batch_size=10):\n \n return", "def static_batch(data, batch_size=16):\n buf = []\n for sample in data:\n buf.append(sample)\n if len(buf) >= batch_size:\n yield buf\n buf = []\n if len(buf) > 0:\n yield buf", "def batch(self, data, size):\n\n return [data[x : x + size] for x in range(0, len(data), size)]", "def init_batch(self, src):\n batch, bos = src.size(1), self.src_dict.get_bos()\n return src.data.new(1, batch).fill_(bos)", "def train_dataloader(self, batch_size: Optional[int] = None) -> DataLoader:\n dataset = TabularDataset(\n task=self.config.task,\n data=self.train,\n categorical_cols=self.config.categorical_cols,\n continuous_cols=self.config.continuous_cols,\n embed_categorical=(not self.do_leave_one_out_encoder()),\n target=self.target,\n )\n return DataLoader(\n dataset,\n batch_size if batch_size is not None else self.batch_size,\n shuffle=True if self.train_sampler is None else False,\n num_workers=self.config.num_workers,\n sampler=self.train_sampler,\n pin_memory=self.config.pin_memory,\n )", "def get_data_loader(batch_size=10, num_workers=2):\n \n data_loader = torch.utils.data.DataLoader(dataset=TempuckeyDataSet(),\n batch_size=batch_size,\n shuffle=True,\n pin_memory=True,\n num_workers=num_workers,\n collate_fn=collate)\n return data_loader" ]
[ "0.62806386", "0.62140703", "0.62047887", "0.6165703", "0.6139719", "0.5913532", "0.5898376", "0.58914024", "0.5870807", "0.58379287", "0.5795319", "0.5757869", "0.5757244", "0.57561076", "0.57423776", "0.5669886", "0.5666476", "0.5624543", "0.5618841", "0.5613911", "0.5603851", "0.55625093", "0.55617183", "0.5551853", "0.55263984", "0.5503303", "0.5502659", "0.54529005", "0.5445508", "0.5438684" ]
0.83487415
0
Construct solver from Caffe solver prototxt file.
def from_caffe_solver_protoxt(cls, caffe_solver_prototxt_file: Path):
    solver_param = caffe_pb2.SolverParameter()
    with open(caffe_solver_prototxt_file, 'rt') as f:
        pb2.text_format.Merge(f.read(), solver_param)
    dictionary = {'lr_policy': solver_param.lr_policy,
                  'base_lr': solver_param.base_lr,
                  'gamma': solver_param.gamma,
                  'momentum': solver_param.momentum,
                  'max_iter': solver_param.max_iter,
                  'stepsize': solver_param.stepsize,
                  'stepvalues': solver_param.stepvalue,
                  'weight_decay': solver_param.weight_decay,
                  'iter_size': solver_param.iter_size,
                  'from_prototxt': caffe_solver_prototxt_file}
    return cls(**dictionary)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_solver(self):\n # Create a temporary solver file.\n fname = '__solver__.prototxt'\n f = open(fname, 'w')\n f.write(self.to_proto())\n f.close()\n # Get solver from file.\n solver = caffe.get_solver_from_file(fname)\n # Remove the temporary solver file and return solver.\n os.remove(fname)\n return solver", "def from_CMTSOLUTION_file(self, filename):\n with open(filename, \"rt\") as f:\n f.readline()\n f.readline()\n time_shift = float(f.readline().strip().split()[-1])\n f.readline()\n latitude = float(f.readline().strip().split()[-1])\n longitude = float(f.readline().strip().split()[-1])\n depth_in_m = float(f.readline().strip().split()[-1]) * 1e3\n\n m_rr = float(f.readline().strip().split()[-1]) / 1e7\n m_tt = float(f.readline().strip().split()[-1]) / 1e7\n m_pp = float(f.readline().strip().split()[-1]) / 1e7\n m_rt = float(f.readline().strip().split()[-1]) / 1e7\n m_rp = float(f.readline().strip().split()[-1]) / 1e7\n m_tp = float(f.readline().strip().split()[-1]) / 1e7\n\n return self(latitude, longitude, depth_in_m, m_rr, m_tt, m_pp, m_rt,\n m_rp, m_tp, time_shift)", "def from_file(csp_file_name):\n\n with open(csp_file_name, 'r') as problem_file:\n file_lines = problem_file.readlines()\n variables = {}\n constraints = Constraints()\n largest_value = 0\n # Make a list of variable names.\n for line in file_lines:\n words = line.split()\n next_variable = CSP.__get_variable_from_dictionary(variables, words[0])\n next_relation = Relation.as_function(words[1])\n next_value = words[2]\n if next_value.isdigit():\n next_value = int(next_value)\n if next_value > largest_value:\n largest_value = next_value\n constraints.add_unary_constraint(next_variable, next_relation, next_value)\n else:\n next_value = CSP.__get_variable_from_dictionary(variables, next_value)\n constraints.add_binary_constraint(next_variable, next_relation, next_value)\n # Find d and v.\n d = len(variables)\n v = largest_value\n # Set domains.\n for var in variables.values():\n var.domain = set(xrange(max(d, (v - 1))))\n new_csp = CSP(variables.values(), constraints)\n return new_csp", "def from_file(cls, fn):\n dct = store.get_dict(fn, 'trainalgorithm')\n return cls.from_dict(dct)", "def solve(ctx):\n my_solver(ctx.obj['filename'])", "def load(file_path):\n with open(file_path, \"rb\") as file:\n data = pickle.load(file)\n ocp = OptimalControlProgram(**data[\"ocp_initilializer\"])\n for key in data[\"versions\"].keys():\n if data[\"versions\"][key] != ocp.version[key]:\n raise RuntimeError(\n f\"Version of {key} from file ({data['versions'][key]}) is not the same as the \"\n f\"installed version ({ocp.version[key]})\"\n )\n out = [ocp, data[\"sol\"]]\n if \"sol_iterations\" in data.keys():\n out.append(data[\"sol_iterations\"])\n return out", "def write_solver(workdir, lr='0.0001', lrp='\"fixed\"'): \n solver = bct.CaffeSolver() \n solver.sp['base_lr'] = lr\n solver.sp['test_interval'] = '60000' \n solver.sp['lr_policy'] = lrp\n solver.write(osp.join(workdir, 'solver.prototxt'))", "def from_file(cls, configID, configDir):\n if configDir not in sys.path:\n sys.path.insert(0, configDir)\n configObj = __import__(configID)\n try:\n if configObj.config[\"solver\"] in cvx.installed_solvers():\n return cls(configID, configObj.config)\n else:\n return None\n except: # pragma: no cover\n warn(\"Could not import configuration: \" + configID)\n return None", "def from_file(cls, filename: str) -> \"NDOptimiser\":\n from autode.opt.coordinates.cartesian import CartesianCoordinates\n\n lines = open(filename, 
\"r\").readlines()\n n_atoms = int(lines[0].split()[0])\n\n title_line = NumericStringDict(lines[1])\n optimiser = cls(\n maxiter=int(title_line[\"maxiter\"]),\n gtol=GradientRMS(title_line[\"gtol\"]),\n etol=PotentialEnergy(title_line[\"etol\"]),\n )\n\n for i in range(0, len(lines), n_atoms + 2):\n raw_coordinates = np.zeros(shape=(n_atoms, 3))\n gradient = np.zeros(shape=(n_atoms, 3))\n\n for j, line in enumerate(lines[i + 2 : i + n_atoms + 2]):\n _, x, y, z, dedx, dedy, dedz = line.split()\n raw_coordinates[j, :] = [float(x), float(y), float(z)]\n gradient[j, :] = [float(dedx), float(dedy), float(dedz)]\n\n coords = CartesianCoordinates(raw_coordinates)\n coords.e = NumericStringDict(lines[i + 1])[\"E\"]\n coords.g = gradient.flatten()\n\n optimiser._history.append(coords)\n\n return optimiser", "def __init__(self, filename, num_particles, max_iteration, maxFlip, maxTabuSize, w, c1, c2):\n #Read cnf formula from file\n self.clauses, self.num_literals, self.num_clauses = self.w_clauses_from_file(filename)\n\n #Parameters of PSO\n self.num_particles = num_particles\n self.max_iteration = max_iteration\n self.w = w\n self.c1 = c1\n self.c2 = c2\n self.max_flip = maxFlip\n\n #Tabu list parameters\n self.tabuList = []\n self.maxTabuSize = maxTabuSize\n\n #Initialize particles\n self.swarm = self.init_particles(self.num_particles, self.num_literals)\n\n #Initialize global best and it's fitness\n self.global_best = self.swarm[0].position\n self.global_best_fitness = self.fitness(self.global_best)", "def __init__(self, filename):\n self.from_file(filename)\n self.parse_cell()\n self.parse_atom()\n self.apply_symops()", "def from_crystfel_file(cls, filename):\n return translate.load_crystfel(cls, filename)", "def from_crystfel_file(cls, filename):\n return translate.load_crystfel(cls, filename)", "def buildModelFromFile(fname):\n directory = os.path.dirname(fname)\n\n f = open(fname, \"r\")\n in_map = yaml.safe_load(f)\n f.close()\n\n expression = \"\"\n\n return build_model_from_dict(in_map)", "def fromfile(self, path):\n\t\tdata = filetools.read_data(path)\n\t\tprint \"File read: %i lines\" % len(data)\n\t\tself.build_matrix(data)", "def buildFromCSV(self, filepath):\r\n\t\t# TODO: Implement\r\n\t\traise NotImplementedError('This function has not yet been implemented.')\r\n\t\t# with open(filepath, 'r') as scheduleFile:\r\n\t\t# \t# Reusing Parser.parseCSVs(), but not in the intended way; ok because validation is not yet implemented\r\n\t\t# \t# TODO: Split Parser.parseCSVs() into separate people/set file parsers \r\n\t\t# \tn, people, setConstraints = Parser.parseCSVs(-1, scheduleFile, [])\r", "def cl_program_from_file(context, filename):\n return cl.Program(context, open(os.path.join(CL_PATH, filename)).read())", "def __init__(self, inputfolder, gpu_id=0, model=None):\n CaffeLoader.__init__(self, inputfolder, gpu_id=gpu_id)\n logger.info('loading deploy.prototxt from %s' % inputfolder)\n if model:\n self.setModel(model)\n if self.caffemodel and self.protofile:\n self.net = caffe.Net(self.protofile, caffe.TEST)\n else:\n logger.error('Cannot find prototxt or caffemodel file')\n sys.exit(0)", "def from_file(cls, file):\n instance = cls()\n with open(file) as f:\n for line in f:\n line = line.strip()\n if len(line) > 0 and not line.startswith('#'):\n instance.parse_and_add_clause(line)\n return instance", "def from_config(param_file, coeff_file, **kwargs):\n with open(coeff_file) as f:\n coeff_lines = f.readlines()\n coeff_lines = [line for line in coeff_lines if not line.startswith(\"#\")]\n 
element_profile = {}\n ne, nbc = coeff_lines[0].split()\n ne, nbc = int(ne), int(nbc)\n for n in range(ne):\n specie, r, w = coeff_lines[1 + n * (nbc + 1)].split()\n r, w = float(r), float(w)\n element_profile[specie] = {\"r\": r, \"w\": w}\n\n rcut_pattern = re.compile(r\"rcutfac (.*?)\\n\", re.S)\n twojmax_pattern = re.compile(r\"twojmax (\\d*)\\n\", re.S)\n quadratic_pattern = re.compile(r\"quadraticflag (.*?)(?=\\n|$)\", re.S)\n\n with zopen(param_file, \"rt\") as f:\n param_lines = f.read()\n\n rcut = float(rcut_pattern.findall(param_lines)[-1])\n twojmax = int(twojmax_pattern.findall(param_lines)[-1])\n if quadratic_pattern.findall(param_lines):\n quadratic = bool(int(quadratic_pattern.findall(param_lines)[-1]))\n else:\n quadratic = False\n\n describer = BispectrumCoefficients(\n rcutfac=rcut, twojmax=twojmax, element_profile=element_profile, quadratic=quadratic, pot_fit=True\n )\n model = SKLModel(model=LinearRegression(), describer=describer, **kwargs)\n coef = np.array(\n np.concatenate([coeff_lines[(2 + nbc * n + n) : (2 + nbc * (n + 1) + n)] for n in range(ne)]),\n dtype=np.float64,\n )\n model.model.coef_ = coef\n model.model.intercept_ = 0\n return SNAPotential(model=model)", "def init_from_file(self, filepath, batch_settings, effects_log):\n # don't forget to update the module docstring with changes here\n input_template_name = 'cost_factors_energysecurity'\n input_template_version = 0.3\n input_template_columns = {\n 'calendar_year',\n 'dollar_basis',\n 'dollars_per_bbl',\n 'oil_import_reduction_as_percent_of_total_oil_demand_reduction',\n }\n\n df = read_input_file(filepath, effects_log)\n validate_template_version_info(df, input_template_name, input_template_version, effects_log)\n\n # read in the data portion of the input file\n df = read_input_file(filepath, effects_log, skiprows=1)\n validate_template_column_names(filepath, df, input_template_columns, effects_log)\n\n df = df.loc[df['dollar_basis'] != 0, :]\n\n df = batch_settings.ip_deflators.adjust_dollars(batch_settings, df, effects_log, 'dollars_per_bbl')\n\n self._data = df.set_index('calendar_year').to_dict(orient='index')", "def from_file(path, name=None, seq_types=None):\n ext = path.split(\".\")[-1]\n if name is None:\n name = path.split(\"/\")[-1].replace(f\".{ext}\", \"\")\n with open(path, \"r\") as f:\n netlist = f.read()\n if ext == \"v\":\n return verilog_to_circuit(netlist, name, seq_types)\n elif ext == \"bench\":\n return bench_to_circuit(netlist, name)\n else:\n raise ValueError(f\"extension {ext} not supported\")", "def from_file(cls, filename):\n constructor_args = _load_serialized_mesh(filename)\n return cls(*constructor_args)", "def readsol_CBC(self,filename, lp, vs):\n\t\tf = file(filename)\r\n##\t\tfor i in range(len(lp.constraints)): f.readline()\r\n\t\tvalues = {}\r\n\t\tfor v in vs:\r\n\t\t\tvalues[v.name] = 0.0\r\n\t\t\tpass\r\n\t\tfor line in f:\r\n\t\t\tl = line.split()\r\n\t\t\tvalues[l[1]] = float(l[2])\r\n\t\t\tpass\n##\t\tfor v in vs:\r\n##\t\t\tl = f.readline().split()\r\n##\t\t\tvalues[v.name] = float(l[1])\r\n\t\tstatus = LpStatusUndefined # No status info\n\t\treturn status, values", "def preprocess(file: TextIO, args: Optional[List[str]] = None) -> MipsProgram:\n filename = os.path.abspath(file.name)\n memory = Memory()\n\n argv = [filename]\n if args:\n argv.extend(args)\n\n linesofcode: List[SourceLine] = process_file(file)\n\n labels: Dict[str, Label] = {}\n # Collect Preprocessor Directives.\n includes, eqvs, linesofcode = preprocessor_directives(linesofcode)\n\n # Gather 
.data/.text sections into separate lists\n unprocessed_labels, unprocessed_code = split_to_sections(linesofcode)\n\n # First process all the .data labels so they can be replaced in .text\n data_labels(labels, unprocessed_labels, memory)\n # Second gather the code labels,\n # this also replaces all labels in code with the correct value\n processed_code = code_labels(labels, unprocessed_code)\n\n # Cannot run a program without a main\n if not (\"main\" in labels and labels[\"main\"].location == mipsRE.TEXT_SEC):\n raise MipsException(f\"Cannot locate main label in {filename}\")\n\n registers = Registers()\n load_args(registers, memory, argv)\n\n registers[\"pc\"] = labels[\"main\"].value\n registers[\"$sp\"] = registers[\"$fp\"] = registers[\"$gp\"] = memory.ram[\"stack\"][\"stops\"]\n\n memory.extend_stack(bytes([ord(\"@\")] * Memory.PAGE_SIZE))\n\n return MipsProgram(name=filename, filenames=[filename, *includes], labels=labels, memory=memory, source=processed_code, registers=registers, eqvs=eqvs,)", "def buildSolverModel(self, lp):\n self._extract(lp)\n try:\n # Apply controls, warmstart etc. We do this here rather than in\n # callSolver() so that the caller has a chance to overwrite things\n # either using the `prepare` argument to callSolver() or by\n # explicitly calling\n # self.buildSolverModel()\n # self.callSolver()\n # self.findSolutionValues()\n # This also avoids setting warmstart information passed to the\n # constructor from actualResolve(), which would almost certainly\n # be unintended.\n model = lp.solverModel\n # Apply controls that were passed to the constructor\n for key, name in [\n (\"gapRel\", \"MIPRELSTOP\"),\n (\"timeLimit\", \"MAXTIME\"),\n (\"heurFreq\", \"HEURFREQ\"),\n (\"heurStra\", \"HEURSTRATEGY\"),\n (\"coverCuts\", \"COVERCUTS\"),\n (\"preSolve\", \"PRESOLVE\"),\n ]:\n value = self.optionsDict.get(key, None)\n if value is not None:\n model.setControl(name, value)\n\n # Apply any other controls. These overwrite controls that were\n # passed explicitly into the constructor.\n for option in self.options:\n if isinstance(option, tuple):\n name = optione[0]\n value = option[1]\n else:\n fields = option.split(\"=\", 1)\n if len(fields) != 2:\n raise PulpSolverError(\"Invalid option \" + str(option))\n name = fields[0].strip()\n value = fields[1].strip()\n try:\n model.setControl(name, int(value))\n continue\n except ValueError:\n pass\n try:\n model.setControl(name, float(value))\n continue\n except ValueError:\n pass\n model.setControl(name, value)\n # Setup warmstart information\n if self.optionsDict.get(\"warmStart\", False):\n solval = list()\n colind = list()\n for v in sorted(lp.variables(), key=lambda x: x._xprs[0]):\n if v.value() is not None:\n solval.append(v.value())\n colind.append(v._xprs[0])\n if _ismip(lp) and self.mip:\n # If we have a value for every variable then use\n # loadmipsol(), which requires a dense solution. 
Otherwise\n # use addmipsol() which allows sparse vectors.\n if len(solval) == model.attributes.cols:\n model.loadmipsol(solval)\n else:\n model.addmipsol(solval, colind, \"warmstart\")\n else:\n model.loadlpsol(solval, None, None, None)\n # Setup message callback if output is requested\n if self.msg:\n\n def message(prob, data, msg, msgtype):\n if msgtype > 0:\n print(msg)\n\n model.addcbmessage(message)\n except (xpress.ModelError, xpress.InterfaceError, xpress.SolverError) as err:\n raise PulpSolverError(str(err))", "def __init__(self, opts: dict, solver_opts: dict):\n self.name = opts.get(\"name\", \"Undefined\") # Name of the problem\n self.gp = opts.get(\"grid_points\") # Number of grid points\n self.nadir_p = opts.get(\"nadir_points\") # Nadir points\n self.eps = opts.get(\"penalty_weight\", 1e-3) # Penalty weight\n self.round = opts.get(\"round_decimals\", 9) # Decimal places to round to\n self.nadir_r = opts.get(\"nadir_ratio\", 1) # Nadir ratio\n self.logdir = opts.get(\"logging_folder\", \"logs\") # Folder to save logs\n self.early_exit = opts.get(\"early_exit\", True) # Whether to enable early exit\n self.bypass = opts.get(\"bypass_coefficient\", True) # Whether to enable bypass coefficient\n self.flag = opts.get(\"flag_array\", True) # Whether to use flag array\n self.cpu_count = opts.get(\"cpu_count\", cpu_count()) # Number of CPUs to use\n self.redivide_work = opts.get(\"redivide_work\", True) # Whether to redivide work\n self.model_fn = opts.get(\"pickle_file\", \"model.p\") # Pickle file name\n self.shared_flag = opts.get(\"shared_flag\", True) # Whether to use shared flag array\n self.output_excel = opts.get(\"output_excel\", True) # Whether to output to Excel\n self.process_logging = opts.get(\"process_logging\", False) # Whether to enable process logging\n self.process_timeout = opts.get(\"process_timeout\", None) # Timeout for processes\n self.solver_name = opts.get(\"solver_name\", \"gurobi\") # Name of solver\n self.solver_io = opts.get(\"solver_io\", \"python\") # IO mode of solver\n\n self.solver_opts = solver_opts # Solver options\n self.solver_opts[\"MIPGap\"] = solver_opts.get(\"MIPGap\", 0.0) # MIP gap\n self.solver_opts[\"NonConvex\"] = solver_opts.get(\"NonConvex\", 2) # Nonconvex setting\n\n # Remove None values from dict when user has overriden them\n for key, value in dict(self.solver_opts).items():\n if value is None or value:\n del self.solver_opts[key]\n\n self.time_created = time.strftime(\"%Y%m%d-%H%M%S\") # Time the options object was created\n self.log_name = self.name + \"_\" + str(self.time_created) # Name of log file", "def fromXmlFile(filename, plant, orderList, simulator, evaluator):\n\t\tfile = open(filename, \"r\")\n\t\tdoc = minidom.parse(file)\n\t\toptimizer = Optimizer.fromXml(doc, plant, orderList, simulator, evaluator)\n\t\tfile.close()\n\t\treturn optimizer", "def main():\n\n clues_file = \"data/part1-clues.txt\"\n parsed_clues_file = \"data/part1-parsedclues.txt\"\n cp = ClueParser()\n\n clues = loadList(clues_file)\n gold_parsed_clues = loadList(parsed_clues_file)\n assert(len(clues) == len(gold_parsed_clues))\n\n cp.train(clues, gold_parsed_clues)\n parsed_clues = cp.parseClues(clues)\n cp.evaluate(parsed_clues, gold_parsed_clues)", "def read(cls, file_name=None, lexclude=[], lonly=[], verbose=False):\n###################################################################\n\n # import\n import numpy as np\n \n # init\n \n vf = Velocity_Field()\n\n # fake 4-letters code generation using hexadecimal\n def __gen_fake_code__(n):\n \n FAKE = 
[]\n for i in np.arange(n):\n fake_code = (\"%4s\" % hex(i).split('x')[-1].replace('L', '')).replace(' ', '0')\n FAKE.append(fake_code.upper())\n \n return(np.array(FAKE))\n\n # reads psvelo file\n\n if verbose:\n print(\"-- Reading GMT psvelo file: %s \" % file_name)\n \n try:\n np_vel = np.array(np.mat(np.genfromtxt(file_name, comments='#')))\n except:\n raise IOError(\"!!! Could not read file: %s\" % file_name)\n \n # empty psvelo file\n if np_vel.size == 0:\n return( vf )\n \n if (np_vel.shape[1] == 8):\n if verbose:\n print(\"-- file %s has 8 columns\" % file_name)\n\n np_vel = np.delete(np_vel, -1, axis=1)\n np_code = np.array(np.mat(np.genfromtxt(file_name, comments='#', usecols=(7), dtype=str))).flatten()\n \n elif (np_vel.shape[1] == 3):\n\n if verbose:\n print(\"-- file %s has 3 columns\" % file_name)\n\n np_vel = np.delete(np_vel, -1, axis=1)\n np_code = np.array(np.mat(np.genfromtxt(file_name, comments='#', usecols=(2)))).flatten()\n\n elif (np_vel.shape[1] not in [3, 8]):\n np_code = __gen_fake_code__(np_vel.shape[0])\n else:\n raise IOError(\"!!! Could not decipher file content: %s\", file_name)\n\n # populates velocity field\n \n from pyacs.lib.gmtpoint import GMT_Point\n\n lgmt_points = []\n\n for i in np.arange(np_vel.shape[0]):\n\n code = np_code[i]\n \n if np_vel.shape[1] >= 7:\n lon, lat, Ve, Vn, SVe, SVn, SVen = np_vel[i, :]\n M = GMT_Point(lon=lon, lat=lat, Ve=Ve, Vn=Vn, SVe=SVe, SVn=SVn, SVen=SVen, code=code)\n else:\n lon, lat = np_vel[i, :]\n M = GMT_Point(lon=lon, lat=lat, code=code)\n\n if verbose:\n M.get_info(display=True)\n \n # tests whether site will be added\n \n if lonly != []:\n if M.code in lonly:\n lgmt_points.append(M)\n \n else:\n if lexclude != []:\n if M.code not in lexclude:\n lgmt_points.append(M)\n else:\n lgmt_points.append(M)\n \n vf.file_name = file_name\n vf.sites = lgmt_points\n \n return vf" ]
[ "0.6062727", "0.5984447", "0.5803342", "0.57631385", "0.55716205", "0.5436299", "0.54261035", "0.53917223", "0.532022", "0.52781713", "0.52740806", "0.52685714", "0.52685714", "0.5249456", "0.5244701", "0.52160436", "0.51900196", "0.5182835", "0.5182635", "0.5168049", "0.5160513", "0.51493496", "0.514495", "0.5131988", "0.51242775", "0.5122958", "0.51137614", "0.51133955", "0.5113283", "0.5103981" ]
0.78207666
0
Refreshes the Job's details by querying the workspace.
def refresh(self):
    self.details = self.workspace.get_job(self.id).details
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def refresh(self): # noqa\n data = self.connection.hgetall(self.key)\n if not data:\n raise NoSuchJobError('No such job: {0}'.format(self.key))\n self.restore(data)", "def reload(self):\n self.job_proto = self.serving_stub.GetJob(GetJobRequest(job=self.job_proto)).job", "def refresh_details(self) -> None:\n data = request(\n 'get',\n f'/api/v0/projects/{self.id}/',\n ).json()\n self.data.update(data)", "def refresh(self):\n\t\tif self.id is None:\n\t\t\tprint(\"({cls}): self.id is None, can't refresh\".format(cls=self.__class__.__name__))\n\t\t\treturn\n\n\t\tif self.needs_project_id and self.project_id is None:\n\t\t\tprint(\"({cls}): self.project_id is None, can't refresh\".format(cls=self.__class__.__name__))\n\t\t\treturn\n\n\t\tif self.needs_project_id:\n\t\t\targs = [self.project_id, self.id]\n\t\telse:\n\t\t\targs = [self.id]\n\n\t\tres = getattr(self._client, \"get_\" + self.method)(*args, raw=True)\n\t\tself._create_fields(res)", "def refresh_jobs(self):\n\n jobs = self.backend.get_jobs()\n\n if not isinstance(jobs, list):\n warning(self.iface, \"Error loading Jobs from the backend (Response status code not 200)\")\n jobs = []\n\n if not self.jobs_changed(jobs):\n return\n\n self.init_jobs()\n self.jobsTableWidget.setSortingEnabled(False)\n self.jobsTableWidget.setRowCount(len(jobs))\n row = 0\n self.jobs_table = {}\n for job in jobs:\n\n if job.updated:\n str_date = job.updated.strftime(\"%Y-%m-%d_%H-%M-%S\")\n qitem = QTableWidgetItem(str_date)\n qitem.setFlags(QtCore.Qt.ItemIsEnabled)\n self.jobsTableWidget.setItem(row, 1, qitem)\n elif job.created:\n str_date = job.created.strftime(\"%Y-%m-%d_%H-%M-%S\")\n qitem = QTableWidgetItem(str_date)\n qitem.setFlags(QtCore.Qt.ItemIsEnabled)\n self.jobsTableWidget.setItem(row, 1, qitem)\n\n if not job.title:\n qitem = QTableWidgetItem(\"Untitled Job!\")\n qitem.setFlags(QtCore.Qt.ItemIsEnabled)\n self.jobsTableWidget.setItem(row, 0, qitem)\n else:\n qitem = QTableWidgetItem(job.title)\n qitem.setFlags(QtCore.Qt.ItemIsEnabled)\n self.jobsTableWidget.setItem(row, 0, qitem)\n\n exec_btn = QPushButton(self.jobsTableWidget)\n exec_btn.setIcon(QIcon(os.path.join(os.path.dirname(__file__), 'images/execute_icon.svg')))\n\n if job.status:\n qitem = QTableWidgetItem(job.status)\n qitem.setFlags(QtCore.Qt.ItemIsEnabled)\n self.jobsTableWidget.setItem(row, 2, qitem)\n\n if job.status == \"finished\":\n self.jobsTableWidget.item(row, 2).setBackground(QColor(75, 254, 40, 160))\n disp_btn = QPushButton(self.jobsTableWidget)\n disp_btn.setIcon(QIcon(os.path.join(os.path.dirname(__file__), 'images/display_icon.svg')))\n disp_btn.setIconSize(QSize(29, 29))\n self.jobsTableWidget.setCellWidget(row, 4, disp_btn)\n disp_btn.clicked.connect(lambda *args, job_id=job.id: self.job_display(job_id))\n disp_btn = QPushButton(self.jobsTableWidget)\n disp_btn.setIcon(QIcon(os.path.join(os.path.dirname(__file__), 'images/download.png')))\n disp_btn.setIconSize(QSize(29, 29))\n disp_btn.clicked.connect(lambda *args, job_id=job.id: self.job_download(job_id))\n self.jobsTableWidget.setCellWidget(row, 5, disp_btn)\n iface.actionZoomIn().trigger()\n elif job.status == \"running\":\n self.jobsTableWidget.item(row, 2).setBackground(QColor(254, 178, 76, 200))\n exec_btn.setIcon(QIcon(os.path.join(os.path.dirname(__file__), 'images/stop-button.png')))\n elif job.status == \"canceled\":\n self.jobsTableWidget.item(row, 2).setBackground(QColor(254, 178, 76, 200))\n elif job.status == \"error\":\n self.jobsTableWidget.item(row, 2).setBackground(QColor(254, 100, 100, 200))\n\n 
exec_btn.setIconSize(QSize(21, 21))\n self.jobsTableWidget.setCellWidget(row, 3, exec_btn)\n\n if job.status == \"running\":\n exec_btn.clicked.connect(lambda *args, job_id=job.id: self.job_stop(job_id))\n else:\n exec_btn.clicked.connect(lambda *args, job_id=job.id: self.job_execute(job_id))\n\n info_btn2 = QPushButton(self.jobsTableWidget)\n info_btn2.setIcon(QIcon(os.path.join(os.path.dirname(__file__), 'images/edit_icon.png')))\n info_btn2.setIconSize(QSize(25, 25))\n self.jobsTableWidget.setCellWidget(row, 6, info_btn2)\n info_btn2.clicked.connect(lambda *args, job_id=job.id: self.adapt_job(job_id))\n\n info_btn3 = QPushButton(self.jobsTableWidget)\n info_btn3.setIcon(QIcon(os.path.join(os.path.dirname(__file__), 'images/info_icon.png')))\n info_btn3.setIconSize(QSize(25, 25))\n self.jobsTableWidget.setCellWidget(row, 7, info_btn3)\n info_btn3.clicked.connect(lambda *args, job_id=job.id: self.job_info(job_id))\n\n info_btn4 = QPushButton(self.jobsTableWidget)\n info_btn4.setIcon(QIcon(os.path.join(os.path.dirname(__file__), 'images/deleteFinalBtn.png')))\n info_btn4.setIconSize(QSize(25, 25))\n self.jobsTableWidget.setCellWidget(row, 8, info_btn4)\n info_btn4.clicked.connect(lambda *args, job_id=job.id: self.delete_job_final(job_id))\n\n self.refreshButton.setEnabled(True)\n self.refreshButton_service.setEnabled(True)\n\n self.jobs_table[row] = job\n\n row += 1\n\n self.jobsTableWidget.setSortingEnabled(True)", "def on_job_update(_job):\n nonlocal job\n job = _job", "def on_job_update(_job):\n nonlocal job\n job = _job", "def on_job_update(_job):\n nonlocal job\n job = _job", "def on_job_update(_job):\n nonlocal job\n job = _job", "def update(self) -> None:\n self.previous_status = self.status\n\n jobs = self._client.describe_jobs(jobs = [ self.id ])[\"jobs\"]\n\n try:\n self.state = jobs[0]\n except IndexError:\n raise ValueError(\"Invalid or unknown job id %s\" % self.id) from None", "def refresh():\n DB.drop_all()\n DB.create_all()\n # TODO Get data from OpenAQ, make Record objects with it, and add to db\n aq_data.add_aq_to_db()\n DB.session.commit()\n return 'Data refreshed!'", "def resubmit(self):\n self.keep_data = True\n ManagedJob.submit(self)", "def reload_job(self):\n if self.ui['main_window'].widgets['live_preview'].get_active():\n self._update_preview()", "def refresh_jobs(self):\n jobs = self.connection.user_jobs()\n\n self.init_jobs()\n self.jobsTableWidget.setRowCount(len(jobs))\n row = 0\n for val in jobs:\n\n if \"id\" in val:\n qitem = QTableWidgetItem(val[\"id\"])\n qitem.setFlags(QtCore.Qt.ItemIsEnabled)\n self.jobsTableWidget.setItem(row, 0, qitem)\n\n if \"error\" in val:\n if val[\"error\"]:\n if \"message\" in val[\"error\"]:\n qitem = QTableWidgetItem(val[\"error\"][\"message\"])\n qitem.setFlags(QtCore.Qt.ItemIsEnabled)\n self.jobsTableWidget.setItem(row, 1, qitem)\n elif \"description\" in val:\n qitem = QTableWidgetItem(val[\"description\"])\n qitem.setFlags(QtCore.Qt.ItemIsEnabled)\n self.jobsTableWidget.setItem(row, 1, qitem)\n\n if \"submitted\" in val:\n qitem = QTableWidgetItem(val[\"submitted\"])\n qitem.setFlags(QtCore.Qt.ItemIsEnabled)\n self.jobsTableWidget.setItem(row, 2, qitem)\n\n execBtn = QPushButton(self.jobsTableWidget)\n execBtn.setText('Execute')\n\n if \"status\" in val:\n qitem = QTableWidgetItem(val[\"status\"])\n qitem.setFlags(QtCore.Qt.ItemIsEnabled)\n self.jobsTableWidget.setItem(row, 3, qitem)\n\n if val[\"status\"] == \"finished\":\n dispBtn = QPushButton(self.jobsTableWidget)\n dispBtn.setText('Display')\n 
self.jobsTableWidget.setCellWidget(row, 5, dispBtn)\n dispBtn.clicked.connect(lambda *args, row=row: self.job_display(row))\n\n self.jobsTableWidget.setCellWidget(row, 4, execBtn)\n execBtn.clicked.connect(lambda *args, row=row: self.job_execute(row))\n\n row += 1", "def fetchJob(self):\n \n mpDlg = MultipleValDialog(title='Get Job',\n initialvalues=('','my job1'),\n labels=('ID','Your label',),\n types=('string','string'),\n parent=self.mainwin)\n if mpDlg.result == True:\n jobid = mpDlg.results[0]\n name = mpDlg.results[1]\n else:\n return\n job = PEATSA.WebApp.Data.Job(jobid, self.connection) \n if job != None: \n print 'adding job id %s to list' %job.identification\n self.storeJob(name, job)\n self.updateJobs()\n return", "def refresh(self) -> None:\n pass", "def refresh(self) -> None:\n pass", "def refresh(self) -> None:\n pass", "def refresh_history(self):\n\n self.old_jobs = self.secretary_bot.history_bullshit_filter(self.old_jobs)\n self.jobs_save(self.old_jobs, 'overwrite')", "async def refresh(ctx):\n await update_tournament_list()\n res = await refresh_algorithm()\n if res == True:\n await ctx.send(\"Successfully refreshed data from sheet.\")\n else:\n await ctx.send(\":warning: Unsuccessfully refreshed data from sheet.\")", "def update(self):\n self._log.debug(\"About to update job {0}\".format(self.id))\n resp = self._api.get_job(self.id)\n\n if resp.success:\n self.submission = self._format_submission(resp.result)\n return True\n\n else:\n raise resp.result", "def refresh(self):\n connection = self._connection\n with self._refresh_lock:\n self._aiexperiment = connection.aiexperiments(self.id).fetch()", "def execute_queries():\n fetch_job_listings(engine)\n update_job_listing(engine)", "def refresh(self):\n self.dto = self.res.get()\n log.debug(f\"Refreshed {self.url}\")", "def _refresh(self):\n url = self.urlobject_single.format(self._cb.credentials.org_key, self._model_unique_id)\n resp = self._cb.get_object(url)\n self._info = resp\n self._last_refresh_time = time.time()\n return True", "def refresh(self):\n # exists state\n self.shoprefobj = self.sc.get_shopref_obj({'Alias': self.Alias})\n self.exists = self.sc.exists(self.shoprefobj)\n\n if not self.exists:\n raise ShopDisappearedError(\"Could not find the shop anymore!\")\n\n # data from the server\n self.infoshopobj = self.sc.get_infoshop_obj({'Alias': self.Alias})\n self.shopinfo = self.sc.get_info(self.infoshopobj)\n\n self._from_dict(self.shopinfo)", "def RefreshReport(self):\r\n report = self.data.getRefreshReport()\r\n if report: showInfo(self,report,self.data.title)", "def refresh(self):\n pass", "def refresh(self):\n pass", "def refresh(self):\n\n self._refreshed_on = time.time() * 1000" ]
[ "0.7204651", "0.6387912", "0.5996785", "0.5930605", "0.58091706", "0.5802573", "0.5802573", "0.5802573", "0.5802573", "0.5802385", "0.57152075", "0.5710763", "0.5668991", "0.5663364", "0.56334144", "0.5627976", "0.5627976", "0.5627976", "0.56269634", "0.56071436", "0.56027734", "0.55932873", "0.5577526", "0.5574648", "0.5562469", "0.55377233", "0.553554", "0.5530145", "0.5530145", "0.55272985" ]
0.80415493
0
Create a unique id for a new job.
def create_job_id() -> str:
    return str(uuid.uuid1())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _generate_job_id():\n # CAIP job id can contains only numbers, letters and underscores.\n unique_tag = str(uuid.uuid4()).replace(\"-\", \"_\")\n return \"tf_cloud_train_{}\".format(unique_tag)", "def _get_job_id(self):\n return uuid.uuid4().hex", "def create_task_id():\n return str(int(round(time.time() * 10**9)))", "def create_new_job(self, search_id: Hashable) -> Hashable:\n partial_id = (\n self._redis.incr(f\"search:{search_id}.job_id_counter\", amount=1) - 1\n )\n partial_id = f\"{partial_id}\" # converting to str\n job_id = f\"{search_id}.{partial_id}\"\n self._redis.rpush(f\"search:{search_id}.job_id_list\", job_id)\n self._redis.json().set(\n f\"job:{job_id}\", \".\", {\"in\": None, \"metadata\": {}, \"out\": None}\n )\n return job_id", "def create_id():\n unique_id = UniqueId()\n unique_id.put()\n return unique_id.key().id()", "def create_id():\n unique_id = UniqueId()\n unique_id.put()\n return unique_id.key().id()", "def get_id(self):\n\n self.redis.setnx('job_id', '-1')\n return self.redis.incr('job_id')", "def job_create(self, sender, name=None):\n self._require_running()\n name = name or self.DEFAULT_JOB_NAME\n job_id = uuid.uuid4().hex\n assert job_id not in self._jobs\n assert sender is not None\n assert sender.connection\n job = Job(\n job_id,\n name,\n self._session_root.joinpath(job_id),\n sender,\n self._loop\n )\n self._jobs[job_id] = job\n self._jobs_by_connection[sender.connection][job_id] = job\n self._log.debug('Created job %s', job)\n return job_id", "def generate_job_id(*args):\n md5 = hashlib.md5()\n for arg in args:\n md5.update(arg.encode(\"utf-8\"))\n return md5.hexdigest()", "async def create_job(response: Response,\n request: Request,\n job: Job = Body(\n ...,\n example={\n \"id_video\": \"bbb_0.mp4\",\n \"bitrate\": 7000,\n \"speed\": \"ultrafast\",\n },\n )\n ): \n \n\n # get an ID and return to client\n id_job = mngr.getID()\n logger.debug(\"got id_job %s\" %id_job)\n resp = [\"http:/\"]\n resp.append(request.headers['host'])\n resp.append(id_job)\n response.headers[\"Location\"] = \"/\".join(resp)\n\n # create the task\n mngr.newJob(id_job, \n job.id_video, \n job.bitrate, \n job.speed)\n\n return id_job", "def _job_id(resource_uuid: str) -> str:\n return resource_uuid if \".\" in resource_uuid else f\"{resource_uuid}.0\"", "def id(self):\n return self.job_proto.id", "def _get_job_id(self) -> str:\n return self.split_name[2][3:]", "def create_job(project, description):\n randomnames = open(os.path.join(\"Anemone\", \"templates\", \"namegen.html\")).readlines()\n jobname = (\"Quick.\" +\n random.choice(randomnames)[:-1] + # for some reason choice gives extra space\n random.choice(randomnames)[:-1]) # for some reason choice gives extra space\n\n newjob = Job.create(project=project, name=jobname, description=description)\n newjob.name = newjob.name + \".{0:0=3d}\".format(newjob.id)\n newjob.save()\n return newjob", "def makeid(cls):\n return str(uuid.uuid4().hex)", "def created_job(new_job, bulk_request):\n bulk_request.return_value = '''<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n <jobInfo xmlns=\"http://www.force.com/2009/06/asyncapi/dataload\">\n <id>THEJOBID</id>\n <operation>update</operation>\n <object>Lead</object>\n </jobInfo>\n '''\n new_job.create()\n return new_job", "def create_tag_id():\n return uuid.uuid1().int", "def make_id(self, name: str) -> str:\n # id_cache is intentionally mutable\n id = self.id_cache.get(name)\n if not id:\n id = 'epub-%d' % self.env.new_serialno('epub')\n self.id_cache[name] = id\n return id", "def 
job_id(self) -> JobId:\r\n return self._job_id", "def _make_task_id(self, task):\n index = self._tasks.add(task)\n task_id = '{name}-{idx}'.format(name=task.name, idx=index)\n\n return task_id", "def _job_id(files: list, extra: str):\n files_str = \"\"\n for file in files:\n files_str += file\n job_id = hashlib.sha1(files_str.encode() + extra.encode()).hexdigest()\n return job_id", "def _CreateRecordId(self):\n self._record_count += 1\n return '%s_%s' % (self._unique_id, self._record_count)", "def job_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"job_id\")", "def _get_unique_id(self):\n now = datetime.now()\n\n u_id = now.second + 60*(now.minute + 60*(now.hour + 24*(now.day + 31*(now.month + 366*(now.year)))))\n return \"instance\" + str(u_id)", "def unique_id() -> str:", "def get_or_create_unique_id(self):\n if not self.unique_id:\n self.unique_id = uuid.uuid4().hex\n return self.unique_id", "def createRunId(self):\n # runid is in the form of <login>_YYYY_MMDD_HHMMSS\n now = datetime.now()\n username = pwd.getpwuid(os.geteuid()).pw_name\n runid = \"%s_%02d_%02d%02d_%02d%02d%02d\" % (username, now.year, now.month,\n now.day, now.hour, now.minute, now.second)\n self.runid = runid\n return runid", "def get_job_id(self):\n return {'job_id': self._job_id}", "def get_job_id():\n # Get yarn application or K8s experiment ID when running distributed training\n if env.get_env(_JOB_ID_ENV_VAR) is not None:\n return env.get_env(_JOB_ID_ENV_VAR)\n else: # set Random ID when running local training\n job_id = uuid.uuid4().hex\n os.environ[_JOB_ID_ENV_VAR] = job_id\n return job_id", "def rule_01_set_job_id(session):\n\n my_id = \"\".join(\"%02x\" % random.randint(0,255) for _ in xrange(4))\n\n session[\"config\"][\"tags\"][\"instavpn\"] = my_id\n show.output(\"Instavpn Task ID\", \"is %s\" % my_id)\n\n return True" ]
[ "0.7751475", "0.7743574", "0.74370795", "0.73037905", "0.7232655", "0.7232655", "0.6982425", "0.6982323", "0.6908271", "0.6889731", "0.68492854", "0.67642814", "0.6763294", "0.6702419", "0.6602108", "0.65147835", "0.65133655", "0.65115803", "0.6507985", "0.64827", "0.6455972", "0.64288855", "0.6410233", "0.64082557", "0.6406856", "0.6403862", "0.638045", "0.6375566", "0.63718086", "0.6368601" ]
0.88138694
0
Applies post processing to all the outputs in the provided run results. This is a convenience function to avoid the need for manual iteration over the run_results dictionary.
def postprocess(run_results, postprocess_func):
    G_LOGGER.start(f"Applying post-processing to outputs: {postprocess_func.__name__}")
    for _, iteration_results in run_results:
        for index, iter_res in enumerate(iteration_results):
            iteration_results[index] = postprocess_func(iter_res)
    G_LOGGER.finish("Finished applying post-processing")
    return run_results
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process(self, results):\n raise NotImplementedError", "def process_results(self, results=None, **value): # pragma: no cover\n return default_result_processor(results=results, **value)", "def _post_process_result(result: Any) -> Any:\n return result", "def process_results(self, response, results):\n return results", "def process_results(self, response, results):\n return results", "def decode_results(self, outputs):\n ...", "def _postprocess(self, responses):\n for idx, response in enumerate(responses):\n responses[idx] = {'id': response[0],\n 'text': self.target_test[response[0]]}\n\n for jdx, score in enumerate(response[1:]):\n responses[idx]['score_' + str(jdx)] = response[1:][jdx]\n\n return responses", "def postprocess_result(self):\n output_file = self.analyzer_result_file\n LOG.debug_analyzer(self.analyzer_stdout)\n tidy_stdout = self.analyzer_stdout.splitlines()\n generate_plist_from_tidy_result(output_file, tidy_stdout)\n\n if self.report_hash_type == 'context-free':\n report.use_context_free_hashes(output_file)", "def postprocess(\n self,\n preds: Any,\n visualization: List[np.ndarray],\n return_datasample=False,\n **kwargs,\n ) -> dict:", "def __call__(self, results):\n\n results = self._mixup_transform(results)\n return results", "def post_postprocessor(result=None, **kw):\n logger.info(\"start post_postprocessor\")\n logger.info(result)\n logger.info(\"end post_postprocessor\")\n pass", "def __call__(self, results):\n for key in results.get('img_fields', ['img']):\n results[key] = general_ocr.imnormalize(results[key], self.mean, self.std,\n self.to_rgb)\n results['img_norm_cfg'] = dict(\n mean=self.mean, std=self.std, to_rgb=self.to_rgb)\n return results", "def postprocess(self, predicted_output, original_input=None, stats=None,\n **kwargs):\n pass", "def post_process(self, res):\n # some lists are better converted to numpy arrays\n convert_to_arr = (\n 'prediction_rank',\n 'cumulative_area',\n 'prediction_values',\n 'cumulative_crime',\n 'cumulative_crime_count',\n 'cumulative_crime_max',\n 'pai'\n )\n for k in convert_to_arr:\n if k in res:\n # this allows for optional components such as prediction values\n res[k] = np.array(res[k])", "def process_pr_results(self, results_files, custom_report):\n \n\n \n output_file = open(os.path.join(self.path, 'raw_results.txt'), 'w')\n \n #Keep track of the last read line before a newline; this will be the best value from an optimization run\n last_line = ''\n #Match a string of the format ( 0.0995749 0.101685 0.108192 0.091224 ) 0.091224 0 100\n #Contains parameter values, the best optimization value, the cpu time, and some other values, e.g. particle numbers that Copasi likes to add. 
These could be removed, but they seem useful.\n output_string = r'.*\\(\\s(?P<params>.+)\\s\\)\\s+(?P<best_value>\\S+)\\s+(?P<cpu_time>\\S+)\\s+(?P<function_evals>\\S+)\\.*'\n output_re = re.compile(output_string)\n \n best_value = None\n best_line = None\n \n #Copy the contents of the first file to results.txt\n for line in open(os.path.join(self.path, results_files[0]), 'r'):\n output_file.write(line)\n try:\n if line != '\\n':\n if output_re.match(line):\n current_value = float(output_re.match(line).groupdict()['best_value'])\n if best_value != None:\n if current_value < best_value:\n best_value = current_value\n best_line = line\n elif best_value == None:\n best_value = current_value\n best_line = line\n else:\n pass\n except Exception as e:\n if custom_report:\n pass\n else:\n raise e\n \n #And for all other files, copy everything but the last line\n for filename in results_files[1:]:\n firstLine = True\n for line in open(os.path.join(self.path, filename), 'r'):\n if not firstLine:\n output_file.write(line)\n try:\n if line != '\\n':\n if output_re.match(line):\n current_value = float(output_re.match(line).groupdict()['best_value'])\n if current_value < best_value:\n best_value = current_value\n best_line = line\n else:\n pass\n except Exception as e:\n if custom_report:\n pass\n else:\n raise e\n firstLine = False\n \n \n output_file.close()\n \n #Write the best value to results.txt\n output_file = open(os.path.join(self.path, 'results.txt'), 'w')\n \n output_file.write('Best value\\tCPU time\\tFunction evals\\t')\n \n for parameter in self.get_parameter_estimation_parameters():\n\n output_file.write(parameter[0].encode('utf8'))\n output_file.write('\\t')\n output_file.write('\\n')\n\n best_line_dict = output_re.match(best_line).groupdict()\n\n output_file.write(best_line_dict['best_value'])\n output_file.write('\\t')\n output_file.write(best_line_dict['cpu_time'])\n output_file.write('\\t')\n output_file.write(best_line_dict['function_evals'])\n output_file.write('\\t')\n \n for parameter in best_line_dict['params'].split('\\t'):\n output_file.write(parameter)\n output_file.write('\\t')\n output_file.close()\n \n if best_value != None:\n return True\n else:\n return False", "def update_results(self, results):\n pass", "def transform(self, results: Dict) -> Dict:\n\n # Apply mapping\n inputs = self._map_input(results, self.mapping)\n # Apply wrapped transforms\n outputs = self._apply_transforms(inputs)\n # Apply remapping\n outputs = self._map_output(outputs, self.remapping)\n\n results.update(outputs) # type: ignore\n return results", "def _process_results(self, timestamp, results):\n\n topic_value = self.create_topic_values(results)\n\n _log.debug('Processing Results!')\n if mode:\n _log.debug(\"ACTUATE ON DEVICE.\")\n actuator_error = False\n if make_reservations and results.devices:\n results, actuator_error = self.actuator_request(results)\n if not actuator_error:\n self.actuator_set(topic_value)\n if make_reservations and results.devices and not actuator_error:\n self.actuator_cancel()\n\n for value in results.log_messages:\n _log.debug(\"LOG: {}\".format(value))\n for key, value in results.table_output.items():\n _log.debug(\"TABLE: {}->{}\".format(key, value))\n if output_file_prefix is not None:\n results = self.create_file_output(results)\n if command_output_file is not None:\n self.create_command_file_output(timestamp, topic_value)\n # if len(results.table_output.keys()):\n # results = self.publish_analysis_results(results)\n return results", "def post_task_run(self, 
results, extra_events: Optional[dict] = None):\n\n if extra_events is None:\n extra_events = {}\n\n # No need to expose the RETURN_KEYS_KEY\n try:\n del results[RETURN_KEYS_KEY]\n except (TypeError, KeyError):\n pass\n\n # Print the post-call header\n self.print_postcall_header(results)\n\n # Send a custom task-succeeded event with the results\n if not self.request.called_directly:\n self.send_event('task-results', firex_result=convert_to_serializable(results), **extra_events)\n self.send_firex_data(self.abog)", "def _map_output_parameters(self, results, algorithm):\n if results is not None:\n\n # update python data objects\n for result_name in results:\n result_type = algorithm.get_type_from_output_name(result_name)\n if result_type is None:\n raise exceptions.PacmanTypeError(\n \"Unrecognised result name {} for algorithm {} with \"\n \"outputs {}\".format(\n result_name, algorithm.algorithm_id,\n algorithm.outputs))\n self._internal_type_mapping[result_type] = results[result_name]\n elif len(algorithm.outputs) != 0:\n raise exceptions.PacmanAlgorithmFailedToGenerateOutputsException(\n \"Algorithm {} did not generate any outputs\".format(\n algorithm.algorithm_id))", "def process_results(self):\n return self._do_action_under_lock(self._process_results)", "def transform(self, results: Dict) -> Optional[Dict]:\n if self.random_apply():\n return self.transforms(results) # type: ignore\n else:\n return results", "def postProcessOutput(self):\n\n logging.info(\" ========> Analysis %20s called postProcessOutput:\"%(self.name))\n\n if self.checkExpectedOutputFiles() == False:\n raise Exception(\"Missing expected output files. Number missing are [%d]\"%(len(self.missing_output_files)))\n\n FileUtils.checkDirExists(self.output_dir)\n\n tmpfiles = []\n\n logging.info(\" ========> Analysis %20s called postProcessOutput: Moving files from %s to %s \"%(self.name,self.working_dir,self.output_dir))\n try:\n for srcfile in self.expected_output_files:\n\n fullsrcfile = os.path.join(self.working_dir,srcfile)\n destfile = os.path.join(self.output_dir,srcfile)\n\n FileUtils.checkDirExistsForFile(destfile)\n\n res = shutil.move(fullsrcfile,destfile)\n\n if res == None:\n res = \"OK\"\n else:\n res = \"FAILED\"\n\n print \"Checking %s\"%destfile\n tmpfiles.append(destfile)\n \n logging.info(\" ========> Analysis %20s called postProcessOutput: Result of file move for %s = %s\" % (self.name,srcfile,res))\n\n except Exception as e:\n logging.info(\" ========> Analysis %20s file move failed %s\"%(self.name,e))\n raise\n\n self.output_files = tmpfiles\n\n for f in self.temp_output_files:\n logging.info(\" ========> Analysis %20s removing temp file %s \"%(self.name,f))\n\t res = os.remove(f)", "def __call__(self, results):\n\n for key in results.get('seg_fields', []):\n if self.scale_factor != 1:\n results[key] = general_ocr.imrescale(\n results[key],\n self.scale_factor,\n interpolation='nearest',\n backend=self.backend)\n return results", "def postprocess_model_outputs(self, predictions, expected):\n\n predictions = {k: t.numpy() for k, t in predictions.items()}\n\n return predictions, expected", "def postprocess(self, inference_output):\n ret = []\n quantiles = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]\n # for each request\n for inference_output_request in inference_output:\n ret_request = []\n # for each time series\n for i in inference_output_request:\n l = {}\n l[\"item_id\"] = i.item_id\n l[\"quantiles\"] = {}\n for q in quantiles:\n l[\"quantiles\"][str(q)] = i.quantile(q).tolist()\n l[\"mean\"] = 
i.mean.tolist()\n ret_request.append(json.dumps(l))\n ret.append('\\n'.join(ret_request) + '\\n')\n return ret", "def transform(self, results: Dict) -> Optional[Dict]:\n for t in self.transforms:\n results = t(results) # type: ignore\n if results is None:\n return None\n return results", "def compute_metrics(self, results: list) -> dict:\n dump(results, self.out_file_path)\n print_log(\n f'Results has been saved to {self.out_file_path}.',\n logger='current')\n return {}", "def postprocess_model_outputs(self, predictions, expected):\n\n for key, val in predictions.items():\n predictions[key] = val.numpy()\n\n for key, val in expected.items():\n expected[key] = val.numpy()\n\n return predictions, expected", "def update_running_totals_from_load_step_results(self, results: dict) -> None:\n for result in results[\"step_results\"].values():\n sobject_name = result[\"sobject\"]\n totals = self.sobject_counts[sobject_name]\n totals.errors += result[\"total_row_errors\"]\n totals.successes += result[\"records_processed\"] - result[\"total_row_errors\"]" ]
[ "0.65581095", "0.64528775", "0.62099826", "0.61681396", "0.61681396", "0.6150747", "0.61191654", "0.6098182", "0.6078971", "0.60327035", "0.6017863", "0.60096914", "0.59998536", "0.59641546", "0.5962654", "0.593701", "0.5910838", "0.5900604", "0.5772284", "0.5731572", "0.5715091", "0.5704457", "0.5674019", "0.56517893", "0.56443274", "0.5637599", "0.5632728", "0.5623844", "0.56205946", "0.56177664" ]
0.8450894
0
Turns Freshbooks tickets from the past x days into Toggl projects.
def sync(self, no_of_days=1): zd = Zendesk() tg = Toggl() try: self.print("Syncing...") self.print_divider(30) tickets = zd.get_tickets(no_of_days) for ticket in tickets: project_title = self.format_title(ticket.id, ticket.subject) if ticket.organization: client_id = tg.get_client_id(name=ticket.organization.name) if not client_id: new_client = tg.create_client(ticket.organization.name) client_id = new_client['id'] else: client_id = False self.print("Ticket '%s' has no associated organization!" % (project_title)) all_projects = tg.get_projects() if not self.already_created(ticket.id, all_projects): self.print("Creating project '%s'..." % (project_title)) result = tg.create_project(project_title, client_id, is_private=False) self.print("Toggl response:") self.log(result, silent=False) else: self.print("There is already a Toggl project for Zendesk ticket #%s!" % ticket.id) pass # TODO: edit Toggl project # tg.edit_project(project_id, name=ticket.subject) self.print_divider(30) self.print("Done!") except: self.log(traceback.format_exc(), silent=False)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def resume():\n # We now retrieve all entries in the previous month.\n # Getting the current date and the date from a month before.\n time_year = time.localtime()[0] \n time_month = time.localtime()[1]\n time_day = time.localtime()[2]\n if time_month == 1:\n prev_time_month = 12\n prev_time_year = time_year - 1\n else:\n prev_time_month = time_month - 1\n prev_time_year = time_year\n cur_date = str(time_year) + '-' + ('%02d' % time_month) + '-' + ('%02d' % time_day)\n prev_date = str(prev_time_year) + '-' + ('%02d' % prev_time_month) + '-' + ('%02d' % time_day)\n\n entries = toggl.entries_between(prev_date, cur_date)\n entry_list = []\n \n for entry in entries:\n if is_entry_in_list(entry, entry_list) == False:\n entry_list.append(entry)\n\n print(\">>> You can resume the following entries:\")\n n = 1\n for entry in entry_list:\n tags = []\n if 'tags' in entry:\n [tags.append(i) for i in entry['tags']]\n print('> {} - {} [{}]'.format(str(n),\n entry['description'],\n \",\".join(tags)))\n n += 1\n choice = int(input(\">>> Type an entry number: \"))\n\n if choice >= 1 and choice <= len(entry_list):\n res_entry = entry_list[choice-1]\n start_toggl(res_entry['description'], res_entry['tags'])\n else:\n print(\"You typed an unavailable number.\")\n\n \"\"\"\n >>> You can resume the following entries:\n > 1 - test [project]\n > 2 - another [other project]\n >>> Type an entry number: \n \"\"\"", "def push_historic_data(project):\n defects = []\n\n logger.info(\"Starting {}...\".format(project))\n jira_issues = get_jira_defects(project)\n last_upload = datetime.datetime.utcnow().replace(hour=0, minute=0, second=0, microsecond=0) + relativedelta(weekday=SA(-1))\n logger.debug(\"Fetched {} issues successfully for {}\".format(len(jira_issues), project))\n for issue in jira_issues:\n try:\n created = datetime.datetime.strptime(issue.fields.created, DATE_FORMAT)\n jira_dict = jira_obj_to_dict(issue, datetime.datetime.utcnow().strftime(DATE_FORMAT))\n\n historic_data = []\n # Last Friday of the report ran\n report_date = last_upload\n while(report_date > created):\n jira_dict = jira_for_date(jira_dict, issue.changelog, report_date)\n historic_data.insert(0, create_defect(jira_dict, issue))\n report_date -= datetime.timedelta(weeks=1)\n defects.append(historic_data)\n except Exception as e:\n logger.debug(\"Exception processing {} {}\".format(jira_dict[\"key\"], e))\n logger.exception(\"Exception\")\n logger.debug(\"Missing values {}\".format(str(jira_dict)))\n pass\n if len(defects) < len(jira_issues):\n logger.debug(\"{delta} defects not added in the {} report\".format(project, delta=len(jira_issues) - len(defects)))\n defects_as_list = []\n for defect in defects:\n defects_as_list.extend(defect)\n return post_defects(project, jira_issues, defects_as_list)", "def time_tracking(self):\n fb = FreshBooks()\n tg = Toggl()\n self.print_splash()\n self.print(\"Tip: You can always enter 'skip' when you want to skip a time entry.\", format='warn')\n days = self.get_interactive_days() # number of days to go back\n self.print(\"OK, I'll run you through the Toggl time entries of the past %i day(s).\" % (days))\n timestamp = self.get_timestamp(days) # unix timestamp including tz\n time_entries = tg.get_time_entries(timestamp)\n if len(time_entries) == 0:\n self.print(\"No Toggl entries in this time span!\", 'warn')\n return False\n time_entries = self.merge_toggl_time_entries(time_entries) # merge Toggl entries\n fb_projects = fb.get_projects()\n # Loop through merged Toggl time entries:\n for entry in 
time_entries:\n # Get and convert all necessary info:\n client_id = tg.get_client_id(project_id=entry.get('pid'))\n client_name = tg.get_client_name(client_id)\n project = tg.get_project(entry.get('pid'))\n duration = int(entry['duration']) / 60 / 60 # convert duration to hours\n duration = round(duration * 4 ) / 4 # round hours to nearest .25\n description = self.format_description(project['name'], entry['description'])\n date = str(parser.parse(entry['start']).date())\n # Print info in a nice way:\n self.print_divider(30)\n self.print(\"Description: \" + description)\n self.print(\"Date: \" + date)\n self.print(\"Hours spent: \" + str(duration))\n # Skip if Toggl entry is already booked:\n if entry.get('tags') and tg.BOOKED_TAG in entry['tags']:\n self.print(\"Skipping this entry because it is already in Freshbooks.\", 'cross')\n # Skip if duration is below 0.25:\n elif duration < 0.25:\n self.print(\"Skipping this entry because there are less than 0.25 hours spent.\", 'cross')\n # If billable, add to Freshbooks:\n elif entry['billable']:\n # Get FreshBooks project name through interactive search:\n try:\n self.print(\"Project: \\U0001F50D \")\n fb_project_name = self.interactive_search(fb_projects.keys(), client_name)\n # Handle KeyboardInterrupt\n except KeyboardInterrupt:\n answer = input(\"\\nKeyboardInterrupt! Skip current entry or quit time tracking? (S/q) \")\n if answer.lower() == 's' or answer == '':\n self.clear_lines(1)\n self.print(\"Skipping this entry.\", 'cross')\n continue\n else:\n self.clear_lines(1)\n self.print(\"Ok, stopping time tracking.\", 'cross')\n sys.exit()\n # If user requests so, skip this entry:\n self.clear_lines(1)\n if not fb_project_name:\n self.print(\"Skipping this entry.\", 'cross')\n continue\n # Otherwise, add entry to FreshBooks and tag Toggl entry/entries:\n self.print(\"Project: \" + fb_project_name)\n project_id = fb.get_project_id(fb_project_name)\n fb.add_entry(project_id, duration, description, date)\n tg.tag_projects(entry['merged_ids'], tg.BOOKED_TAG)\n # If not billable, skip entry:\n else:\n self.print(\"Skipping this entry because it is not billable.\", 'cross')\n self.print_divider(30)\n answer = input(\"All done! Open FreshBooks in browser to verify? 
(Y/n) \")\n if answer.lower() == 'y' or answer == '':\n webbrowser.open('https://%s.freshbooks.com/timesheet' % fb.fb_creds['subdomain'])", "def getPastProjects(self)->list:\n returnList=[]\n for i in range(0,randint(1, 10)):\n randumProjectId = randint(0, 109)\n if randumProjectId not in returnList:\n returnList.append(randumProjectId)\n\n return returnList", "def scrum(project):\r\n\r\n stories = project.in_progress_stories()\r\n stories_by_owner = group_stories_by_owner(stories)\r\n\r\n print bold(\"{} SCRUM -- {}\".format(project.name, pretty_date()))\r\n print\r\n\r\n for owner in stories_by_owner:\r\n print bold(owner)\r\n for story in stories_by_owner[owner]:\r\n print \" #{:12s}{:9s} {:7s} {}\".format(story.story_id,\r\n estimate_visual(story.estimate),\r\n story.story_type,\r\n story.name)\r\n\r\n print\r\n\r\n print bold(\"Bugs\")\r\n bugs = project.open_bugs()\r\n if len(bugs) == 0:\r\n print 'Not sure that I believe it, but there are no bugs'\r\n for bug in bugs:\r\n print \" #{:12s} {:4s} {}\".format(bug.story_id,\r\n initials(bug.owned_by),\r\n bug.name)", "def main():\n parser = argparse.ArgumentParser(description='Creates tickets for release certification')\n parser.add_argument('-u', '--username', help='jira username', default='admin')\n parser.add_argument('-p', '--password', help='jira password', default='admin')\n parser.add_argument('-c', '--config', help='path to config file', default='./options.ini')\n parser.add_argument('-j', '--jira', help='url of jira server', default='http://localhost:8080')\n\n args = parser.parse_args()\n\n jira_user = args.username\n jira_pass = args.password\n jira_server = args.jira\n config_file_path = args.config\n CONFIG.read(config_file_path)\n\n parent_ticket = config_map('JiraOptions')['parent_ticket']\n apprenda_version = config_map('VersionInfo')['to_version']\n jira_project = config_map('JiraOptions')['project']\n jira_issue_type = config_map('JiraOptions')['issue_type']\n jira = JIRA(jira_server, basic_auth=(jira_user, jira_pass))\n\n parent_issue = jira.issue(parent_ticket)\n ticket_list = []\n\n # create clean install tickets\n clean_strings = config_map('CleanInstallSection')\n for cloud in ['single', 'hybrid']:\n ticket_to_add = ticket.Ticket(jira_project, jira_issue_type)\n ticket_to_add.format_summary(clean_strings['summary'], apprenda_version, cloud)\n ticket_to_add.format_description(clean_strings['description'])\n ticket_list.append(ticket_to_add.__dict__)\n\n # create upgrade tickets\n from_versions = json.loads(config_map('VersionInfo')['from_versions'])\n upgrade_strings = config_map('UpgradeSection')\n\n # single cloud\n for version in from_versions:\n ticket_to_add = ticket.Ticket(jira_project, jira_issue_type)\n ticket_to_add.format_summary(upgrade_strings['summary'], apprenda_version, version,\n \"single\")\n ticket_to_add.format_description(upgrade_strings['description'])\n ticket_list.append(ticket_to_add.__dict__)\n\n # hybrid cloud\n for version in from_versions:\n ticket_to_add = ticket.Ticket(jira_project, jira_issue_type)\n ticket_to_add.format_summary(upgrade_strings['summary'], apprenda_version, version,\n \"hybrid\")\n ticket_to_add.format_description(upgrade_strings['description'])\n ticket_list.append(ticket_to_add.__dict__)\n\n # create testing tickets for other tasks\n for section in CONFIG.sections():\n if 'Ticket' in section:\n strings = config_map(section)\n ticket_to_add = ticket.Ticket(jira_project, jira_issue_type)\n ticket_to_add.format_summary(strings['summary'], apprenda_version)\n 
ticket_to_add.format_description(strings['description'])\n ticket_list.append(ticket_to_add.__dict__)\n\n print 'Created {0} tickets, now sending them to Jira'.format(len(ticket_list))\n # send issues to jira and create tickets and links\n issues = jira.create_issues(field_list=ticket_list)\n\n for item in issues:\n jira.create_issue_link(\n type=\"Task of Story\",\n outwardIssue=item['issue'].key,\n inwardIssue=parent_issue.key,\n )\n\n print 'Finished linking issues, exiting.'", "def get_newhire_tickets(group_id):\n url = f\"{BASE_URL}/api/v2/tickets\"\n headers = {\"AUTHorization\": f\"Basic {AUTH}\"}\n r = requests.get(url, headers=headers)\n if r.ok:\n print(f\"Got list of all new hire tickets.\")\n else:\n logging.debug(f\"Error - {r.status_code} - {r.content}\")\n tickets = r.json()[\"tickets\"]\n ticket_ids = set()\n last_hour = datetime.now() - timedelta(hours=1)\n\n for ticket in tickets:\n update_time = datetime.strptime(ticket[\"updated_at\"], \"%Y-%m-%dT%H:%M:%SZ\")\n # Check for tickets modified in the last hour\n if update_time > last_hour:\n # Verify the subject and group are related to New Hire Onboarding\n if \"New Hire\" in ticket[\"subject\"] and ticket[\"group_id\"] == group_id:\n start_date = get_start_date(ticket[\"id\"])\n # Check to see if ticket due date was already updated\n if start_date == ticket[\"due_by\"][0:10]:\n print(f'Ticket {ticket[\"id\"]} already updated.')\n else:\n ticket_ids.add(ticket[\"id\"])\n add_ticket_note(ticket[\"id\"], ticket[\"due_by\"][0:10])\n\n return ticket_ids", "def get_jira_defects(project):\n return get_jira_issues('project = \"{}\" AND filter = 19589'.format(project))", "def push_current_data(project):\n defects = []\n\n logger.info(\"Starting {}...\".format(project))\n jira_issues = get_jira_defects(project)\n now = datetime.datetime.utcnow().strftime(DATE_FORMAT)\n logger.debug(\"Fetched {} issues successfully for {}\".format(len(jira_issues), project))\n\n # Each issue fetched is being generated with our schema.\n for issue in jira_issues:\n try:\n jira_dict = jira_obj_to_dict(issue, now)\n defect = create_defect(jira_dict, issue)\n defects.append(defect)\n except Exception as e:\n logger.debug(\"Exception processing {} {}\".format(issue.key, e))\n logger.debug(\"Missing values {}\".format(str(jira_dict)))\n pass\n if len(defects) < len(jira_issues):\n logger.debug(\"{delta} defects not added in the {} report\".format(project, delta=len(jira_issues) - len(defects)))\n\n return post_defects(project, jira_issues, defects)", "def nfldraft(self, irc, msg, args, optyear, optround):\n \n if optyear: # if optyear is there, test for valid and if after 2003.\n testdate = self._validate(optyear, '%Y')\n if not testdate:\n irc.reply(\"Invalid year. Must be YYYY.\")\n return\n if optyear < 1996:\n irc.reply(\"Year must be after 1996.\")\n return\n \n if optround:\n if 1 <= optround <= 7:\n irc.reply(\"Draft round must be 1 or 7.\")\n return\n \n url = self._b64decode('aHR0cDovL2luc2lkZXIuZXNwbi5nby5jb20vbmZsL2RyYWZ0L3JvdW5kcw==')\n\n if optyear: # add year if we have it.\n url += '?year=%s' % (optyear)\n\n if optround: # optional round.\n url += '&round=%s' % (optround)\n\n try:\n req = urllib2.Request(url)\n html = (urllib2.urlopen(req)).read()\n except:\n irc.reply(\"Failed to fetch: %s\" % url)\n return\n\n soup = BeautifulSoup(html)\n\n # check and make sure we have a table, otherwise error.\n if not soup.find('table', attrs={'class':'tablehead draft-tracker'}): \n irc.reply(\"error: could not find any draft information. 
Bad year or round?\")\n return\n else:\n table = soup.find('table', attrs={'class':'tablehead draft-tracker'})\n \n h2 = soup.find('h2')\n rows = table.findAll('tr', attrs={'class': re.compile('^oddrow.*?|^evenrow.*?')})\n\n object_list = []\n \n for row in rows:\n pickNumber = row.find('p', attrs={'class':'round-number'})\n pickName = row.find('p', attrs={'class':'player-name'})\n pickPos = row.find('li', attrs={'class':'li-position'})\n pickTeam = row.find('p', attrs={'class':'team-name'})\n \n appendString = ircutils.bold(pickNumber.getText()) + \". \" + pickName.getText() + \" - \" + pickTeam.getText()\n \n if row.find('p', attrs={'class':'notes'}):\n appendString += \" (\" + row.find('p', attrs={'class':'notes'}).getText() + \")\"\n \n object_list.append(appendString) \n \n irc.reply(ircutils.mircColor(h2.getText().strip(), 'red') + \": \") # print header.\n \n for N in self._batch(object_list, 6):\n irc.reply(' | '.join(str(n) for n in N))", "def tickets(self):\n if self._tickets:\n return self._tickets\n else:\n # Parse out tickets by splitting on the fixed format -- will break if format changes\n tickets = self.text.split(FIXED_FORMAT + '\\n')\n tickets = tickets[1:-2] # Exclude extra line that are not tickets\n tickets = [Ticket(text) for text in tickets]\n\n for ticket in tickets:\n for line in ticket.text.splitlines():\n line = line.strip('\\n')\n\n # Use the Easier to Ask for Forgiveness idiom\n # If we recognize an entity, we parse it, if not, we do nothing\n try:\n ticket.outages.append(Outage(line))\n except ParsingException:\n pass\n\n try:\n ticket.causes.append(Cause(line))\n except ParsingException:\n pass\n\n try:\n ticket.date_log.append(DateEntry(line))\n except ParsingException:\n pass\n\n try:\n ticket.history_log.append(HistoryEntry(line))\n except ParsingException:\n pass\n\n return tickets", "def do_projects(self, arg):\n args = shlex.split(arg)\n limit = 10\n from_date = to_date = ''\n if args:\n limit = 0\n try:\n from_date, to_date = helpers.parse_date_parameters(args)\n except ValueError, msg:\n print(msg)\n return\n projects = self.db.get_projects_with_activity_field(\n from_date, to_date, limit=limit)\n refined = map(lambda x: [\n x['pid'], x['name'],\n '[Active]' if x['active'] else '[closed]',\n datetime.datetime.strftime(x['created'], '%c').decode('utf8'),\n x['description']], projects)\n print(tabulate(refined, ['ID', 'Project', 'Activity', 'Created',\n 'Description']))", "def ConvertProjectToCrpx(basket):\n\n sCrpxName = \"\"\n sCrpxContent = \"\"\n template_crp = \"seeker/crp.xml\"\n oErr = utils.ErrHandle()\n standard_features = ['searchWord', 'searchPOS']\n iQCid = 1\n\n try:\n # Access the research project and the gateway\n research = basket.research\n gateway = research.gateway\n\n # Get the name of the project\n sCrpxName = research.name\n\n # The format of what we process\n # Options: Xquery-Psdx, Folia-Xml, Negra-Tig, Alpino-Xml, Dbase\n format = basket.format\n if format == \"psdx\":\n extension = \".psdx\"\n project_type = \"Xquery-psdx\"\n elif format == \"folia\":\n extension = \".folia.xml\"\n project_type = \"Folia-Xml\"\n elif format == \"negra\":\n extension = \".xml\"\n project_type = \"Negra-Tig\"\n elif format == \"alpino\":\n extension = \".xml\"\n project_type = \"Alpino-Xml\"\n else:\n extension = \"\"\n project_type = \"\"\n\n # The language and location of what we process\n lng = basket.part.corpus.get_lng_display()\n dir = basket.part.dir\n\n outfeat = \"\" # List of features separated by semicolon\n queryname = 
\"Cesar_query-main\"\n defname = \"Cesar_standard-def\"\n currentdate = timezone.now().strftime(\"%c\")\n outputname = \"standard\"\n # Make sure that the dbfeatlist contains all features in exactly the right ORDER!!!\n dbfeatlist = []\n # Add the standard features\n for idx in range(0, len(standard_features)):\n dbfeat = standard_features[idx]\n iNum =idx+1\n oDbFeat = {\"name\": dbfeat, \"QCid\": iQCid, \"FtNum\": iNum}\n dbfeatlist.append(oDbFeat)\n # Add the user-defined features\n iLastNum = len(standard_features)+1\n feature_list = gateway.get_feature_list()\n forbidden_names = [x.lower() for x in standard_features]\n for idx in range(0, len(feature_list)):\n iNum = iLastNum + idx\n ft = feature_list[idx]\n # Name check\n if ft.name.lower() in forbidden_names:\n sCrpxName = \"\"\n sCrpxContent = [\"Sorry, please don't use the feature name [{}]\".format(ft.name)]\n return sCrpxName, sCrpxContent\n # We are okay...\n oDbFeat = {\"name\": ft.name, \"QCid\": iQCid, \"FtNum\": iNum}\n dbfeatlist.append(oDbFeat)\n\n # Create a context for the template\n context = dict(gateway=gateway, \n research=research,\n extension=extension,\n lng=lng,\n dir=dir,\n projectdir=PROJECT_DIR,\n outfeat=outfeat,\n queryname=queryname,\n defname=defname,\n outputname=outputname,\n dbfeatlist=dbfeatlist,\n project_type=project_type,\n currentdate=currentdate,\n changed=get_crpp_date(timezone.now()),\n created=get_crpp_date(basket.created),\n codedef=basket.codedef,\n codeqry=basket.codeqry)\n # Convert template\n sCrpxContent = loader.get_template(template_crp).render(context)\n sCrpxContent = re.sub(r'\\n\\s*\\n', '\\n', sCrpxContent).strip()\n\n except:\n # Show error message\n oErr.DoError(\"ConvertProjectToCrpx error: \")\n sCrpxName = \"\"\n sCrpxContent = oErr.loc_errStack\n\n return sCrpxName, sCrpxContent", "def getFeaturedProject(current_timeline, program):\n # expiry time to fetch the new featured project entity\n # the current expiry time is 2 hours.\n expiry_time = datetime.timedelta(seconds=7200)\n\n def queryForProject():\n query = project_model.GSoCProject.all()\n query.filter('is_featured', True)\n query.filter('program', program)\n if current_timeline == 'coding_period':\n project_status = project_model.STATUS_ACCEPTED\n else:\n project_status = 'completed'\n query.filter('status', project_status)\n return query\n\n q = queryForProject()\n\n # the cache stores a 3-tuple in the order student_project entity,\n # cursor and the last time the cache was updated\n fsp_cache = memcache.get('featured_gsoc_project' + program.key().name())\n\n if fsp_cache:\n cached_project, cached_cursor, cache_expiry_time = fsp_cache\n if not datetime.datetime.now() > cache_expiry_time + expiry_time:\n return cached_project\n else:\n q.with_cursor(cached_cursor)\n if q.count() == 0:\n q = queryForProject()\n\n new_project = q.get()\n new_cursor = q.cursor()\n memcache.set(\n key='featured_gsoc_project',\n value=(new_project, new_cursor, datetime.datetime.now()))\n\n return new_project", "def get_project_issues(repo_slug, max_issues_per_project=None, max_date=None):\n # type: (str, int, str) -> pd.DataFrame\n logging.info(\"Processing %s\", repo_slug)\n all_issues = pd.DataFrame(\n json_imap({\n 'reporter': 'user__login',\n 'role': 'author_association',\n 'number': 'number',\n 'title': 'title',\n 'created_at': 'created_at',\n 'body': 'body',\n 'state': 'state',\n },\n api.repo_issues(repo_slug)),\n ).sort_values('created_at')\n if max_date:\n all_issues = all_issues[all_issues['created_at'] < max_date]\n last_reported 
= all_issues.groupby(\n 'reporter').last().iloc[:max_issues_per_project]\n first_reported = all_issues.groupby('reporter').first()['created_at']\n # int(timedelta) is ns, times 86400 seconds in a day\n last_reported['tenure'] = (\n pd.to_datetime(last_reported['created_at'])\n - pd.to_datetime(last_reported.index.map(first_reported))\n ).astype(int) // 86400000000000\n last_reported['project'] = repo_slug\n return last_reported.reset_index().sort_values('number')", "def get_upcoming_games(n=10):\n conn, cursor = connect_to_db()\n query = \"\"\"select kickoff_time, t2.team_id home_id, t2.team_name home_name, \n t3.team_id away_id, t3.team_name away_name\n from fpl_fixtures t1 left join fpl_teams t2 on t1.team_h = t2.id left \n join fpl_teams t3 on t1.team_a = t3.id where started = 0 order by \n kickoff_time limit {}\"\"\".format(n)\n df = run_query(cursor, query)\n return df", "def createTasks():\n tickets = jutdaapi.get_tickets(queues=[3]) # this works better (still not\n # perfect) if list results is set to 1000 in jutda user settings\n tasks = []\n for ticket in tickets:\n tasks.append(ticketToTask(ticket))\n return tasks", "def get_jira_issues(query):\n jira_issues = []\n defects = []\n count, maxlen = 0, 1\n while count < maxlen:\n issues = jira_client.search_issues(query, startAt=count, maxResults=50, expand='changelog')\n jira_issues.extend(issues)\n count = len(jira_issues)\n maxlen = issues.total\n\n return jira_issues", "def get_jira_tasks(start_date, end_date, pj_name=project_name):\n\n start_date=start_date.replace(\"-\",'/')\n end_date=end_date.replace(\"-\",'/')\n try:\n jira = JIRA(options=options, basic_auth=(usr, pas))\n except JIRAError as e:\n if e.status_code == 401:\n print (\"Login to JIRA failed.\")\n jq = \"\"\"project = {} \n and duedate >= \"{}\" \n and duedate <= \"{}\" \n order by created DESC\"\"\".format(pj_name, start_date,end_date )\n issues = jira.search_issues(jq)\n columns = ['year','month','day', 'name','timeoriginalestimate','timespent']\n data = pd.DataFrame([], columns=columns)\n for issue in issues:\n name = \"NoAssign\"\n if issue.fields.assignee:\n name = issue.fields.assignee.displayName\n (year, month, day) = issue.fields.duedate.split(\"-\")\n timeoriginalestimate = issue.fields.timeoriginalestimate if issue.fields.timeoriginalestimate is not None else 0\n timespent = issue.fields.timespent if issue.fields.timespent is not None else 0\n tmp_df = pd.DataFrame([[year, month, day, name, timeoriginalestimate/3600, timespent/3600]], columns=columns)\n data = data.append(tmp_df)\n\n data.reset_index(drop=True, inplace=True)\n return data", "def tickets(people: list) -> str:\n\tprint(people)\n\n\tvasya = list()\n\n\tfor p in people:\n\n\t\tif p == 25:\n\t\t\tvasya.append(p)\n\t\t\tcontinue\n\n\t\tif p == 50 and 25 in vasya:\n\t\t\tdel vasya[vasya.index(25)]\n\t\t\tvasya.append(p)\n\t\t\tcontinue\n\n\t\tif p == 100:\n\t\t\tif 25 in vasya and 50 in vasya:\n\t\t\t\tdel vasya[vasya.index(25)]\n\t\t\t\tdel vasya[vasya.index(50)]\n\t\t\t\tvasya.append(p)\n\t\t\t\tcontinue\n\n\t\t\tif vasya.count(25) >= 3:\n\t\t\t\ti = 3\n\t\t\t\twhile i > 0:\n\t\t\t\t\tdel vasya[vasya.index(25)]\n\t\t\t\t\ti -= 1\n\t\t\t\tvasya.append(p)\n\t\t\t\tcontinue\n\n\t\treturn 'NO'\n\n\treturn \"YES\"", "def main():\n # group_id = get_group_id() This would be used if I had\n # the appropriate privileges\n group_id = 15000022833\n setup_logger()\n ticket_ids = get_newhire_tickets(group_id)\n for ticket_id in ticket_ids:\n update_ticket_info(ticket_id)", "def 
get_november_historical_comments(subreddit, limit):\n all_submissions = []\n\n days = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]\n month = 11\n year = 2020\n\n for day in days:\n # generate random 4 hour time chunk\n start_hour = random.randint(0, 14)\n end_hour = start_hour + 4\n start_time = int(dt.datetime(year, month, day, start_hour, 0).timestamp())\n end_time = int(dt.datetime(year, month, day, end_hour, 0).timestamp())\n\n # gets submissions and adds submission dictionary to master list\n threads = list(get_submissions(subreddit, start_time, end_time, limit))\n\n for item in threads:\n all_submissions.append(item.d_)\n\n # gets submissions and adds submission dictionary to master list\n threads = list(get_submissions(subreddit, start_time + 5, end_time + 5, limit))\n\n for item in threads:\n all_submissions.append(item.d_)\n\n print('querying day:', day)\n print('total submissions:', len(all_submissions))\n\n return all_submissions", "def tickets(number, day, premium_seating):\n #fill in your code here. \n return 0.0", "def get_popular_tickets_solution(tickets):\n popular_tickets = []\n for ticket in tickets:\n num_watchers = len(ticket['people']['watchers'])\n if num_watchers >= 8:\n popular_tickets.append(ticket)\n return popular_tickets", "def run():\n\n full_ticket = Ticket()\n daily_ticket_a = Ticket()\n daily_ticket_b = Ticket()\n daily_ticket_c = Ticket()\n community_ticket = Ticket()\n\n full_ticket.ticket_type = 'full'\n daily_ticket_a.ticket_type = 'daily-13'\n daily_ticket_b.ticket_type = 'daily-14'\n daily_ticket_c.ticket_type = 'daily-15'\n community_ticket.ticket_type = 'community'\n\n full_ticket.price = 400000\n daily_ticket_a.price = 200000\n daily_ticket_b.price = 300000\n daily_ticket_c.price = 350000\n community_ticket.price = 0\n\n full_ticket.information = 'Ticket for full 3 days devsummit event.'\n daily_ticket_a.information = 'Ticket for 13th November at devsummit event.'\n daily_ticket_b.information = 'Ticket for 14th November at devsummit event.'\n daily_ticket_c.information = 'Ticket for 15th November at devsummit event.'\n community_ticket.information = 'Ticket for community, only given by admin.'\n db.session.add(full_ticket)\n db.session.add(daily_ticket_a)\n db.session.add(daily_ticket_b)\n db.session.add(daily_ticket_c)\n db.session.add(community_ticket)\n\n db.session.commit()", "def switch_project(project):\n # Get the data\n project = project.lower()\n lines, finished, last_project = parse_file(project=None)\n line1, i1, last1, _, times1 = parse_line(lines, last_project, finished)\n line2, i2, _, new2, times2 = parse_line(lines, project, True)\n now = datetime.now()\n\n # Format the data\n if not finished:\n punch1 = now - last1\n times1.append(punch1)\n punch1 = punch1.total_seconds()\n total1 = sum(t.total_seconds() for t in times1)\n total2 = sum(t.total_seconds() for t in times2)\n now = now.strftime(TIMEF)\n\n # Modifying the lines for the file\n lines[1] = HEADER1 + project\n if not finished:\n\n # Clock-Out\n line1[-1] += IN_OUT_SEP + now\n line1[1] = fnum(total1)\n line1 = PUNCH_SEP.join(line1)\n lines[i1] = line1\n\n # Clock-In\n line2.append(now)\n line2 = PUNCH_SEP.join(line2)\n if new2:\n lines.append(line2)\n else:\n lines[i2] = line2\n\n # Write to file\n with open(PUNCHES_PATH, 'w+') as f:\n f.write('\\n'.join(lines))\n\n # Report\n if new2:\n print(f\"Created Project: '{project}'\")\n if finished:\n print(f\"CURRENTLY CLOCKED OUT, Project Switched From: '{last_project}', To: '{project}'\")\n print(f\"NOW: {now}\")\n 
print(f\"'{last_project}' Total Hrs: {fnum(total1)}\")\n print(f\"'{project}' Total Hrs: {fnum(total2)}\")\n else:\n print(f\"CLOCK OUT, Project: '{last_project}'\")\n print(f\"CLOCK IN, Project: '{project}'\")\n print(f\"'{last_project}' IN: {last1.strftime(TIMEF)}, NOW: {now}\")\n print(f\"'{last_project}' Total Hrs: {fnum(total1)}, Current Punch: {fnum(punch1)}\")\n print(f\"'{project}' Total Hrs: {fnum(total2)}\")", "def twenty_seventeen():\n return 2017", "def tickets(people):\n people= [100, 50, 25]", "def get_created_projects(self):\n project_ouessant1 = Project.objects.get(name='Ouessant Tidal Power Phase I')\n project_ouessant2 = Project.objects.get(name='Ouessant Tidal Power Phase II')\n project_liaoning = Project.objects.get(\n name='Liaoning Linghai China Resource Power Wind Power Wind Farm'\n )\n return [project_ouessant1, project_ouessant2, project_liaoning]", "def _create_historic_forecasts(\n data, time_dt, frt_dt, standard_grid_metadata=\"uk_ens\", number_of_days=5, **kwargs\n):\n historic_forecasts = iris.cube.CubeList([])\n for day in range(number_of_days):\n new_frt_dt = frt_dt + datetime.timedelta(days=day)\n new_time_dt = time_dt + datetime.timedelta(days=day)\n historic_forecasts.append(\n set_up_variable_cube(\n data - 2 + 0.2 * day,\n time=new_time_dt,\n frt=new_frt_dt,\n standard_grid_metadata=standard_grid_metadata,\n **kwargs,\n )\n )\n return historic_forecasts" ]
[ "0.5335107", "0.52704185", "0.5256979", "0.5130906", "0.50964624", "0.5023923", "0.5006047", "0.49849492", "0.4897661", "0.4888886", "0.4882372", "0.486753", "0.4858375", "0.48496896", "0.4849528", "0.48321384", "0.48075008", "0.47820213", "0.47624293", "0.4758167", "0.47529867", "0.47324315", "0.47247747", "0.47106138", "0.46973166", "0.46779072", "0.46518856", "0.46518558", "0.46459478", "0.46433154" ]
0.58926123
0
Starts interactive time tracking session. Updates Freshbooks based on Toggl entries.
def time_tracking(self): fb = FreshBooks() tg = Toggl() self.print_splash() self.print("Tip: You can always enter 'skip' when you want to skip a time entry.", format='warn') days = self.get_interactive_days() # number of days to go back self.print("OK, I'll run you through the Toggl time entries of the past %i day(s)." % (days)) timestamp = self.get_timestamp(days) # unix timestamp including tz time_entries = tg.get_time_entries(timestamp) if len(time_entries) == 0: self.print("No Toggl entries in this time span!", 'warn') return False time_entries = self.merge_toggl_time_entries(time_entries) # merge Toggl entries fb_projects = fb.get_projects() # Loop through merged Toggl time entries: for entry in time_entries: # Get and convert all necessary info: client_id = tg.get_client_id(project_id=entry.get('pid')) client_name = tg.get_client_name(client_id) project = tg.get_project(entry.get('pid')) duration = int(entry['duration']) / 60 / 60 # convert duration to hours duration = round(duration * 4 ) / 4 # round hours to nearest .25 description = self.format_description(project['name'], entry['description']) date = str(parser.parse(entry['start']).date()) # Print info in a nice way: self.print_divider(30) self.print("Description: " + description) self.print("Date: " + date) self.print("Hours spent: " + str(duration)) # Skip if Toggl entry is already booked: if entry.get('tags') and tg.BOOKED_TAG in entry['tags']: self.print("Skipping this entry because it is already in Freshbooks.", 'cross') # Skip if duration is below 0.25: elif duration < 0.25: self.print("Skipping this entry because there are less than 0.25 hours spent.", 'cross') # If billable, add to Freshbooks: elif entry['billable']: # Get FreshBooks project name through interactive search: try: self.print("Project: \U0001F50D ") fb_project_name = self.interactive_search(fb_projects.keys(), client_name) # Handle KeyboardInterrupt except KeyboardInterrupt: answer = input("\nKeyboardInterrupt! Skip current entry or quit time tracking? (S/q) ") if answer.lower() == 's' or answer == '': self.clear_lines(1) self.print("Skipping this entry.", 'cross') continue else: self.clear_lines(1) self.print("Ok, stopping time tracking.", 'cross') sys.exit() # If user requests so, skip this entry: self.clear_lines(1) if not fb_project_name: self.print("Skipping this entry.", 'cross') continue # Otherwise, add entry to FreshBooks and tag Toggl entry/entries: self.print("Project: " + fb_project_name) project_id = fb.get_project_id(fb_project_name) fb.add_entry(project_id, duration, description, date) tg.tag_projects(entry['merged_ids'], tg.BOOKED_TAG) # If not billable, skip entry: else: self.print("Skipping this entry because it is not billable.", 'cross') self.print_divider(30) answer = input("All done! Open FreshBooks in browser to verify? (Y/n) ") if answer.lower() == 'y' or answer == '': webbrowser.open('https://%s.freshbooks.com/timesheet' % fb.fb_creds['subdomain'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\n\n # check database for tracking options\n # if empty prompt to add subject\n\n # present tracking options\n\n # calculate timedelta\n\n # printing/updating the time", "def setTrackStartTime() :\n s.startTrack()", "def time_automation_listener(now):\n action()", "def main():\n \n ## Determine whether to query for the sunset or sunrise\n if datetime.now().hour >= 20:\n ## Run sunrise tweets after 8PM\n type = 'sunrise'\n else:\n ## Any earlier, run sunset tweets (by default run at 12PM)\n type = 'sunset'\n \n ## Iterate through the time series and states\n log_df = TWEET_HISTORY_DF.copy()\n for loc in c.LOCATIONS.keys():\n \n ## Instantiate a class to do the tweetin'\n MySunTweeter = SunTweeter(loc, type, log_df)\n MySunTweeter.send_tweet()\n \n ## Save the log to use in the next iteration of the loop\n log_df = MySunTweeter.log_df\n \n ## Overwrite the log with the updated records\n log_df.to_csv(\"log/SunsetWx_full_tweet_log.csv\",\n index = False)", "def start_live_observation(self):\n\n logging.debug(\"start live observation, self.liveObservationStarted: {}\".format(self.liveObservationStarted))\n\n if \"scan sampling\" in self.textButton.text():\n self.textButton.setText(\"Stop live observation\")\n self.liveTimer.start(100)\n return\n\n if not self.liveObservationStarted:\n\n if self.twEvents.rowCount():\n if dialog.MessageDialog(programName, \"Delete the current events?\", [YES, NO]) == YES:\n self.twEvents.setRowCount(0)\n self.pj[OBSERVATIONS][self.observationId][EVENTS] = []\n self.projectChanged = True\n\n self.textButton.setText(\"Stop live observation\")\n\n self.liveStartTime = QTime()\n # set to now\n self.liveStartTime.start()\n # start timer\n self.liveTimer.start(100)\n else:\n\n self.textButton.setText(\"Start live observation\")\n self.liveStartTime = None\n self.liveTimer.stop()\n\n if self.timeFormat == HHMMSS:\n self.lbTimeLive.setText(\"00:00:00.000\")\n if self.timeFormat == S:\n self.lbTimeLive.setText(\"0.000\")\n\n self.liveObservationStarted = not self.liveObservationStarted", "def start_station(self):\n if Config.LOG_TO_CONSOLE and Config.LOG_INTERVAL:\n self._log_results(first_time=True)\n\n if Config.WEATHER_UPLOAD and Config.UPLOAD_INTERVAL:\n self._upload_results(first_time=True)\n\n if Config.UPDATE_DISPLAY and Config.UPDATE_INTERVAL:\n self._update_display()", "def initialize_new_live_observation(self):\n\n self.playerType = LIVE\n self.playMode = LIVE\n\n self.create_live_tab()\n\n self.toolBox.setVisible(True)\n\n self.dwObservations.setVisible(True)\n self.dwObservationsGroup.setVisible(True)\n\n self.simultaneousMedia = False\n\n self.lbFocalSubject.setVisible(True)\n self.lbCurrentStates.setVisible(True)\n\n self.liveTab.setEnabled(True)\n self.toolBox.setItemEnabled(0, True) # enable tab\n self.toolBox.setCurrentIndex(0) # show tab\n\n self.menu_options()\n\n self.liveObservationStarted = False\n self.textButton.setText(\"Start live observation\")\n if self.timeFormat == HHMMSS:\n self.lbTimeLive.setText(\"00:00:00.000\")\n if self.timeFormat == S:\n self.lbTimeLive.setText(\"0.000\")\n\n self.liveStartTime = None\n self.liveTimer.stop()", "def open(self):\n\n self.st_time = time.strftime('%H:%M %A %d %B')\n self.is_active = True", "def main(stdscr, starting_portfolios):\n\n # Generally don't need a cursor.\n curses.curs_set(0)\n\n # Clear the screen\n stdscr.clear()\n\n # Fire up the Stock Tracker.\n st = ST(stdscr);\n st.run(starting_portfolios)", "def TeleopPeriodic(self):\n Scheduler.GetInstance().Run()\n LiveWindow.Run()", "def 
do_upt(self, arg):\n self.do_timesheet('update today')", "def start(self):\n self.login(not self.quiet)\n self.start_time = time.time()\n while True:\n self.print_time()\n try:\n self.tick()\n except Exception as e:\n print(e)", "def start(self):\n self.start_time = time.time()", "def start(self):\r\n self.start_time = time.time()", "def time_automation_listener(now):\n hass.async_add_job(action, {\n 'trigger': {\n 'platform': 'time',\n 'now': now,\n },\n })", "def start_game(self) -> None:\n self.check_edgework_view_attached()\n self.timer.start_timing()\n self._edgework_view.start_timing()", "def FreshStart(self):\n # Create a vector holding historical data for the purpose of plotting.\n # The length may vary because the sampling speed of different are\n # sensors may vary.\n\n self.history = {'time': collections.deque( [], self.history_length ),\\\n 'data': collections.deque( [], self.history_length )\n }", "def start():\n print('Running...')\n with Feed(Config.database) as feed:\n feed.refresh()", "def refresh():\r\n db.drop_all()\r\n db.create_all()\r\n for time_value in get_datetime_values('Los Angeles', 'pm25'):\r\n record = Record(datetime=str(time_value[0]), value=time_value[1])\r\n db.session.add(record)\r\n db.session.commit()\r\n return render_template('refresh.html')", "def main():\n # group_id = get_group_id() This would be used if I had\n # the appropriate privileges\n group_id = 15000022833\n setup_logger()\n ticket_ids = get_newhire_tickets(group_id)\n for ticket_id in ticket_ids:\n update_ticket_info(ticket_id)", "def start(cls):\n\n cls._set_mode_running()\n TimeDisplay.start_time()\n for callback in cls.start_callback:\n callback()", "def run(self):\n self.timestamp['start'] = datetime.datetime.now().strftime('%Y%m%dT%H%M%S')\n\n for point in self._prepare_grid():\n graph = self._prepare_graph(**point)\n env = self._prepare_env(graph, **point)\n log = self._prepare_logger(graph, env, **point)\n\n try:\n env.run(until=self.runtime)\n except Exception as e:\n print(e)\n log.close()\n\n # self.timestamp[grid.hash_grid_point(point)].append(datetime.datetime.now().strftime('%Y%m%dT%H%M%S'))\n\n self.timestamp['end'] = datetime.datetime.now().strftime('%Y%m%dT%H%M%S')", "def insert_time(self):\n if self.controller.shared_data.obj_track.size == 0:\n message = 'There is no loaded track to insert timestamp'\n messagebox.showwarning(title='Insert Time Assistant',\n message=message)\n return\n\n self.timestamp = dt.datetime(2000, 1, 1, 0, 0, 0)\n self.speed = 0\n\n spinbox_options = {'year': [1990, 2030, 2000],\n 'month': [1, 12, 1],\n 'day': [1, 31, 1],\n 'hour': [0, 23, 0],\n 'minute': [0, 59, 0],\n 'second': [0, 59, 0]}\n\n top = tk.Toplevel()\n top.title('Insert Time Assistant')\n\n # Insert data frame\n frm_form = tk.Frame(top, relief=tk.FLAT, borderwidth=3)\n frm_form.pack() # insert frame to use grid on it\n spn_time = collections.defaultdict()\n\n for i, entry in enumerate(spinbox_options):\n # This allow resize the window\n top.columnconfigure(i, weight=1, minsize=75)\n top.rowconfigure(i, weight=1, minsize=50)\n\n # Create widgets\n var = tk.StringVar(top)\n var.set(spinbox_options[entry][2])\n\n spn_time[entry] = tk.Spinbox(from_=spinbox_options[entry][0],\n to=spinbox_options[entry][1],\n master=frm_form,\n width=8,\n textvariable=var,\n justify=tk.RIGHT,\n relief=tk.FLAT)\n\n lbl_label = tk.Label(master=frm_form, text=f'{entry}', anchor='w')\n\n # Grid\n lbl_label.grid(row=i, column=0) # grid attached to frame\n spn_time[entry].grid(row=i, column=1)\n\n # Insert 
speed\n i = len(spn_time)\n top.columnconfigure(i, weight=1, minsize=75)\n top.rowconfigure(i, weight=1, minsize=50)\n spn_speed = tk.Spinbox(from_=0, to=2000,\n master=frm_form,\n width=8,\n justify=tk.RIGHT,\n relief=tk.FLAT)\n lbl_label = tk.Label(master=frm_form, text='speed (km/h)', anchor='w')\n lbl_label.grid(row=i, column=0, pady=10)\n spn_speed.grid(row=i, column=1)\n\n def _insert_timestamp():\n # Check input data and insert timestamp\n try:\n self.timestamp = dt.datetime(int(spn_time['year'].get()),\n int(spn_time['month'].get()),\n int(spn_time['day'].get()),\n int(spn_time['hour'].get()),\n int(spn_time['minute'].get()),\n int(spn_time['second'].get()))\n self.speed = float(spn_speed.get())\n if self.speed <= 0:\n raise ValueError('Speed must be a positive number.')\n\n # Insert timestamp\n self.controller.shared_data.obj_track.\\\n insert_timestamp(self.timestamp, self.speed)\n top.destroy()\n\n except (ValueError, OverflowError) as e:\n messagebox.showerror('Input Error', e)\n\n def _clear_box():\n for s in spn_time:\n spn_time[s].delete(0, 8)\n spn_time[s].insert(0, spinbox_options[s][2])\n spn_speed.delete(0, 8)\n spn_speed.insert(0, 0)\n\n # Button frame\n frm_button = tk.Frame(top)\n frm_button.pack(fill=tk.X, padx=5,\n pady=5) # fill in horizontal direction\n\n btn_clear = tk.Button(master=frm_button, text='Clear',\n command=_clear_box)\n btn_submit = tk.Button(master=frm_button, text='Submit',\n command=_insert_timestamp)\n btn_clear.pack(side=tk.RIGHT, padx=10)\n btn_submit.pack(side=tk.RIGHT, padx=10)", "def tic():\n import time\n global startTime_for_tictoc\n startTime_for_tictoc = time.time()", "def _open(self):\n \n # Set initial time\n t = datetime.datetime.today()\n timestamp = t.microsecond/1000 + t.second*1000 + \\\n t.minute*60*1000 + t.hour*60*60*1000 + \\\n t.day*24*60*60*1000\n while timestamp > 4294967295: timestamp -= 4294967295\n self._last = timestamp\n \n # Nothing else to do... already open", "def start(self):\n self._state = STATE_INACTIVE\n self._game = None\n self._last_key_press = False\n self._last_n_press = False\n self._last_lose_life = False\n self._mssg = (GLabel(text=START_MSSG, x=GAME_WIDTH/2, y=GAME_HEIGHT/2, font_size=24))\n self.time = None\n self._points_mssg = None\n self._falling_points = []\n self._FP_mssg = None", "def start_timer(self):\n self.start_time = datetime.now()", "def start(self):\r\n self.debug(\"### starting gox streaming API, trading %s%s\" %\r\n (self.curr_base, self.curr_quote))\r\n self.client.start()", "def onCreate(self):\n\t\tself.enableTick()\n\t\tself.lastFpsUpdate = 0.0\n\t\tself.fpsInfo = \"\"\n\t\tself.createTime = time.time()", "def time(lancet, issue):\n issue = get_issue(lancet, issue)\n\n with taskstatus(\"Starting harvest timer\") as ts:\n lancet.timer.start(issue)\n ts.ok(\"Started harvest timer\")" ]
[ "0.6347523", "0.576873", "0.5634519", "0.56242204", "0.56151354", "0.5609387", "0.56086725", "0.55940264", "0.5576316", "0.5486407", "0.54707676", "0.5469594", "0.5462939", "0.54449016", "0.5444178", "0.5436368", "0.54061335", "0.53546655", "0.53450114", "0.5342797", "0.53191763", "0.53158724", "0.5309369", "0.52914613", "0.52407014", "0.52161396", "0.5204799", "0.5183675", "0.51699257", "0.516171" ]
0.69116616
0
Starts interactive search, allows user to make a selection. Accepts array of strings and optional (user) query. Returns string chosen by user.
def interactive_search(self, choices, query=None): if query: match = self.get_interactive_match(choices, query) if match: self.print("Matched query to '%s'." % (match)) answer = input("Is that correct? (Y/n) ") self.clear_lines(1) if answer.lower() == 'y' or answer == '': self.clear_lines(1) return match else: self.clear_lines(1) return self.interactive_search(choices) else: return None else: query = input("Please type a query: ") self.clear_lines(1) return self.interactive_search(choices, query)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __ask_query(self):\n self.__output = list()\n return input(form('What do you want to search?\\n> '))", "def search():\n query = input('Please enter your search query\\n')\n # For now, we will just print the whole database\n #db_actions.display()\n db_actions.search(query)", "def invoke(self):\n # set menu handlers\n menu_handlers = [\n SearchByAuthor(self.db, self),\n SearchByName(self.db, self),\n SearchByPublishedDate(self.db, self)\n ]\n\n # display menu, get selection, and run\n is_exit = False\n while not is_exit:\n menu = ConsoleMenu(\n menu_handlers,\n \"Search Book by text:\"\n )\n menu.display_menu()\n is_exit = menu.prompt_and_invoke_option()", "def search():\n try:\n query = request.args.get(\"q\").lower()\n except AttributeError:\n query = request.args.get(\"q\")\n\n # Adding browse functionality\n browse = request.args.get(\"browse\")\n\n if browse is None:\n # Select all rows with a column value that includes query\n results = db.execute(\"SELECT * FROM books \"\n \"WHERE LOWER(isbn) LIKE CONCAT('%', :q, '%')\"\n \"OR LOWER(title) LIKE CONCAT('%', :q, '%') \"\n \"OR LOWER(author) LIKE CONCAT('%', :q, '%') \"\n \"ORDER BY title LIMIT 100\", {'q': query}).fetchall()\n else:\n # Select titles starting with letter\n results = db.execute(\n \"SELECT * FROM books \"\n \"WHERE LOWER(title) LIKE CONCAT(:q, '%') \"\n \"ORDER BY title\", {'q': query}).fetchall()\n\n return render_template(\"search.html\", browse=browse, query=query, results=results)", "def search():\n import booksearch as bs\n\n opt = var.get()\n term = searchBox.get()\n term2 = dateBox.get()\n\n # Case statement (substitute) for different search areas\n # Each key is an option in the OptionMenu\n searchBy = {\n \"Title & Author\" : bs.search(term),\n \"ID\" : bs.bookID(term),\n \"Date\" : bs.dateRange(term, term2),\n }\n query = searchBy[opt] # Make & stores a query (2D list)\n\n # Repopulates table\n if term != \"\":\n populate(query)", "def search_menu():\n clear_screen()\n print(\"What would you like to search by?\")\n print(\" d: Date (Default)\")\n print(\" t: Time spent\")\n print(\" e: Exact\")\n print(\" p: Pattern (Regex)\")\n user_input = input(\"> \").lower()\n if user_input == 't':\n search_by_time_spent()\n elif user_input == 'e':\n search_by_string()\n elif user_input == 'p':\n search_by_pattern()\n else:\n search_by_date()", "def book_search(library: list) -> None:\n options = ['Author', 'Title', 'Publisher', 'Shelf', 'Category', 'Subject']\n prompt = '\\nWhat option would you like to search by?'\n choice = get_user_choice(options, prompt)\n if choice == '1':\n search_by_chosen_option(library, options[0])\n elif choice == '2':\n search_by_chosen_option(library, options[1])\n elif choice == '3':\n search_by_chosen_option(library, options[2])\n elif choice == '4':\n search_by_shelf(library)\n elif choice == '5':\n search_by_chosen_option(library, options[4])\n elif choice == '6':\n search_by_chosen_option(library, options[5])", "def search_term():\n search = input(\"Enter term or string: \")\n entries = select_entries()\n entries = entries.where(\n (Entry.task_name.contains(search)) |\n (Entry.notes.contains(search)))\n view_entries(entries)\n return entries", "def _search(client, search_string):\n if search_string is None:\n logger.info(uxstring.UxString.list_all, fg=\"green\")\n\n current_page = 0\n total_pages = get_search_results(client, search_string, current_page)\n if total_pages < 1:\n return\n\n while 0 <= current_page < total_pages:\n try:\n prompt_resp = 
click.prompt(uxstring.UxString.pagination,\n type=str)\n next_page = get_next_page(prompt_resp, current_page)\n if next_page == -1:\n model_id = prompt_resp\n display_search_info(client, model_id)\n elif next_page >= total_pages or next_page < 0:\n continue\n elif next_page != current_page:\n get_search_results(client, search_string, next_page)\n current_page = next_page\n\n except click.exceptions.Abort:\n return", "def other_search(self):\n test = self.ask_zoekarg.text()\n if test:\n self.parent().search_arg = test\n self.parent().do_select()", "def searchUser(database):\n print(\"How do you want to search for a user\\n1.name\\n2.field\\n3.year of study\\n4.areas of interest\\n5.Quit\")\n choice=int(input(\"Your choice :\"))\n if choice==1:\n searchByName(database)\n elif choice==2:\n searchByField(database)\n elif choice==3: \n searchByYear(database)\n elif choice==4:\n searchByInterest(database)\n elif choice==5:\n return", "def __ui_choose_search_criteria_for_persons(self):\n print(\"By which criteria do you want to search persons?\\n\"\n \" 1. By name\\n\"\n \" 2. By phone number\\n\")\n user_choice = input(\"Type your option: \").strip()\n if user_choice == \"1\":\n self.__ui_search_persons_by_name()\n elif user_choice == \"2\":\n self.__ui_search_persons_by_phone_number()\n else:\n print(\"Invalid option!\\n\")\n return", "def search(query_string):", "def search():\n pass", "def search():\r\n ch = input('You are about to SEARCH for an entry. If NO, you may choose another option.\\n').lower()\r\n\r\n if y_n(ch):\r\n print('Enter your desired subject to search in...\\n')\r\n chs2 = ['last name', 'l', 'first name', 'f', 'grade', 'g', 'stream', 's', 'role', 'r']\r\n ch2 = input('Search by LAST NAME, FIRST NAME, GRADE, STREAM, or ROLE?\\n').lower()\r\n ch2 = check(ch2, chs2)\r\n\r\n if ch2 == 'last name' or ch2 == 'l':\r\n query(ln_s(re.sub(r'\\s', '', str(input('Desired last name?\\n')))))\r\n elif ch2 == 'first name' or ch2 == 'f':\r\n query(fn_s(re.sub(r'\\s', '', str(input('Desired first name?\\n')))))\r\n elif ch2 == 'grade' or ch2 == 'g':\r\n try:\r\n xgr = int(input('Desired grade?\\n'))\r\n xgrs = [8, 9, 10, 11, 12, 13]\r\n\r\n xgr = check_int(xgr, xgrs)\r\n query(gr_s(xgr))\r\n except ValueError:\r\n print('You did not enter an applicable grade. Please enter another value.')\r\n search()\r\n elif ch2 == 'stream' or ch2 == 's':\r\n query(sr_s(str(input('Desired stream?\\n'))))\r\n else:\r\n query(rl_s(str(input('Desired role?\\n'))))\r\n else:\r\n start()", "def search(self, query):\n launch_gs_app('search',\n self.browser,\n GoogleSuite.SEARCH_URL.format(_urlencode([('q', query)])))", "def __search_student(self):\n menu_string = \"Search for a student:\\n\"\n menu_string += \"\\t1. by ID\\n\"\n menu_string += \"\\t2. by discipline_name\\n\"\n menu_string += \"\\t0. 
Exit\\n\"\n\n stop = False\n while not stop:\n command_list = \\\n {\n '1': self.__ui_search_student_by_id,\n '2': self.__ui_search_student_by_name,\n '0': self.__no_command\n }\n command = self.__ui_read_command(menu_string)\n\n if command == '0':\n return\n\n search = input(\"Enter search_substring string: \")\n if len(search) == 0:\n print(\"Search string cannot be empty!\")\n return\n\n if command in command_list.keys():\n command_list[command](search)\n else:\n print(\"Invalid command!\")", "def get_interactive_match(self, choices, query):\n if query in self.SKIP_KEYWORDS:\n return None\n results = process.extract(query, choices, limit=10) # fuzzy string matching\n best_match = results[0]\n second_best_match = results[1]\n if best_match[1] == second_best_match[1] or best_match[1] < 50: # if inconclusive or low score\n self.print(\"Couldn't find a conclusive match for '%s'. Best matches:\" % (query))\n i = 0\n for result in results:\n i += 1\n print(\" [%i] %s\" % (i, result[0]))\n answer = input(\"Choose one or specify a less ambiguous query: \")\n self.clear_lines(2 + len(results))\n if answer.isdigit() and int(answer) <= len(results):\n return results[int(answer) - 1][0]\n else:\n return self.get_interactive_match(choices, answer)\n else:\n return best_match[0]", "def interactive_select(space, current):\n print \"Type an element name, an element index, or an unambiguous prefix to add to your selection.\"\n print \"Type '\" + color_code(MAGENTA) + \"list\" + CLEAR_COLOR +\"' to see the list of valid selections/indices.\"\n print \"Type '\" + color_code(MAGENTA) + \"clear\" + CLEAR_COLOR +\"' to clear selection.\"\n print \"Enter an empty line when done.\\n\"\n \n done = False\n while not done:\n print color_code(BLACK, bold=True), \"\\nCurrent selection\" + CLEAR_COLOR + \":\", (current if current else \"None\")\n tentative = raw_input(color_code(YELLOW) + \"Selection or Command\" + CLEAR_COLOR + \": \")\n matches = [el for el in space if el.startswith(tentative)]\n try: index = int(tentative)\n except ValueError: index = None\n if tentative == 'list':\n for i,el in enumerate(space):\n print \"\\t\", color_code(BLUE, bold=True), i, CLEAR_COLOR, el\n print \"\\n\"\n elif tentative == 'clear':\n current = []\n elif tentative == '':\n if current:\n print color_code(GREEN), \"\\nFinal selection\" + CLEAR_COLOR + \":\", current, \"\\n\\n\"\n done = True\n else:\n print_error(\"Must select at least one\")\n elif len(matches) > 1:\n print_error(\"Multiple matches found for `{}' ({})\".format(tentative, matches))\n elif len(matches):\n if matches[0] in current:\n print_warning(\"{} was already selected\".format(matches[0]))\n else:\n current.append(matches[0])\n elif index is not None:\n if index < 0 or index >= len(space):\n print_error(\"Invalid index {}\".format(index))\n elif space[index] in current:\n print_warning(\"{} was already selected\".format(space[index]))\n else:\n current.append(space[index])\n else:\n print_error(\"Unknown token: {}\".format(tentative))\n \n return current", "def choose_from_list(query_category, query_list):\n print('Choose the {cat} you want from the below list:'.format(\n cat=query_category))\n for counter, value in enumerate(query_list):\n print('{counter}: {value}'.format(counter=counter, value=value))\n selection = input('Choice: ')\n return query_list[int(selection)]", "def lookup_search_term():\n while True:\n search_query = input('Show entries containing (in name or notes): ')\n if validate_lookup_search_term_format(search_query):\n break\n print('** 
Please enter search term **')\n return (Entry.select().where(Entry.employee_name.contains(search_query)) |\n Entry.select().where(Entry.task_notes.contains(search_query)))", "def __ui_choose_search_criteria_for_activities(self):\n print(\"By which criteria do you want to search activities?\\n\"\n \" 1. By date\\n\"\n \" 2. By description\\n\")\n user_choice = input(\"Type your option: \").strip()\n if user_choice == \"1\":\n self.__ui_search_activities_by_date()\n elif user_choice == \"2\":\n self.__ui_search_activities_by_description()\n else:\n print(\"Invalid option!\\n\")\n return", "def search_by_string(self):\n print(\"*** String Search ***\\n\")\n print(\"Enter a search string.\\n\")\n print(\"- NAME and NOTE will be searched for all tasks -\")\n print(\"- Searching IS case-sensitive, but partial matches will be returned -\\n\")\n while True:\n try:\n search_string = input(\">>> \")\n results = self.regex_entry_search(search_string)\n except re.error:\n print(\"Couldn't parse search query. Please try again.\")\n else:\n clear_screen()\n print(f\"Found {len(results)} matches for string \\\"{search_string}\\\"...\\n\")\n self.print_selected_entries(results)\n break", "def ask_search():\n\n print(\n\"\"\"\nPlease enter your desired keywords for the lexical dispersion analysis. For quick templates, enter the following keys:\n\ntemplate_insurance: insurance identifier terms\ntemplate_contract: contract identifier terms\ntemplate_privacy: privacy contract identifier terms\n\nTo stop entering keywords, simply enter an empty input.\n\"\"\"\n )\n\n #asking user for search terms\n ask = True\n search = []\n\n while ask == True:\n temp = input(\"Enter a keyword: \")\n if temp == \"\":\n break\n elif temp == \"template_insurance\":\n search = [\"treatment\", \"premium\", \"claim\", \"benefit\", \"exclusions\", \"charges\", \"payment\", \"occupation\"]\n break\n elif temp == \"template_contract\":\n search = [\"defined\",\"liability\",\"service\",\"confidential\",\"terminate\",\"law\", \"breach\"]\n break\n elif temp == \"template_privacy\":\n search = [\"purpose\",\"personal\",\"data\",\"collect\",\"transfer\",\"services\",\"contact\",\"provide\",\"authority\",\"marketing\",\"retention\",\"consent\",\"analysis\",\"analytics\"]\n break\n else:\n search.append(temp)\n\n return search", "def search(self, query=None):\n\n self.visual.log(\"Starting search\")\n if self.search_invoke_counter > 0:\n # step to the starting history to search everything\n self.reset_history()\n search_done = False\n just_began_search = True\n query_supplied = bool(query)\n\n ttr = TimedThreadRunner(self.search_for_entry, \"\")\n # ttr.set_delay(1, self.visual.log, \"delaying search execution...\")\n\n while True:\n # get new search object, if it's a continued search OR no pre-given query\n if not just_began_search or (just_began_search and not query_supplied):\n search_done, new_query = self.visual.receive_search()\n self.visual.log(\"Got: [{}] [{}]\".format(search_done, new_query))\n if search_done is None:\n # pressed ESC\n self.visual.message(\"Aborting search\")\n return\n if new_query == \"\" and search_done:\n # pressed enter\n self.visual.message(\"Concluded search\")\n break\n # got an actual query item\n # if query content is updated, reset the timer\n query = new_query\n\n query = query.lower().strip()\n # ttr.reset_time(query)\n # self.visual.log(\"Got query: {}\".format(query))\n # ttr.update_args(query)\n # ttr.start()\n # ttr.stop()\n # results_ids = ttr.get_result()\n results_ids = 
self.search_for_entry(query)\n # results_ids = []\n just_began_search = False\n self.search_invoke_counter += 1\n if not self.visual.does_incremental_search:\n break\n\n if not query:\n # no search was performed\n return\n # push the reflist modification to history\n self.change_history(results_ids, \"search:\\\"{}\\\"\".format(query))", "def search_by_chosen_option(library: list, chosen_option: str) -> None:\n user_input = input(f'What is the name of the {chosen_option} you want to search for?')\n found_books = []\n for book in library:\n if user_input.lower() in str(getattr(book, chosen_option.lower())).lower():\n found_books.append(book)\n print(f'We found {len(found_books)} book(s) that matched this search in your library.\\n')\n for num, book in enumerate(found_books, 1):\n print(f'{num} - {book.__repr__()}')\n if len(found_books) > 0 and not return_to_main_menu():\n move_book(library, found_books)", "def search(self, *args, **kwargs):", "def search(self, query):\n logger.debug('Performing search for: '+query)\n write_textfield('queryString', query+\"\\n\", check=False)\n self.waitForLoaderToDisappear()", "def search():\n # Check for database tables\n check_db()\n # Check for GET data\n search_query = request.args.get(\"q\", None)\n # Format search results as HTML\n search_results = get_search_results_html(search_query)\n # Format recent searches as HTML\n recent_searches = get_recent_searches_html()\n\n return html_wrapper('<h1>' + SITE_NAME + '''</h1>\n <form action=\"/\" method=\"GET\">\n <input type=\"text\" name=\"q\">\n <input type=\"submit\" value=\"search\">\n </form>''' + search_results + recent_searches)", "def search(search, candidates):\n choicer = choices.Choice()\n for candidate in candidates:\n choicer.add(candidate)\n return choicer.search(search)" ]
[ "0.6945267", "0.6146135", "0.60944784", "0.6077637", "0.6019907", "0.5902816", "0.58127916", "0.58041674", "0.5797001", "0.5792282", "0.5770086", "0.5716065", "0.56671447", "0.5661047", "0.5658922", "0.56546295", "0.56507295", "0.5648148", "0.56405544", "0.56253314", "0.5614291", "0.5591348", "0.55757976", "0.5567116", "0.5538281", "0.5532533", "0.55296844", "0.5517586", "0.5517161", "0.5510592" ]
0.67623794
1
Returns string that best matches query out of a list of choices. Prompts user if unsure about best match.
def get_interactive_match(self, choices, query): if query in self.SKIP_KEYWORDS: return None results = process.extract(query, choices, limit=10) # fuzzy string matching best_match = results[0] second_best_match = results[1] if best_match[1] == second_best_match[1] or best_match[1] < 50: # if inconclusive or low score self.print("Couldn't find a conclusive match for '%s'. Best matches:" % (query)) i = 0 for result in results: i += 1 print(" [%i] %s" % (i, result[0])) answer = input("Choose one or specify a less ambiguous query: ") self.clear_lines(2 + len(results)) if answer.isdigit() and int(answer) <= len(results): return results[int(answer) - 1][0] else: return self.get_interactive_match(choices, answer) else: return best_match[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def interactive_search(self, choices, query=None):\n if query:\n match = self.get_interactive_match(choices, query)\n if match:\n self.print(\"Matched query to '%s'.\" % (match))\n answer = input(\"Is that correct? (Y/n) \")\n self.clear_lines(1)\n if answer.lower() == 'y' or answer == '':\n self.clear_lines(1)\n return match\n else:\n self.clear_lines(1)\n return self.interactive_search(choices)\n else:\n return None\n else:\n query = input(\"Please type a query: \")\n self.clear_lines(1)\n return self.interactive_search(choices, query)", "async def cmd_choose(self, args: Args, **_):\n response = \"From what you gave me, I believe `{}` is the best choice\".format(\n args[randint(0, len(args) - 1)]\n )\n return response", "def get_choice(choices: list, choice: str):\n if choice == \"1\":\n return 0\n \n if choice == \"2\":\n return 1\n\n choices = list(map(str.lower, choices))\n words = list(map(str.split, choices))\n\n # Go through all words in the given message, and find any words unique to a choice\n for word in choice.lower().split():\n if word in words[0] and word not in words[1]:\n return 0\n elif word in words[1] and word not in words[0]:\n return 1\n\n # Invalid choice\n return None", "async def suggest(self, ctx, choice=None):\n\n if choice is None or choice.lower() in (\"online\", \"voice\"):\n suggestions = get_suggestions(get_users(ctx, choice))\n\n if suggestions:\n await self.bot.say(\"You can play these games: \\n\")\n message = pagify(\"\\n\".join(suggestions), ['\\n'])\n\n for page in message:\n await self.bot.say(box(page))\n else:\n await self.bot.say(\"You have exactly **zero** games in common, go buy a 4-pack!\")\n else:\n await self.bot.say(\"Please enter a valid filter -> either use `online` (default) for all online users or `voice` for all users in a voice channel\")", "def promptUser(choices, choiceStr, question=None, maxToShow=20):\n # Display choices to the user\n print \"\"\n validinput = ['']\n for i in range(len(choices)):\n validinput.append(str(i+1))\n try:\n try: print encode(\" %2s. %s\" % (i+1, choiceStr(choices[i])))\n except: print \" %2s. %s\" % (i+1, choiceStr(choices[i]))\n except:\n pass\n if (i == maxToShow-1): break\n # Get a response from the user\n response = \"<UNANSWERED>\"\n question = question or \" Please select the correct item\"\n question = \"%s (0 for None) [0]: \" % question\n while (response not in validinput):\n response = raw_input(\"\\n%s\" % question)\n if (response not in validinput):\n print \" Invalid input, please choose one of: %s\" % validinput\n # We have a response, return the correct choice\n if (response == ''):\n print \" You selected: None\"\n return None\n selection = choices[int(response)-1]\n print \" You selected: %s\" % choiceStr(selection)\n return selection", "def select_option(options, choice):\n choices = []\n txt = \"\"\n last = len(options) - 1\n for opt in options:\n if options.index(opt) == 0:\n txt += \"'\" + str(opt) + \"'\"\n elif options.index(opt) == last:\n txt += \" and '\" + str(opt) + \"'\"\n else:\n txt += \", '\" + str(opt) + \"'\"\n choices.append({'name': opt})\n\n question = [\n {\n 'type': 'list',\n 'message': 'The similarities between \\'' + choice + '\\' with ' + txt + ' are equal. 
Choose the one to consider.',\n 'name': 'option',\n 'choices': choices\n }\n ]\n\n answer = prompt(question, style=style)\n return answer.get(\"option\")", "async def choose(self, ctx, *args):\n query = \" \".join(args)\n choices = query.split(\" or \")\n if len(choices) < 2:\n await ctx.send(\"Give me at least 2 options to choose from! (separate options with `or`)\")\n self.logger.warning(misolog.format_log(ctx, f\"1 option\"))\n return\n choice = rd.choice(choices).strip()\n await ctx.send(f\"I choose **{choice}**\")\n self.logger.info(misolog.format_log(ctx, f\"{choice}\"))", "def choose_matching_model_for_style(model_style_name, model_choices):\n model_choices = set(model_choices)\n matching_models = []\n\n # Remove punctuation and capitalize both terms for easier comparison\n model_style_uc = model_style_name.replace(\"&\", \"And\").upper()\n model_style_alphanumeric = not_alphanumeric.sub(\"\", model_style_uc)\n model_choice_original_map = {}\n for model_choice in model_choices:\n model_choice_original_map[not_alphanumeric.sub(\"\", model_choice.upper())] = model_choice\n model_choices_alphanumeric = model_choice_original_map.keys()\n\n # First check if the model_style starts with the name of any of our models\n for model_choice in model_choices_alphanumeric:\n if model_style_alphanumeric.startswith(model_choice):\n matching_models.append(model_choice_original_map[model_choice])\n\n if len(matching_models) == 1:\n return matching_models[0]\n\n # If that fails, look for overlap between a model and the model_style\n for model_choice in model_choices_alphanumeric:\n if model_choice in model_style_alphanumeric:\n matching_models.append(model_choice_original_map[model_choice])\n\n if len(matching_models) == 1:\n return matching_models[0]\n\n if len(matching_models) > 1:\n # If there are multiple matching, choose the largest match first. 
This mostly seems to work.\n matching_models = sorted(matching_models, key=lambda x: len(x), reverse=True)\n return matching_models[0]\n\n return None", "def _choose_best_option(self):", "async def randomChoice(self, ctx: commands.Context, *choices: str):\n if not choices:\n await ctx.reply(f\"Command failed - no arguments given.\\nEnter a sequence of arguments to choose from (you can use quotes for grouping).\", mention_author=False)\n elif len(choices)==1:\n await ctx.reply(f\"After some extremely randomized choosing from the one singular option that was given to choose from, the surprising result is:\\n{choices[0]}\", mention_author=False)\n else:\n await ctx.reply(f\"Randomly chosen result:\\n{random.choice(choices)}\", mention_author=False)", "def prompt(text, choices):\n text += \" [\" + \"/\".join(choices) + \"] \"\n while True:\n inp = input(text)\n if inp in choices:\n return inp", "def next_choice(self, opponent: 'Player') -> str:\n\n if self.adaptive_ai:\n # this is an adaptive_ai player, so see if it has collected\n # enough stats about the current opponent yet:\n if sum(self.opponent_choices[opponent.name].values()) > 5:\n # has enough samples to start adapting to the opponent\n print(' {} is trying to guess the opponent\\'s choice...'.format(self.name))\n\n # AI algorithm 1:\n # simply find the most-frequent selection by the opponent and\n # choose its killer.\n\n guess = self.opponent_choices[opponent.name].most_common(1)[0][0]\n ai_choice = weapon_to_beat(guess)\n print(' ', opponent.name, 'most often chose', guess, 'so he/she chose', ai_choice)\n return ai_choice\n\n # use the standard tendency distribution to choose a weapon:\n n = randint(1, self.randmax)\n if n <= self.tendency[0]:\n return 'rock'\n elif n <= self.tendency[0] + self.tendency[1]:\n return 'paper'\n else:\n return 'scissors'", "def ask(question, options, default):\n assert default in options\n\n question += \" ({})? 
\".format(\"/\".join(o.upper() if o == default else o for o in options))\n selected = None\n while selected not in options:\n selected = input(question).strip().lower()\n if selected == \"\":\n selected = default\n else:\n if selected not in options:\n question = \"Please type '{}'{comma} or '{}': \".format(\n \"', '\".join(options[:-1]), options[-1],\n comma=',' if len(options) > 2 else '',\n )\n return selected", "def __choose_best_matching_candidate(candidates, artist):\n\n artist_names = set()\n for match in candidates:\n artist_names.add(match[1])\n\n # If there is more than 1 matched artist:\n if len(artist_names) > 1:\n \n best_distance = 10000\n best_artist = \"\"\n\n # Calculate the levenshtein edit distance between the searched artist name and the artist names in the search results.\n for matched_artist in artist_names:\n distance = editdistance.eval(matched_artist, artist)\n if distance < best_distance:\n best_distance = distance\n best_artist = matched_artist\n\n # Then exclude from candidates all matches that are NOT from the best artist\n candidates = [candidate for candidate in candidates if candidate[1] == best_artist]\n else:\n best_artist = artist_names.pop()\n best_distance = editdistance.eval(best_artist, artist)\n\n # Threshold candidate name to the artist name\n ratio = best_distance/len(artist)\n # Allow ~15% difference\n if ratio > 0.15:\n raise MatchNotFoundError(\"Closest artist is too far of the queried artist\")\n\n # Descending list\n sort_on_num_ratings = sorted(candidates, key=lambda cand: cand[2], reverse=True)\n\n # Take the one with the most votes\n selected = sort_on_num_ratings[0]\n\n # Unless it has a rating lower than 4.\n if selected[3] < 4:\n\n sort_on_rating = sorted(candidates, key=lambda cand: cand[3], reverse=True)\n\n # If there is one with a rating higher than 4, select that one. 
\n if sort_on_rating[0][3] > 4:\n selected = sort_on_rating[0]\n\n return selected", "def __str__(self):\n return gettext('One of %s') % self._get_choices_str()", "def selection_input(\n self,\n prompt,\n choices,\n default=None,\n error_message=\"Invalid Selection\",\n transform=None\n ):\n while True:\n result = self.text_input(prompt, default)\n\n if transform is not None and result is not None:\n result = transform(result)\n\n if result in choices:\n return result\n\n print()\n print(error_message)", "def QueryStrGeneral(cls, queryString: str, errorPrompt: str, conditionList: list) -> str:\n\n global userInput\n\n try:\n userInput = input(queryString).upper()\n\n # Check if userInput points to either of the options, and recursively call\n # the function until userInput has an actionable value.\n if userInput not in conditionList:\n raise ValueError\n\n except ValueError:\n # Reprompt user for valid entry.\n print(errorPrompt)\n cls.QueryStrGeneral(queryString, errorPrompt, conditionList)\n\n except Exception:\n print(\"\\nOops something is buggy\")\n\n return userInput", "def _choice_str(choices, max):\n return ''.join(map(str, [choice(choices) for _ in range(max)]))", "def get_choice(choices, conversation):\n user_choice = None\n while user_choice is None:\n try:\n user_input = input(\"\\nChoice: \")\n if user_input == 'debug' or user_input == 'd':\n print(\"\\n\", conversation.getDebugInfo())\n else:\n user_choice = int(user_input)\n if choices.__len__() < user_choice or user_choice < 1:\n print('Error, try again')\n user_choice = None\n else:\n print(\"\\nYou: \" + choices[user_choice - 1])\n except ValueError:\n print('Error, try again')\n return user_choice", "def find_best_candidate(s_array):\n best_string = ''\n max_val = 0\n for s in s_array:\n score = compare(s)\n if score > max_val:\n max_val = score\n best_string = s\n return best_string", "def select_query(\n items: Sequence,\n max_display: int = 10,\n fallback: Callable[[], T] = None,\n item_formatter: Callable[[T], str] = str,\n header: str = \"Available options:\",\n footer: str = \"Please enter the number of the option to use.\",\n) -> T:\n\n # Truncate if needed\n print(header)\n if max_display is not None and len(items) > max_display:\n items = items[:max_display]\n print(f\"(showing the latest {max_display})\")\n\n # Display list\n for i, exp in enumerate(items):\n print(\" \", i, \": \", item_formatter(exp))\n\n print(footer)\n\n # Repeat query on errors\n while True:\n sel = input()\n\n # Check if sel is a number, if so use it.\n if sel == \"\":\n # first item is default\n return items[0]\n elif sel.isdigit():\n # Parse index\n sel_idx = int(sel)\n if sel_idx < len(items):\n return items[sel_idx]\n # Error\n print(\"Please enter a number between 0 and \", len(items) - 1, \".\")\n elif fallback is not None:\n # Use fallback if any\n fres = fallback(sel)\n if fres is not None:\n return fres\n # The fallback should report it's own errors\n else:\n print(\"Please enter a number.\")", "async def choose(self, ctx, *, choices: str):\n await ctx.send(\n self.bot.bot_prefix + 'I choose: ``{}``'.format(random.choice(choices.split(\"|\"))))", "def choose_option():\n print(\"1. title of most played game\"\n \"\\n2. how many copies have been sold in total\"\n \"\\n3. average selling\"\n \"\\n4. how many characters long is the longest title\"\n \"\\n5. average of the release dates\"\n \"\\n6. properties of the game\"\n \"\\n7. how many games are grouped by genre\"\n \"\\n8. 
ordered titles of games by date and alphabet\"\n \"\\n9. Exit\")\n\n option = input(\"\\nDisplay: \")\n return option", "def get_best_match(self, list):\n raise NotImplementedError", "def pull_suggestion(self, callback, who, arg):\n\t\t\n random_sug = self.dong.db.get_random_row('suggest')\n res = self.google_suggest(callback, who, random_sug[2], False)\n\t\t\n w = res.split()\n if w[0].lower() in ('what', 'why', 'was', 'where', 'who', 'which', 'whom', 'when', 'how', 'is', 'are', 'did'):\n if w[-1:] != '?':\n res = res + '?'\n return res.capitalize()", "def question_with_suggested_answers(text, default, suggest):\n\n reply = question(text, default)\n while reply not in suggest:\n report(_(\"\"\"The value you have chosen is not among the suggested values.\nYou have chosen '%s'.\"\"\" % reply))\n report(_(\"The suggested values are \" + str(suggest)))\n correct = question(_(\"Do you want to correct your answer?\"), True)\n if correct:\n reply = question(text, default)\n else:\n return reply\n return reply", "def search(search, candidates):\n choicer = choices.Choice()\n for candidate in candidates:\n choicer.add(candidate)\n return choicer.search(search)", "def pick_place(choices_arg, question='Where to next?',inv=True):\r\n \r\n choices_alt = []\r\n \r\n if isinstance(choices_arg,list):\r\n choices = list(choices_arg)\r\n if inv:\r\n choices += ['inventory','map']\r\n \r\n elif isinstance(choices_arg,tuple):\r\n choices = choices_arg[0]\r\n choices_alt = choices_arg[1]\r\n if inv:\r\n choices += ['inventory','map']\r\n choices_alt += ['inventory','map']\r\n\r\n staying = True\r\n \r\n while staying:\r\n\r\n print question + '\\n'\r\n\r\n if choices_alt:\r\n for index in range(len(choices_alt)): #print alternate choices in menu form\r\n if str(choices[index]) == 'inventory':\r\n print\r\n print(str(index+1) + ': ' + str(choices_alt[index]))\r\n\r\n else:\r\n for index in range(len(choices)): #print choices in menu form\r\n if str(choices[index]) == 'inventory':\r\n print\r\n print(str(index+1) + ': ' + str(choices[index]))\r\n\r\n print('') #get some blank line in here yo\r\n chosen = raw_input('').lower()\r\n \r\n try:\r\n final = ''\r\n for index in range(len(choices)): #check if they typed a number\r\n item = choices[index]\r\n if index == int(chosen)-1:\r\n final = item\r\n staying = False\r\n if final == '':\r\n print 'Nice Try.\\n' #if they type a number not in range\r\n question = 'Try again, foo.'\r\n except:\r\n final = ''\r\n if choices_alt:\r\n for index in range(len(choices_alt)): #check if they typed letters\r\n item = choices_alt[index]\r\n if chosen == str(item).lower():\r\n final = choices[index]\r\n staying = False\r\n\r\n else:\r\n for index in range(len(choices)): #check if they typed letters\r\n item = choices[index]\r\n if chosen == str(item).lower():\r\n final = item\r\n staying = False\r\n if final == '':\r\n print 'Nice Try.\\n' #if they misspelled\r\n question = 'Try again, foo.'\r\n\r\n if final == 'map':\r\n inspect_map()\r\n question = 'Where to?'\r\n staying = True\r\n if final == 'inventory':\r\n inspect_inventory()\r\n question = 'Where to?'\r\n staying = True\r\n\r\n return final", "def report_matches(unknown, reference_langs, args):\n matches = language_match.best_matches(unknown, reference_langs, args.n_gram_max, args.matches)\n print(\"Best match{} for\".format(\"es\" if args.matches != 1 else \"\"), repr(unknown))\n pad = max([len(name) for (name, score) in matches])\n for (name, score) in matches:\n print(\"\\t\", name.ljust(pad), 
\"\\t{:>6.2%}\".format(score))", "def select_best_match(self, normalized_texts: List[str], transcript: str, verbose: bool = False):\n normalized_texts = calculate_cer(normalized_texts, transcript)\n normalized_texts = sorted(normalized_texts, key=lambda x: x[1])\n normalized_text, cer = normalized_texts[0]\n\n if verbose:\n print('-' * 30)\n for option in normalized_texts:\n print(option)\n print('-' * 30)\n return normalized_text, cer" ]
[ "0.6642002", "0.6545624", "0.6269913", "0.623575", "0.6235035", "0.62169236", "0.6010412", "0.60037553", "0.6000231", "0.59865594", "0.5913682", "0.59093326", "0.5877843", "0.5836029", "0.5736314", "0.57017106", "0.5665372", "0.56378025", "0.5632232", "0.5620297", "0.55832994", "0.55733556", "0.5563177", "0.5549811", "0.5519867", "0.5513932", "0.5500545", "0.5495603", "0.5491926", "0.5485603" ]
0.7756389
0
Asks an user how many days to go back. Returns int.
def get_interactive_days(self): answer = input("Press return to get entries of past day or input number of days to go back in time: ") if answer == '': days = 1 else: try: days = int(answer) except: print("You didn't enter a number, assuming 1 day.") days = 1 return days
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def decreases_remaining(self):\n return 2 - self.decreases_today", "def remain():\r\n global total\r\n global user_pick\r\n total = int(total - user_pick)\r\n print(\"Remaining \" + str(total))", "def remaining_days_in_cycle(self) -> int:\n if not self.expiration:\n return 0\n delta = self.expiration - _today()\n return int(delta.days)", "def decays(self):\n return self._base.decays", "def last_days_results(self, days):\n return self.security['Date', 'Close', 'FinalDecision'][-days:]", "def Daysleftverification():\n pass", "def now_minus(days: int):\n return NOW - datetime.timedelta(days=days)", "def countdown(self, amt=1):\n pass", "def getBugsToday(myDay):\r\n #set bugs_today as neg one to accept zero as an input\r\n bugs_today = -1\r\n while bugs_today < 0 :\r\n myBugs_Validation = (input(u'Enter the number of bugs collected on day ' + str(myDay) + ' : '))\r\n #call my getValidation to check values entered\r\n bugs_today = getValidation(myBugs_Validation)\r\n #check if user entered a valid number\r\n if bugs_today == -1:\r\n print('\\nPlease enter the number of bugs collected. \\nEnter a whole integer number >= 0')\r\n \r\n return bugs_today", "def remaining_trial_days(self):\n try:\n return self.count_days_from_now(self.trial_ended_at)\n except AttributeError:\n return 0", "def days(input=None):\n return get(input).days", "def ex8() :\r\n print(\" - Date Calculator - \")\r\n import datetime\r\n today = datetime.date.today()\r\n print(today)\r\n try : #try catch method, in case user enters non-date, or 31st Feb etc.\r\n userDate = input(\"Please enter the date to check in a dd/mm/yy format: \") #userDate is string\r\n userDate = datetime.datetime.strptime(userDate, '%d/%m/%Y').date() #userDate is date_object\r\n if userDate < today : print(\"Invalid input, date is in the past\")\r\n elif userDate == today: print(\"That's today you dum-dum, answer is 0 days.\")\r\n else:\r\n delta = userDate - today #calculate difference\r\n delta = str(delta) #date_object don't work with split only str\r\n delta = delta.split(\",\") #unorthodox method to delete time (0:00:0) from the days\r\n print(\"The number of days between today (\",today,\") and entered date (\",userDate,\") are \",delta[0],\".\")\r\n except ValueError as e :\r\n print(\"Not a valid date.\")", "def days_since_last_checkin(self):\n # TODO use local timezone\n checkin_date = (self.last_checkin - datetime.timedelta(hours=5)).date()\n today = datetime.date.today()\n return (today - checkin_date).days", "def remaining_days(self):\n if self.trialing or self.trial_ended:\n return self.remaining_trial_days\n else:\n return self.remaining_days_in_current_period", "def countdown(n):\n if n < 0:\n print(\"ERROR! 
Invalid input\")\n elif n == 0:\n print(\"Done!\")\n else:\n print(n)\n return countdown(n-1)", "def get_number_days(self):\r\n return 1", "def remaining_days_in_current_period(self):\n try:\n return self.count_days_from_now(self.current_period_ends_at)\n except AttributeError:\n return 0", "def get_n_days_ago(self, startdate, n):\n return startdate - datetime.timedelta(days=n)", "def automatically_after_days(self) -> Optional[int]:\n return pulumi.get(self, \"automatically_after_days\")", "def automatically_after_days(self) -> Optional[int]:\n return pulumi.get(self, \"automatically_after_days\")", "def calc_remained_days(name: str, full_date: str, current: str):\n expiry_date = get_expiry_date(name, full_date)\n intervals = datetime.strptime(expiry_date, DATE_FORMAT) - datetime.strptime(current, DATE_FORMAT)\n days = intervals.days + 1\n if days <= 0:\n raise ValueError(f'remained days {expiry_date} - {current}, {days} out of range. ')\n return days", "def remaining_retention_days(self) -> int:\n return pulumi.get(self, \"remaining_retention_days\")", "def until_reset(self) -> int:\n return int((self.resets_at - datetime.now()).total_seconds())", "def decrement(val):\n return coerce_to_int(val) - 1", "def down(self):\n global curafl, maxafl\n curafl -= 1\n if (curafl == 0):\n curafl = maxafl \n try:\n subprocess.call(['/home/holiday/bin/afl', '%d' % curafl])\n except:\n print \"afl failed\"", "def countdown(n):\n while n > 0:\n n -= 1", "def numOfDays():\n\n print(\"Podaj rok, miesiac oraz dzien pierwszej daty: \")\n inputs = [input() for i in range(3)]\n\n print(\"Podaj rok, miesiac oraz dzien drugiej daty: \")\n inputs1 = [input() for i in range(3)]\n\n d0 = date(inputs[0], inputs[1], inputs[2])\n d1 = date(inputs1[0], inputs1[1], inputs1[2])\n delta = abs(d1 - d0)\n \n print(delta.days)\n return abs(delta.days)", "def GetDownLast(self, *args, **kwargs):\n pass", "def back( self ):\n super( ConfirmationScreen, self ).back()\n\n self._current_option = self._current_option - 1\n print( \"Current option is \" +str( self._current_option ) )\n \n if self._current_option < 0:\n self._current_option = len( self._options ) - 1", "async def rewards(ctx, username):\n history = get_history(username)\n await bot.say(history+\" in the past 7 days\")" ]
[ "0.6136943", "0.59328794", "0.5877445", "0.58612114", "0.57502425", "0.5743318", "0.55419844", "0.55365306", "0.55009514", "0.5493947", "0.54851043", "0.5480768", "0.5478333", "0.54070824", "0.5393414", "0.53924155", "0.5386575", "0.5374631", "0.52890193", "0.52890193", "0.5274363", "0.5264618", "0.52618796", "0.52066404", "0.51860446", "0.51715845", "0.51540196", "0.51327634", "0.51059616", "0.50729567" ]
0.70395863
0
Hacky way to check if this function already made a Toggl project based on a Zendesk ticket ID.
def already_created(self, ticket_id, toggl_projects): project_prepends = [p['name'].split()[0][1:] for p in toggl_projects] if str(ticket_id) in project_prepends: return True return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def check_if_is_ticket(ctx):\n channel : TextChannel = ctx.channel\n return 'ticket-' in channel.name", "def sync(self, no_of_days=1):\n zd = Zendesk()\n tg = Toggl()\n try:\n self.print(\"Syncing...\")\n self.print_divider(30)\n tickets = zd.get_tickets(no_of_days)\n for ticket in tickets:\n project_title = self.format_title(ticket.id, ticket.subject)\n if ticket.organization:\n client_id = tg.get_client_id(name=ticket.organization.name)\n if not client_id:\n new_client = tg.create_client(ticket.organization.name)\n client_id = new_client['id']\n else:\n client_id = False\n self.print(\"Ticket '%s' has no associated organization!\" % (project_title))\n all_projects = tg.get_projects()\n if not self.already_created(ticket.id, all_projects):\n self.print(\"Creating project '%s'...\" % (project_title))\n result = tg.create_project(project_title, client_id, is_private=False)\n self.print(\"Toggl response:\")\n self.log(result, silent=False)\n else:\n self.print(\"There is already a Toggl project for Zendesk ticket #%s!\" % ticket.id)\n pass\n # TODO: edit Toggl project\n # tg.edit_project(project_id, name=ticket.subject)\n self.print_divider(30)\n self.print(\"Done!\")\n except:\n self.log(traceback.format_exc(), silent=False)", "def verify_project(self, pool, project):\n svc = self.project_path % (pool, project)\n ret = self.rest_get(svc, restclient.Status.OK)\n return ret", "def is_project_created(path):\n project_id = None\n try:\n with open(\"%s%sproject\"\n % (path, os.sep)) as project_file:\n project_id = project_file.readline().strip()\n try:\n project_id = bigml.api.get_project_id(\n project_id)\n return True, project_id\n except ValueError:\n return False, None\n except IOError:\n return False, None", "def is_project(self, project):\n return self._projects_lookup.get(project, False)", "def create_project_if_necessary(ctx, org_name, project_name, ):\n org = cmd.get_one_organization_by_name(\n client=ctx.obj, organization_name=org_name)\n pprint(cmd.ensure_project(\n client=ctx.obj, project_name=project_name, organization_id=org.id))", "def test_check_ticket_8(self):\n self.tkt.phage_id = \"\"\n import_genome.check_ticket(\n self.tkt, type_set=self.type_set,\n description_field_set=self.description_field_set,\n eval_mode_set=self.eval_mode_set,\n id_dupe_set=set(), phage_id_dupe_set=set(),\n retain_set=self.retain_set, retrieve_set=self.retrieve_set,\n add_set=self.add_set, parse_set=self.parse_set)\n count = count_status(self.tkt, \"error\")\n self.assertEqual(count, 1)", "def _dummy(ticket):\r\n return True", "def is_pull_request(issue):\r\n return 'pull_request_url' in issue", "def test_check_ticket_9(self):\n self.tkt.type = \"add\"\n import_genome.check_ticket(\n self.tkt, type_set=self.type_set,\n description_field_set=self.description_field_set,\n eval_mode_set=self.eval_mode_set,\n id_dupe_set=set(), phage_id_dupe_set=set(),\n retain_set=self.retain_set, retrieve_set=self.retrieve_set,\n add_set=self.add_set, parse_set=self.parse_set)\n count = count_status(self.tkt, \"error\")\n self.assertEqual(count, 1)", "def test_not_authed_public_project(self):\n # Clear out existing project with ID=1 if necessary.\n Project.objects.filter(id=1).delete()\n locale = LocaleFactory.create(code='fakelocale')\n project = ProjectFactory.create(id=1, slug='valid-project', locales=[locale])\n ResourceFactory.create(project=project)\n\n response = self.client.get('/fakelocale/valid-project/')\n assert_equal(response.status_code, 200)\n # I'd assertTemplateUsed here but it doesn't work on 
non-DTL\n # templates.", "def _get_project_id(self, request):\n project_id = request.environ[\"masakari.context\"].project_id\n if project_id in request.url:\n return project_id\n return ''", "def is_project_in_the_response(projectComponent, response):\n for project in response:\n if response[project] == projectComponent:\n return True\n return False", "def test_check_ticket_3(self):\n import_genome.check_ticket(\n self.tkt, type_set=self.type_set,\n description_field_set=self.description_field_set,\n eval_mode_set=self.eval_mode_set,\n id_dupe_set=set(), phage_id_dupe_set=set([\"Trixie\"]),\n retain_set=self.retain_set, retrieve_set=self.retrieve_set,\n add_set=self.add_set, parse_set=self.parse_set)\n count = count_status(self.tkt, \"error\")\n self.assertEqual(count, 1)", "def project_in_vc(name):\n vc3_client = get_vc3_client()\n projects = vc3_client.listProjects()\n vc = vc3_client.getRequest(requestname=name)\n vc_owner_projects = []\n\n for project in projects:\n if vc.owner == project.owner:\n vc_owner_projects.append(project)\n\n for p in vc_owner_projects:\n if (session['name'] in p.members or session['name'] == p.owner):\n return True\n else:\n return False", "def has_valid_id(self):\n try:\n project_id = self.track.project.id\n except (OSError, AttributeError):\n return False\n pointer, name = self._get_pointer_and_name()\n return bool(RPR.ValidatePtr2(project_id, pointer, name))", "def project(request, proj_id=None, scenario_id=None):\n\n if proj_id:\n project = get_object_or_404(Project, id=proj_id)\n\n if project.user != request.user and project.is_private:\n raise Http404\n\n return render_to_response('home/home.html', get_context(request))", "def check_project_exists(self, project):\n session = self.session_factory()\n exists = session.query(PipelineRun).filter_by(project=project).first()\n session.close()\n if exists:\n return True\n return False", "def project_with_revision_exists(project_name, project_revision, working_dir):\n try:\n with open(working_dir + project_name + \".qpf\", \"r\") as project_file:\n for line in project_file:\n if f\"PROJECT_REVISION = \\\"{project_revision}\\\"\" in line:\n return True\n return False\n except FileNotFoundError:\n return False", "def test_check_ticket_2(self):\n import_genome.check_ticket(\n self.tkt, type_set=self.type_set,\n description_field_set=self.description_field_set,\n eval_mode_set=self.eval_mode_set,\n id_dupe_set=set([1]), phage_id_dupe_set=set(),\n retain_set=self.retain_set, retrieve_set=self.retrieve_set,\n add_set=self.add_set, parse_set=self.parse_set)\n count = count_status(self.tkt, \"error\")\n self.assertEqual(count, 1)", "def test_check_ticket_4(self):\n self.tkt.type = \"invalid\"\n import_genome.check_ticket(\n self.tkt, type_set=self.type_set,\n description_field_set=self.description_field_set,\n eval_mode_set=self.eval_mode_set,\n id_dupe_set=set(), phage_id_dupe_set=set(),\n retain_set=self.retain_set, retrieve_set=self.retrieve_set,\n add_set=self.add_set, parse_set=self.parse_set)\n count = count_status(self.tkt, \"error\")\n self.assertEqual(count, 1)", "def check_project_id(project_id):\n # Convert variable into a string\n project_id = str(project_id)\n # Replace Capital letters and spaces\n project_id = project_id.replace(\" \", \"-\").lower()\n\n # Throw an error if any known incorrect usages found\n try:\n if re.search(\"^-|[^a-z0-9-]|google|ssl|-$\", project_id):\n raise ValueError(\"Invalid characters or words in Project ID\")\n elif len(project_id) > 30:\n raise ValueError(\"Too many 
characters in Project ID\")\n elif len(project_id) < 6:\n raise ValueError(\"More Characters required in Project ID\")\n else:\n log.info(f\"Project Id {project_id} passed regex check\")\n project_outcome = {\n \"outcome\": True,\n \"project_id\": project_id\n }\n return project_outcome\n except ValueError as e:\n log.warning(f\"Proposed Id {project_id} violates known google policies: \"\n \"https://cloud.google.com/resource-manager/docs/creating-managing-projects\")\n project_outcome = {\n \"outcome\": False,\n \"project_id\": project_id\n }\n return project_outcome", "def test_check_ticket_7(self):\n self.tkt.eval_flags = {}\n import_genome.check_ticket(\n self.tkt, type_set=self.type_set,\n description_field_set=self.description_field_set,\n eval_mode_set=self.eval_mode_set,\n id_dupe_set=set(), phage_id_dupe_set=set(),\n retain_set=self.retain_set, retrieve_set=self.retrieve_set,\n add_set=self.add_set, parse_set=self.parse_set)\n count = count_status(self.tkt, \"error\")\n self.assertEqual(count, 1)", "def project_validated(name):\n vc3_client = get_vc3_client()\n # Grab project by name\n project = vc3_client.getProject(projectname=name)\n\n # Checks to see if user is in project\n if (session['name'] in project.members or\n session['name'] == project.owner):\n return True\n else:\n return False", "def git_has_object(project: Project, name: str) -> bool:\n ret = project.git(\"rev-parse\", \"--verify\", name, _ok_code=[0, 128])\n return ret.exit_code == 0", "def is_already_linked(ticket_id):\n exists_query = db.session.query(\n all_models.IssuetrackerIssue.issue_id\n ).filter_by(issue_id=ticket_id).exists()\n return db.session.query(exists_query).scalar()", "def test_get_project(self):\n pass", "def __get_project_version__(self):\n api = FortifyApi(self.ssc_server, token=self.token, verify_ssl=False)\n try:\n response = api.get_project_versions() # api should support a search expression here. alas...\n if response.success:\n for project_version in response.data['data']:\n if project_version['project']['name'] == self.application_name:\n if project_version['name'] == self.fortify_version:\n # we have a matching project version\n Logger.app.debug(\"Found existing project version {0}\".format(project_version['id']))\n return project_version['id']\n # Didn't find a matching project version, verify that our project exists\n for project_version in response.data['data']:\n if project_version['project']['name'] == self.application_name:\n # Our project exsits, so create a new version\n return self.__create_project_version__()\n # Let upload_scan know that our project doesn't exist\n return -2\n elif \"401\" in response.message:\n # Avoid printing error for invalid token. Return -1 to reauth\n return -1\n else:\n Logger.app.critical(\"Failed to get project version. {0}\".format(response.message))\n except Exception as e:\n Logger.app.critical(\"Exception trying to get project version. 
{0}\".format(e.message))\n\n return None", "def ensure_project(self, project_id):\n\n if not project_id:\n return\n\n # TODO(rkukura): It seems load_from_conf_options() and\n # keystoneclient auth plugins have been deprecated, and we\n # should use keystoneauth instead.\n if project_id not in self.project_names:\n if self.keystone is None:\n self._get_keystone_client()\n LOG.debug(\"Calling project API\")\n projects = self.keystone.projects.list()\n LOG.debug(\"Received projects: %s\", projects)\n for project in projects:\n self.project_names[project.id] = project.name", "def test_create_project_request(self):\n pass" ]
[ "0.60784185", "0.584883", "0.56591004", "0.55432487", "0.5472561", "0.5453476", "0.5446431", "0.54430735", "0.54305506", "0.54244524", "0.54243267", "0.5412555", "0.53918374", "0.5391034", "0.5389106", "0.5382748", "0.53724587", "0.53702873", "0.535847", "0.5357751", "0.53424674", "0.53350586", "0.5321972", "0.53187186", "0.5315832", "0.5315102", "0.5314675", "0.53119636", "0.53095293", "0.52856845" ]
0.7632334
0
Formats id and subject into a suitable (Freshbooks) title.
def format_title(self, ticket_id, subject): # TODO: strip block tags? title = "#%i %s" % (ticket_id, subject) return title.strip()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _make_title(self):\n ret = self.properties['reason'].capitalize()\n ret += ' has been reported near ' + self.properties['address'].split(',')[0]\n time = datetime.strptime(self.properties['when'], '%Y-%m-%dT%H:%M:%S')\n times = [time.strftime(i).lstrip('0') for i in ('%m', '%d', '%I:%M%p')]\n ret += ' on {}/{} at {}'.format(times[0], times[1], times[2])\n return ret", "def format_title(self, data):\n return data", "def get_title(self):\n return \"{id}@{hn}\".format(id=self.model.identity, hn=self.model.hostname)", "def get_title_by_id(id):\n\n # your code", "def get_title():", "def getMITItemTitle(self,xc,item,id):\n \n titles = xc.xpathEval(\"mitcp:title\")\n title = ''\n if titles:\n title = titles[0].getContent()\n else:\n title = id\n\n return title", "def make_title(words):", "def format_filename(title: str, id: Any, ext: str = \".\", dirFormat=None):\r\n ...", "def dc_title(self):\n return u\"{0} ({1}): {2} {3}\".format(\n self.label, self.in_assessment[0].timepoint,\n self.subjects[0].code_in_study,\n \"...\" if len(self.subjects) > 1 else \"\")", "def numbered_title(self):\n return f\"{self.title}\"", "def numbered_title(self):\n return f\"{self.title}\"", "def Title(self, **kwargs):\n full_name = ''\n if self.getFirstname() == '' or self.getLastname() == '':\n if not self.getOrganization():\n return '...'\n else:\n return self.getOrganization()\n format = kwargs.get('format', None)\n if format == 'natural':\n full_name = '%s %s' % (self.getFirstname(), self.getLastname())\n else:\n full_name = '%s %s' % (self.getLastname(), self.getFirstname())\n return '%s' % full_name", "def generate_title(self, title=None):\n if title is None:\n title = self.header.get('title', self.title)\n\n title = self.generate(title)\n title = title.replace('<p>', '').replace('</p>', '')\n # no trailing newlines\n title = re.sub(r'\\n+', ' ', title).rstrip()\n return title", "def _update_title(self, title, tag, lid):\n return title", "def get_title(self):\n if not hasattr(self, '_title'):\n self._title = 'NO TITLE'\n if self._title:\n title = _(self._title)\n title = title.replace('&', '&amp;') \n title = title.replace('\"', '&quot;')\n return title\n else:\n return u''", "def _generate_title_description(psap_id, title, description):\n if description is None:\n description = PersistentFields.get_description(psap_id)\n else:\n PersistentFields.set_description(psap_id, description)\n if title is None:\n title = PersistentFields.get_title(psap_id)\n else:\n PersistentFields.set_title(psap_id, title)\n\n return title, description", "def get_challenge_name_and_id(self, obj):\n return \"%s - %s\" % (obj.challenge.title, obj.challenge.id)", "def get_challenge_name_and_id(self, obj):\n return \"%s - %s\" % (obj.challenge.title, obj.challenge.id)", "def inject_title(self,template,title):\n return re.sub('TITLE',title,template)", "def name_with_title(self):\n return \"%s %s\" % (self.title, self.name)", "def __str__(self):\n return f\"{self.id}: {self.title}\"", "def _get_full_title(self):\n return \"%s - %s %d\" % (self.title, _('Season'), self.season)", "def format_title(self, title):\n new_title = ''.join(word.lower().strip('!\"#$%&\\'()*+,-./:;<=>?@[\\\\]^_`{|}~ ') for word in title)\n return new_title", "def __str__(self):\n return \"ID {0:25} | Name: {1} \\n\".format(self.id, self.title)", "def title_by_id(id_: int) -> Any:\n post = Posts.query.filter_by(id=id_).first()\n if post is None:\n return \"404\"\n return post.title", "def getTitle(self, item):\n return item.Title() or item.getId()", "def 
title(self, obj):\n return str(obj)", "def get_title(self) -> str:\n pass", "def numbered_title(self):\n return f\"Chapter {self.title}\"", "def get_title_by_id_from_table(table, id):\n\n # your code" ]
[ "0.63595736", "0.6318129", "0.6200423", "0.61921966", "0.6153127", "0.60798085", "0.6040445", "0.6013723", "0.6009649", "0.59691554", "0.59691554", "0.5925276", "0.58930725", "0.5861564", "0.5843622", "0.5807595", "0.57983005", "0.57983005", "0.5792964", "0.57914525", "0.5784936", "0.5754138", "0.5747758", "0.57427716", "0.5736765", "0.57350945", "0.5733803", "0.5710083", "0.56977195", "0.569569" ]
0.78905237
0
Formats Toggl project name and description into (Freshbooks) description.
def format_description(self, project_name, description): description = description if description else '' return "%s %s" % (project_name, '- ' + description)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def description():", "def get_descriptive_name(self):\n description = (f\"{self.year} {self.manufacturer.title()} \"\n f\"{self.model.title()}\")\n\n return description", "def get_describe_name(self):\n long_name = str(self.year)+ ' ' + self.make.title()+ ' ' +self.model.title()\n return long_name", "def __str__(self):\r\n proj_string = \" Project Name: \" + self.__name\r\n proj_string += \"\\n Cover Photo: \" + self.__cover_photo\r\n proj_string += \"\\n Links: \" + self.__links\r\n proj_string += \" Note: \" + self.__note\r\n proj_string += \" Photos: \" + list_str(self.__photos)\r\n\r\n return proj_string", "def full_description(self):\n des = describe_dut(self.dut) if self.dut else ''\n if self.build:\n des += ' with ' + self.build\n if self.result_id:\n des += ' BVT result ID ' + str(self.result_id)\n return (self.description if self.description \n else 'unknown test') + ' on ' + des", "def _create_readme(self, name, summary, description):\n return \"\"\"\n %(header_bar)s\n %(header)s\n %(header_bar)s\n\n %(content)s\n \"\"\" % {\n 'header': name,\n 'header_bar': '=' * len(name),\n 'content': '\\n\\n'.join(\n content\n for content in (summary, description)\n if content\n ) or 'Describe your extension.',\n }", "def unique_project_description():\n return ''.join([str(uuid.uuid4())[:6] for num in range(30)])", "def get_descriptive_name(self): # 定义描述完整信息的方法\n long_name = str(self.year) + \" \" + self.make + \" \" + self.model # 拼接变量字符串并赋值变量\n return long_name.title() # 返回字符串并首字母大写", "def __str__(self):\n return_string = \"Project: {}-{}\".\\\n format(self.public_information[\"project_id\"],\n self.public_information[\"title\"])\n\n return return_string", "def getProjectName():", "def displaySummary(self):\r\n print('Project Name:' + self.project['name'])\r\n print('Project chip:' + self.project['chip'])\r\n print('Project includes: ' + ' '.join(self.project['incs']))\r\n print('Project defines: ' + ' '.join(self.project['defs']))\r\n print('Project srcs: ' + ' '.join(self.project['srcs']))", "def _build_title(db, place):\n descr = place.get_title()\n location = get_main_location(db, place)\n parish = location.get(PlaceType.PARISH)\n city = location.get(PlaceType.CITY)\n state = location.get(PlaceType.STATE)\n title_descr = \"\"\n if descr:\n title_descr += descr.strip()\n if parish:\n title_descr += ', ' + parish.strip() + _(\" parish\")\n if city:\n title_descr += ', ' + city.strip()\n if state:\n title_descr += ', ' + state.strip() + _(\" state\")\n return _strip_leading_comma(title_descr)", "def Description(self) -> str:", "def Description(self) -> str:", "def get_descriptive_name(self):\n long_name = f\"{self.make} {self.model} {self.year}\"\n \n return long_name.title()", "def get_descriptive_name(self):\n return f\"{self.year} {self.make} {self.model}\".title()", "def get_descriptive_name(self):\r\n long_name=str(self.year)+' '+self.make+' '+self.model\r\n return long_name.title()", "def describe(self) -> str:\n return (\n \"{name} {surname} è nata/o a {birth_municipality} ({birth_province_code}) il {birthdate}.\"\n \" Ora vive a {municipality} ({province_code}) in {address} {house_number}.\"\n ).format(**self._data)", "def display_project_info(project_name):\n\n # project = request.args.get('project')\n\n title, description, max_grade = hackbright.get_project_by_title(project_name)\n\n grades = hackbright.get_grades_by_title(project_name)\n\n return render_template(\"project_info.html\",\n title=title,\n description=description,\n grade=max_grade,\n grades=grades)", "def 
description(self, description: str):\n return self.swag({\n 'description': normalize_indent(description),\n })", "def get_descriptive_name(self):\n long_name = f\"{self.year} {self.make} {self.model}\"\n return long_name.title()", "def description(self):\n publisher = self.parent\n\n shotgun_url = publisher.sgtk.shotgun_url\n\n media_page_url = \"%s/page/media_center\" % (shotgun_url,)\n review_url = \"https://www.shotgunsoftware.com/features/#review\"\n\n return \"\"\"\n Separate layers and upload to Shotgun for review.<br><br>\n\n A <b>Version</b> entry will be created in Shotgun and a transcoded\n copy of the file will be attached to it. The file can then be reviewed\n via the project's <a href='%s'>Media</a> page, <a href='%s'>RV</a>, or\n the <a href='%s'>Shotgun Review</a> mobile app.\n \"\"\" % (media_page_url, review_url, review_url)", "def get_descriptive_name(self):\r\n long_name = str(self.year)+' '+self.make + ' '+self.model\r\n return long_name.title()", "def combined_description(desc1, desc2):\n description = desc1\n if desc2:\n description = '{0}_{1}'.format(desc1, desc2)\n\n return description", "def ftitle(self, text):\n return \"{} - {}\".format(self._app_name, text)", "def get_description(self):\n return \"-\".join(\n map(str, (self.release, self.chromosome, self.start, self.reference, self.alternative))\n )", "def __str__(self):\n string = \"\"\"\n Project Factory:\\n\n Directory: {}\\n\n Size: {}\\n\n \"\"\".format(self._directory, len(self.projects))\n return string", "def _make_title(self):\n ret = self.properties['reason'].capitalize()\n ret += ' has been reported near ' + self.properties['address'].split(',')[0]\n time = datetime.strptime(self.properties['when'], '%Y-%m-%dT%H:%M:%S')\n times = [time.strftime(i).lstrip('0') for i in ('%m', '%d', '%I:%M%p')]\n ret += ' on {}/{} at {}'.format(times[0], times[1], times[2])\n return ret", "def testDescription(self):\n project = self.session.create_project()\n\n self.util.stringTypeTest(self, project, \"description\")\n\n self.util.stringPropertyTest(self, project, \"description\")", "def get_descriptive_name(self):\r\n long_name = str(self.year) + ' ' + self.make + ' ' + self.model\r\n #Mostrar_Grande = long_name.upper()\r\n #return long_name.upper()\r\n #return Mostrar_Grande #Funciona Com Return TAMBÉM, mas olhe na linha 39 como seria necessário usar.\r\n print(long_name.upper())" ]
[ "0.6330707", "0.5992437", "0.59294826", "0.59286237", "0.58890724", "0.584214", "0.5832703", "0.5818409", "0.5808963", "0.5762168", "0.5734125", "0.5720652", "0.5715829", "0.5715829", "0.56770307", "0.5670249", "0.56625277", "0.5661302", "0.56451756", "0.5631556", "0.5621286", "0.56144625", "0.5603907", "0.5602831", "0.5570571", "0.5557692", "0.5554851", "0.55504674", "0.5547473", "0.5531748" ]
0.7948996
0
Merges toggle time entries with same project name. Sums duration if billable.
def merge_toggl_time_entries(self, time_entries): tg = Toggl() d = {} for entry in time_entries: if entry.get('billable'): if entry.get('tags') and tg.BOOKED_TAG in entry['tags']: status = 'booked' else: status = 'not-booked' date = parser.parse(entry['start']).date() if not entry.get('pid'): self.log("Couldn't find associated project for entry: %s" % (str(entry))) continue unique_id = str(entry['pid']) + str(date) + status if not entry.get('description'): entry['description'] = "" if d.get(unique_id): d[unique_id]['duration'] += entry['duration'] d[unique_id]['merged_ids'].append(entry['id']) if d[unique_id].get('description'): if entry['description'].strip() not in d[unique_id]['description']: d[unique_id]['description'] += ' / ' + entry['description'] else: d[unique_id]['description'] = entry['description'] else: entry['merged_ids'] = [entry['id']] d[unique_id] = entry return d.values()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def break_time(self):\n\t\ts = timedelta()\n\t\tfor i in xrange(1, len(self.toggles)-1, 2):\n\t\t\ts += self.toggles[i+1] - self.toggles[i]\n\n\t\t# If not working need to add the last period of time\n\t\tif not self.status():\n\t\t\ts += datetime.now() - self.toggles[-1]\n\t\treturn s", "def _task_data(self):\n output = {\n 'all': [],\n 'open': [],\n 'open_hours': 0,\n 'done': [],\n 'done_hours': 0,\n 'week_done': [],\n 'week_done_hours': 0,\n 'week_due': [],\n 'week_due_hours': 0,\n 'velocity': [],\n 'velocity_hours': 0,\n 'velocity_count': 0,\n }\n\n last_sunday = SUNDAY - timedelta(weeks=1)\n three_weeks_ago = MONDAY - timedelta(weeks=4)\n\n tasks = Task.originals.owner_id(self.pk).order_by('due_dt')\n for t in tasks:\n output['all'].append(t)\n # process open tasks\n if not t.completed:\n output['open'].append(t)\n output['open_hours'] += t.task_time\n\n # Process done tasks\n else:\n output['done'].append(t)\n output['done_hours'] += t.task_time\n if t.completed_dt >= three_weeks_ago and t.completed_dt <= last_sunday:\n output['velocity'].append(t)\n output['velocity_hours'] += t.task_time\n\n if t.due_dt >= MONDAY and t.due_dt <= SUNDAY:\n output['week_due'].append(t)\n output['week_due_hours'] += t.task_time\n\n if t.completed and t.completed_dt >= MONDAY and t.completed_dt <= SUNDAY:\n output['week_done'].append(t)\n output['week_done_hours'] += t.task_time\n\n output['all_hours'] = output['open_hours'] + output['done_hours']\n\n # Extra calcs for the velocity\n output['velocity_count'] = len(output['velocity'])\n\n if output['velocity_hours'] > 0:\n output['velocity_hours'] = round(output['velocity_hours']/3,2)\n if output['velocity_count'] > 0:\n output['velocity_count'] = round(Decimal(output['velocity_count'])/3,2)\n\n return output", "def _task_data(self):\n output = {\n 'all': [],\n 'all_hours': 0,\n 'open': [],\n 'open_hours': 0,\n 'done': [],\n 'done_hours': 0,\n }\n\n tasks = Task.originals.project_id(self.pk).order_by('due_dt')\n for t in tasks:\n # process open tasks\n if not t.completed:\n output['open'].append(t)\n output['open_hours'] += t.task_time\n\n # Process done tasks\n else:\n output['done'].append(t)\n output['done_hours'] += t.task_time\n\n # Included in the loop to keep the ordering\n output['all'].append(t)\n\n output['all_hours'] = output['open_hours'] + output['done_hours']\n\n return output", "def addTimeWashed(df): \n # Calculate time washed of food (start of no food)\n time_washed = pd.DataFrame(df.groupby(['date_yyyymmdd'])['wormsorter_start_time'].min())\n time_washed = time_washed.reset_index(drop=False)\n time_washed.columns = ['date_yyyymmdd','time_washed']\n \n df = pd.merge(left=df, right=time_washed, on='date_yyyymmdd')\n \n return df", "def _get_open_projects_info():\n projects = Project.objects.filter(project_open=True).order_by(\"created_at\")\n projects_sum_hours = []\n for project in projects:\n time_entries_pro_project = TimeEntry.objects.filter(project=project)\n used_hours = _sum_hours(time_entries_pro_project)\n hours_percent = _calculate_hours_percent(used_hours, project.stimated_hours)\n projects_sum_hours.append(\n {\n \"hours_percent_number\": hours_percent,\n \"hours_percent\": f\"{hours_percent}%\",\n \"worked_hours\": used_hours,\n \"project\": project,\n }\n )\n return projects_sum_hours", "def merge_arrival_and_completion_time(tests_dataframe):\r\n arrival_time_df = tests_dataframe[['time_test_arrives_lab', 'server_size']]\r\n completion_time_df = tests_dataframe[['completion_time', 'server_size']]\r\n 
arrival_time_df['add'] = 1\r\n completion_time_df['add'] = -1\r\n arrival_time_df = arrival_time_df.rename(columns={\"time_test_arrives_lab\":\"time\"})\r\n completion_time_df = completion_time_df.rename(columns={\"completion_time\":\"time\"})\r\n union = pd.concat([arrival_time_df, completion_time_df])\r\n union = union.sort_values(by=\"time\")\r\n prev_server_size = 0\r\n for index, row in union.iterrows():\r\n if index == 0:\r\n current_server_size= row['server_size'] + row['add']\r\n prev_server_size = current_server_size\r\n #union['server_size'] = union['server_size'] + union['add']\r\n else:\r\n current_server_size = prev_server_size + row['add'] \r\n prev_server_size = current_server_size\r\n union.at[index,'server_size'] = current_server_size\r\n #union.to_csv('union.csv')\r\n return union", "def _02_merge_plant_project_outputs(records, **params):\n output_records = OrderedDict()\n for record in records:\n project_key = (record[\"Power Plant Name\"], record[\"Project Name\"])\n if project_key not in output_records:\n output_records[project_key] = record\n\n base_field = f\"{record['Type']} Output\"\n if record.get(f\"{base_field} Year\") in [None, \"NA\"]:\n continue\n elif (\n output_records[project_key][f\"{base_field} Year\"] in [None, \"NA\"]\n or record[f\"{base_field} Year\"] > output_records[project_key][f\"{base_field} Year\"]\n ):\n for key in [f\"{base_field}\", f\"{base_field} Unit\", f\"{base_field} Year\"]:\n output_records[project_key][key], record[key] = record[key], None\n else:\n for key in [f\"{base_field}\", f\"{base_field} Unit\", f\"{base_field} Year\"]:\n record[key] = None # reduce\n\n return records", "def cal_group_actions(df,option):\r\n\r\n if option == 'precovid':\r\n print('This is the pre-pandemic period:')\r\n elif option == 'postcovid':\r\n print('This is the post-pandemic period:')\r\n\r\n A = df[df['mod_numEdits'] == 1]\r\n B = df[(df['mod_numEdits'] > 1) & (df['mod_numEdits'] <= 10)]\r\n C = df[(df['mod_numEdits'] > 10) & (df['mod_numEdits'] <= 100)]\r\n D = df[(df['mod_numEdits'] >= 100)]\r\n \r\n A.insert(11,'group','A')\r\n B.insert(11,'group','B')\r\n C.insert(11,'group','C')\r\n D.insert(11,'group','D')\r\n\r\n li_add_A = []\r\n li_upd_A = []\r\n li_rem_A = []\r\n\r\n li_add_B = []\r\n li_upd_B = []\r\n li_rem_B = []\r\n\r\n li_add_C = []\r\n li_upd_C = []\r\n li_rem_C = []\r\n\r\n li_add_D = []\r\n li_upd_D = []\r\n li_rem_D = []\r\n\r\n for userid in set(A.userId):\r\n \r\n li_add_A.append(len(A[(A['action'] == 'add') & (A['userId'] == userid)]))\r\n li_upd_A.append(len(A[(A['action'] == 'update') & (A['userId'] == userid)]))\r\n li_rem_A.append(len(A[(A['action'] == 'remove') & (A['userId'] == userid)]))\r\n \r\n for userid in set(B.userId):\r\n \r\n li_add_B.append(len(B[(B['action'] == 'add') & (B['userId'] == userid)]))\r\n li_upd_B.append(len(B[(B['action'] == 'update') & (B['userId'] == userid)]))\r\n li_rem_B.append(len(B[(B['action'] == 'remove') & (B['userId'] == userid)]))\r\n \r\n for userid in set(C.userId):\r\n \r\n li_add_C.append(len(C[(C['action'] == 'add') & (C['userId'] == userid)]))\r\n li_upd_C.append(len(C[(C['action'] == 'update') & (C['userId'] == userid)]))\r\n li_rem_C.append(len(C[(C['action'] == 'remove') & (C['userId'] == userid)]))\r\n\r\n for userid in set(D.userId):\r\n \r\n li_add_D.append(len(D[(D['action'] == 'add') & (D['userId'] == userid)]))\r\n li_upd_D.append(len(D[(D['action'] == 'update') & (D['userId'] == userid)]))\r\n li_rem_D.append(len(D[(D['action'] == 'remove') & (D['userId'] == 
userid)]))\r\n \r\n li_add = [li_add_A, li_add_B, li_add_C, li_add_D]\r\n li_upd = [li_upd_A, li_upd_B, li_upd_C, li_upd_D]\r\n li_rem = [li_rem_A, li_rem_B, li_rem_C, li_rem_D]\r\n\r\n print(f'the mean of li_add_A is:{round(np.mean(li_add_A, dtype=np.float64),2)}')\r\n print(f'the mean of li_add_B is:{round(np.mean(li_add_B, dtype=np.float64),2)}')\r\n print(f'the mean of li_add_C is:{round(np.mean(li_add_C, dtype=np.float64),2)}')\r\n print(f'the mean of li_add_D is:{round(np.mean(li_add_D, dtype=np.float64),2)}')\r\n\r\n print(f'the mean of li_upd_A is:{round(np.mean(li_upd_A, dtype=np.float64),2)}')\r\n print(f'the mean of li_upd_B is:{round(np.mean(li_upd_B, dtype=np.float64),2)}')\r\n print(f'the mean of li_upd_C is:{round(np.mean(li_upd_C, dtype=np.float64),2)}')\r\n print(f'the mean of li_upd_D is:{round(np.mean(li_upd_D, dtype=np.float64),2)}')\r\n\r\n print(f'the mean of li_rem_A is:{round(np.mean(li_rem_A, dtype=np.float64),2)}')\r\n print(f'the mean of li_rem_B is:{round(np.mean(li_rem_B, dtype=np.float64),2)}')\r\n print(f'the mean of li_rem_C is:{round(np.mean(li_rem_C, dtype=np.float64),2)}')\r\n print(f'the mean of li_rem_D is:{round(np.mean(li_rem_D, dtype=np.float64),2)}')\r\n\r\n return li_add, li_upd, li_rem", "def collapse_using_timeStr(self):\n if self.modified == True:\n raise Exception('Probabilities already modified.\\nCollapsing after modification will lead to incorrect results.')\n timeUnits = np.array(process_time_string(self.timeStr))\n if len(self.timeslices) + 1 == np.sum(timeUnits):\n if timeUnits[-1] == 1:\n timeUnits = timeUnits[:-1]\n else:\n timeUnits[-1] -= 1\n if len(self.timeslices) != np.sum(timeUnits):\n raise Exception('Total number of timeslices is different.')\n ind = 0\n cnt = 0\n curr_rates = np.matrix(np.zeros((np.shape(self.obsRates)[0], len(timeUnits))))\n curr_times = []\n for i in timeUnits:\n curr_rates[:, cnt] = np.sum(self.obsRates[:, ind:ind + i], axis=1)\n curr_times.append(np.sum(self.timeslices[ind:ind + i]))\n ind += i\n cnt += 1\n\n self.obsRates = curr_rates\n self.timeslices = curr_times", "def switch_project(project):\n # Get the data\n project = project.lower()\n lines, finished, last_project = parse_file(project=None)\n line1, i1, last1, _, times1 = parse_line(lines, last_project, finished)\n line2, i2, _, new2, times2 = parse_line(lines, project, True)\n now = datetime.now()\n\n # Format the data\n if not finished:\n punch1 = now - last1\n times1.append(punch1)\n punch1 = punch1.total_seconds()\n total1 = sum(t.total_seconds() for t in times1)\n total2 = sum(t.total_seconds() for t in times2)\n now = now.strftime(TIMEF)\n\n # Modifying the lines for the file\n lines[1] = HEADER1 + project\n if not finished:\n\n # Clock-Out\n line1[-1] += IN_OUT_SEP + now\n line1[1] = fnum(total1)\n line1 = PUNCH_SEP.join(line1)\n lines[i1] = line1\n\n # Clock-In\n line2.append(now)\n line2 = PUNCH_SEP.join(line2)\n if new2:\n lines.append(line2)\n else:\n lines[i2] = line2\n\n # Write to file\n with open(PUNCHES_PATH, 'w+') as f:\n f.write('\\n'.join(lines))\n\n # Report\n if new2:\n print(f\"Created Project: '{project}'\")\n if finished:\n print(f\"CURRENTLY CLOCKED OUT, Project Switched From: '{last_project}', To: '{project}'\")\n print(f\"NOW: {now}\")\n print(f\"'{last_project}' Total Hrs: {fnum(total1)}\")\n print(f\"'{project}' Total Hrs: {fnum(total2)}\")\n else:\n print(f\"CLOCK OUT, Project: '{last_project}'\")\n print(f\"CLOCK IN, Project: '{project}'\")\n print(f\"'{last_project}' IN: {last1.strftime(TIMEF)}, NOW: {now}\")\n 
print(f\"'{last_project}' Total Hrs: {fnum(total1)}, Current Punch: {fnum(punch1)}\")\n print(f\"'{project}' Total Hrs: {fnum(total2)}\")", "def get_weekly_project_durations(self, week=0):\n\n # get the start and end of the desired week\n now = dt.datetime.now()\n monday = now.date() - dt.timedelta(days=now.weekday() + 7*week)\n nextmonday = monday + dt.timedelta(days=7)\n\n # get all jobs and associated projects for the selected week\n # there will be one row per job and associated project such that a job\n # which is assigned to two projects will also have two rows\n self.alog.dbcur.execute(\n 'WITH ja (id, start, dur, act) AS ('\n ' SELECT jobs.id, jobs.start, jobs.duration, activities.label '\n ' FROM jobs JOIN activities ON jobs.activity = activities.id '\n ' WHERE jobs.start >= ? AND jobs.start < ?) '\n 'SELECT ja.id, ja.start, ja.dur, ja.act, projects.label '\n 'FROM ja LEFT OUTER JOIN job_pj ON ja.id = job_pj.job '\n ' LEFT OUTER JOIN projects ON job_pj.project = projects.id',\n (monday, nextmonday))\n\n jobs = pd.DataFrame(self.alog.dbcur.fetchall(),\n columns=('id', 'start', 'duration', 'act',\n 'project'))\n\n # do the same thing for people, but do not select jobs here that have a\n # project associated with them\n # note that it's not necessary to outer join here, because I have already\n # got all the necessary information about jobs above\n self.alog.dbcur.execute(\n 'SELECT jobs.id, people.label '\n 'FROM jobs JOIN job_p, people '\n ' ON jobs.id = job_p.job AND job_p.person = people.id '\n 'WHERE jobs.start >= ? '\n ' AND jobs.start < ?'\n ' AND jobs.id NOT IN (SELECT job FROM job_pj)',\n (monday, nextmonday))\n\n j_p = pd.DataFrame(self.alog.dbcur.fetchall(),\n columns=('id', 'person'))\n\n # sort the people as projects into the job list\n ids = j_p.id.unique()\n for jid in ids:\n people = j_p[j_p.id == jid].person\n\n row = jobs[jobs.id == jid].copy()\n row.project = people.iloc[0]\n\n # add first person to the corresponding job\n jobs[jobs.id == jid] = row\n\n # if several people are associated with the job, add more rows to the\n # job list\n for person in people.values[1:]:\n row.project = person\n jobs = jobs.append(row, ignore_index=True)\n\n projects = pd.DataFrame(jobs.groupby('project').duration.sum(\n ).sort_values(ascending=False))\n acts = jobs.act.unique()\n\n for act in acts:\n projects[act] = 0\n\n for pj in projects.index:\n actdurs = jobs[jobs.project == pj].groupby('act').duration.sum()\n\n projects.loc[pj, actdurs.index] = actdurs\n\n # remove activities which did not occur in any of the projects\n # (these are project-independent activities)\n projects = projects.T[projects.sum() > 0].T\n\n return projects", "def projectDuration(listActivities):\n lastAct = max(listActivities, key=lambda activity: activity.startTime)\n return lastAct.startTime + lastAct.duration", "def merge_all_data(self):\n\n logging.info('***** Starting the merging process merge_all_data')\n\n \"\"\" All possible unique_dates to loop on \"\"\"\n date_times = self.merged_unique_dates\n date_times.sort()\n date_times = np.array(date_times) \n\n \"\"\" List storing the indices of the date_index of the merged dataset \"\"\"\n all_combined_obs , all_combined_head, all_combined_era5fb , combined_indices , combined_date_time, = [] , [] , [] , [] , []\n best_ds_list = [] \n source_files = []\n station_configurations = []\n\n \"\"\" The items contained in the lists in the list below can be removed from the list when the record that was previously stored is removed. 
\"\"\"\n all_list = [all_combined_obs , all_combined_head, all_combined_era5fb , combined_indices , combined_date_time, best_ds_list, source_files , station_configurations ] # holder of all the above lists\n all_list_name = ['all_combined_obs' , 'all_combined_head', 'all_combined_era5fb' , 'combined_indices' , 'combined_date_time' , 'best_ds_list', 'source_files' ] \n \n removed_record, kept_record = [], []\n \n \"\"\" Dictionary that will contain the merged file. \"\"\" \n # rand = datetime.strptime('1981-01-03 12:00:00', '%Y-%m-%d %H:%M:%S') \n #dt_bestds_dic = {} # store the selected best dataset for each dt \n #date_times=date_times[0:30000]\n tot = len(date_times)\n tt=time.time()\n print('*** Merging ' , tot, ' records ***')\n \n early_datasets = True\n \n self.processed_dt = [] \n \n for dt, c in zip(date_times, range(tot) ): # loop over all the possible date_times \n\n if (c+1)%1000==0:\n print('Analize : ', str(c+1) , '/', str(tot) , ' ', dt , ' ',\n now(time.time()),'{:5.3f}'.format(time.time()-tt ))\n\n delete = self.delete_ds(dt) # check if there is a dataset to delete \n \n \"\"\" Finding if this record is the same as the previous one analyzed, according to the given time_shift \"\"\"\n if c == 0:\n is_same_record = False\n else:\n is_same_record = self.is_same_record( time_shift = self.hour_time_delta , dt = dt)\n \n \"\"\" Updating list of processed datetimes \"\"\"\n self.processed_dt.append(dt) # cannot put it before the check_timeshift or it will check itself \n\n \n cleaned_df_container = {} \n all_len = [] # will hold the length of all the obs_tabs \n \n for k in self.dataset_per_dt[dt].keys() : # checking the list of available datasets \n ''' {'era5_2': ['example_stations/0-20000-0-82930_era5_2_harvested_era5.conv._1:82930.gz.nc', \n 'example_stations/0-20000-0-82930_era5_2_harvested_era5.conv._82930.gz.nc']}\n ''' \n for F in self.dataset_per_dt[dt][k]: # checking the list of available files for the dataset\n \n if data[k][F][\"counter\"] %self.slice_size==0 or data[k][F][\"counter\"] == 0: # loading the data only at specific slices \n load = self.load_obstab_feedback_sliced(datetime=dt, dataset=k, file = F)\n \n data[k][F][\"counter\"] = data[k][F][\"counter\"] + 1 \n \n obs_tab, era5fb_tab = self.make_obstab_era5fb_dic(dataset = k , date_time = dt, File = F )\n\n if len(obs_tab['date_time'][:])==0: # go to next file if obs_tab is empty \n #print('ZERO length')\n continue \n\n all_len.append( len(obs_tab['date_time'][:] ) )\n \n if k not in cleaned_df_container.keys():\n cleaned_df_container[k] = {}\n\n cleaned_df_container[k][F] = {}\n cleaned_df_container[k][F]['obs_tab'] = obs_tab # cleaned dataframe \n cleaned_df_container[k][F]['era5fb_tab'] = era5fb_tab # cleaned dataframe \n \n \"\"\" Merging the different records found in the sifferent sources \"\"\"\n if bool(all_len): # skipping empty container dictionary. 
At this point I certainyl have one valid record \n best_ds, combined_obs_tab, combined_era5fb_tab, combined_head_tab, selected_file, best_file = self.combine_record(dt, container = cleaned_df_container)\n \n if is_same_record: # decide what to keep in case of same record\n temporary_previous = all_combined_obs[-1] # keep the temporary previous record \n\n if best_ds in ['era5_1','era5_2']: # best_ds from era5\n if best_ds_list[-1] not in ['era5_1','era5_2']: # remove previous non era5_1 or era5_2 record \n for lista in all_list:\n lista.pop() \n #removed_record.append(temporary_previous)\n #kept_record.append(combined_obs_tab) \n\n elif best_ds_list[-1] in ['era5_1','era5_2']:\n if len(combined_obs_tab) <= len(all_combined_obs[-1] ):\n #kept_record.append(temporary_previous) \n #removed_record.append(combined_obs_tab)\n continue # nothing to do, will keep the previous records -> go to next dt \n \n else: # case where both the current and previous are from era5_1 and era5_2, but the previous has smaller number of data \n for lista in all_list:\n lista.pop() \n #removed_record.append(temporary_previous)\n #kept_record.append(combined_obs_tab) \n \n else: # best_ds not from era5\n if best_ds_list[-1] in ['era5_1','era5_2']:\n #print('This best ds is ' , best_ds , ' but I will keep ' , best_ds_list[-1] )\n #kept_record.append(temporary_previous) \n #removed_record.append(combined_obs_tab) \n continue \n \n else:\n if len(combined_obs_tab) < len(all_combined_obs[-1] ):\n #kept_record.append(temporary_previous) \n #removed_record.append(combined_obs_tab) \n continue # nothing to do, will keep the previous records -> go to next dt \n \n elif len(combined_obs_tab) > len(all_combined_obs[-1] ): # remove previous, keep current \n for lista in all_list:\n lista.pop() \n #kept_record.append(combined_obs_tab) \n #removed_record.append(temporary_previous)\n \n elif len(combined_obs_tab) == len(all_combined_obs[-1] ): # prefer igra2, otherwise\n if best_ds == 'igra2':\n for lista in all_list:\n lista.pop() \n #removed_record.append(temporary_previous)\n #kept_record.append(combined_obs_tab) \n \n else: # case where data source is not important, I keep the previous and do nothing \n #kept_record.append(temporary_previous) \n #removed_record.append(combined_obs_tab) \n continue \n \n else: # not the same record, nothing special to do, keep both previous and current \n pass \n else:\n print(' Found an empty record / time shifted record ')\n continue\n \n\n \"\"\" Fill the best_ds list \"\"\"\n best_ds_list.append(best_ds)\n\n \"\"\" Storing the selected file for the source_configuration \"\"\"\n source_files.append(selected_file)\n \"\"\" Selecting the station_configuration \"\"\"\n station_configurations.append(self.data[best_ds][best_file]['station_configuration'] )\n \n \"\"\" Storing the combined era5fb, header and observations tables\"\"\"\n all_combined_era5fb.append(combined_era5fb_tab)\n all_combined_obs .append(combined_obs_tab)\n \n primary, name = self.data[best_ds][best_file]['station_configuration']['primary_id'][0] , self.data[best_ds][best_file]['station_configuration']['station_name'][0] \n #combined_head_tab['primary_station_id'] = [ primary ] * len( combined_head_tab ) \n #combined_head_tab['station_name'] = [ name ] * len( combined_head_tab ) \n \n combined_head_tab['primary_station_id'] = np.array( [primary] )\n combined_head_tab['station_name'] = np.array( [name] )\n \n all_combined_head .append(combined_head_tab)\n\n \"\"\" Dictionary to fill the best_ds for duplicates \"\"\"\n 
#dt_bestds_dic[dt] = {}\n #dt_bestds_dic[dt]['best_ds'] = best_ds\n #dt_bestds_dic[dt]['len'] = len(combined_obs_tab['date_time'])\n\n \"\"\" New merged recordindex and recordtimestamps indices \"\"\"\n combined_indices.append(len(combined_obs_tab['date_time'])) \n combined_date_time.append(dt)\n\n del cleaned_df_container \n \n \n \n #print(blue + 'Memory used after deleting the cleaned_df_container: ', process.memory_info().rss/1000000000 , cend)\n\n \"\"\" Removing remaining loaded df \"\"\"\n for k in self.datasets_keys:\n for F in self.datasets[k]:\n try:\n del data[k][F]['era5fb_tab']\n print('=== removed era5fb ' , k , F )\n except:\n pass\n try:\n del data[k][F]['observations_table']\n print('=== removed obstab ' , k , F ) \n except:\n pass\n \n \n \"\"\" Saving a numpy dictionary \"\"\"\n print(\" === Saving the numpy dictionary of removed and kept records +++ \")\n #dic_records = { 'kept' : kept_record , 'removed': removed_record }\n #np.save(self.station + '_time_shift_removed_kept.npy',dic_records )\n \n \n \"\"\" Storing the merged date_time values and indices \"\"\"\n di=xr.Dataset()\n combined_date_time = np.array(combined_date_time)\n di['recordtimestamp'] = ( {'recordtimestamp' : combined_date_time.shape } , combined_date_time )\n di['recordtimestamp'].attrs['units']='seconds since 1900-01-01 00:00:00'\n\n \"\"\" Creating the merged indices mi \"\"\"\n mi = [] \n mi.append(0)\n for i in range(len(combined_indices)):\n mi.append( combined_indices[i] + mi[-1] )\n mi.pop()\n pop = np.array(mi) # removing last unecessary index \n di['recordindex'] = ( {'recordindex' : pop.shape } , pop )\n\n\n \"\"\" Creating the combined data \"\"\"\n logging.debug('*** Concatenating the observations_table ' ) \n combined_obs = {}\n #### Writing combined observations_table dic\n logging.info(' ***** Writing the observations_table to the netCDF output ***** ' ) \n for k in all_combined_obs[0].keys(): \n a = np.concatenate([all_combined_obs[i][k][:] for i in range(len(all_combined_obs))])\n if k == 'date_time':\n combined_obs[k]= a \n self.tot_records = len(combined_obs[k])\n self.write_merged(content = 'observations_table', table= {k:a})\n #logging.info('*** Written observations table %s: ', k)\n\n\n #self.tot_records = len(combined_obs['date_time'])\n del all_combined_obs\n print(blue + 'Memory used after deleting all_combined_obs dic: ', process.memory_info().rss/1000000000 , cend )\n \n dateindex = combined_obs['date_time']//86400 \n date_times, indices, counts = np.unique(dateindex, return_counts = True, return_index= True) \n di['dateindex'] = ( {'dateindex' : indices.shape } , indices ) # considers the day only \n del combined_obs\n \n combined_era5fb = {}\n #### Writing combined era5fb_table dic \n for k in all_combined_era5fb[0].keys():\n try:\n #combined_era5fb[k]=np.concatenate([all_combined_era5fb[i][k][:] for i in range(len(all_combined_era5fb))])\n #self.write_merged(content = 'era5fb', table= {k:combined_era5fb[k]})\n \"\"\" try replacing , remove combined_era5fb = {} \"\"\"\n a = np.concatenate([all_combined_era5fb[i][k][:] for i in range(len(all_combined_era5fb))])\n self.write_merged(content = 'era5fb', table= {k:a})\n logging.debug('*** Written era5fb %s: ', k)\n except:\n print(\"FAILED feedback variable \" , k)\n\n del all_combined_era5fb\n print(blue + 'Memory used after deleting era5fb_tab dic: ', process.memory_info().rss/1000000000 , cend)\n\n\n #### Writing combined header_table dic \n for k in all_combined_head[0].keys():\n print('head variable is', k )\n if ( k == 
'comments' or k == 'history'):\n continue\n try:\n tab=np.concatenate([all_combined_head[i][k][:] for i in range(len(all_combined_head))])\n self.write_merged(content = 'header_table', table= {k: tab}) # { key: np.array([])}\n logging.info('*** Written header table %s: ', k)\n except:\n print('FFF FAILED variable in header table', k )\n\n del all_combined_head\n print(blue + 'Memory used after deleting all_merged head_tab dic: ', process.memory_info().rss/1000000000 , cend)\n \n self.write_merged(content = 'recordindex', table = di) \n self.write_merged(content = 'cdm_tables', table= '')\n\n\n source_conf=xr.Dataset()\n source_files = np.array(source_files).astype(dtype='|S70')\n source_conf['source_file'] = ( {'source_file' : source_files.shape } , source_files )\n self.write_merged(content = 'source_configuration', table= source_conf )\n\n print(0)\n\n\n \"\"\" Concatenation of station_configurations \"\"\"\n station_conf = pd.concat( station_configurations ) \n for k in station_conf.columns:\n try:\n a =np.array( station_conf[k])\n self.write_merged(content = 'station_configuration', table= {k:a})\n logging.debug('*** Written station_configuration %s: ', k)\n except:\n print(\" Failed station_configuration \" , k )\n \n return 0", "def merge(self, otr):\n self._duration = otr.get_start() - self.get_start()\n self._duration += otr.get_duration()\n self._line[3] = self._duration", "def merge(self, projects):\n benchmarks = set()\n for project in projects:\n for runspec in project:\n for classresult in runspec:\n for instresult in classresult.instresults:\n instresult.instance.maxRuns = max(instresult.instance.maxRuns, len(instresult.runs))\n benchmarks.add(runspec.benchmark)\n return BenchmarkMerge(benchmarks)", "def make_entries(self, user=None, projects=None, dates=None,\n hours=1, minutes=0):\n if not user:\n user = self.user\n if not projects:\n projects = self.default_projects\n if not dates:\n dates = self.default_dates\n for project in projects:\n for day in dates:\n self.log_time(project=project, start=day,\n delta=(hours, minutes), user=user)", "def time_tracking(self):\n fb = FreshBooks()\n tg = Toggl()\n self.print_splash()\n self.print(\"Tip: You can always enter 'skip' when you want to skip a time entry.\", format='warn')\n days = self.get_interactive_days() # number of days to go back\n self.print(\"OK, I'll run you through the Toggl time entries of the past %i day(s).\" % (days))\n timestamp = self.get_timestamp(days) # unix timestamp including tz\n time_entries = tg.get_time_entries(timestamp)\n if len(time_entries) == 0:\n self.print(\"No Toggl entries in this time span!\", 'warn')\n return False\n time_entries = self.merge_toggl_time_entries(time_entries) # merge Toggl entries\n fb_projects = fb.get_projects()\n # Loop through merged Toggl time entries:\n for entry in time_entries:\n # Get and convert all necessary info:\n client_id = tg.get_client_id(project_id=entry.get('pid'))\n client_name = tg.get_client_name(client_id)\n project = tg.get_project(entry.get('pid'))\n duration = int(entry['duration']) / 60 / 60 # convert duration to hours\n duration = round(duration * 4 ) / 4 # round hours to nearest .25\n description = self.format_description(project['name'], entry['description'])\n date = str(parser.parse(entry['start']).date())\n # Print info in a nice way:\n self.print_divider(30)\n self.print(\"Description: \" + description)\n self.print(\"Date: \" + date)\n self.print(\"Hours spent: \" + str(duration))\n # Skip if Toggl entry is already booked:\n if 
entry.get('tags') and tg.BOOKED_TAG in entry['tags']:\n self.print(\"Skipping this entry because it is already in Freshbooks.\", 'cross')\n # Skip if duration is below 0.25:\n elif duration < 0.25:\n self.print(\"Skipping this entry because there are less than 0.25 hours spent.\", 'cross')\n # If billable, add to Freshbooks:\n elif entry['billable']:\n # Get FreshBooks project name through interactive search:\n try:\n self.print(\"Project: \\U0001F50D \")\n fb_project_name = self.interactive_search(fb_projects.keys(), client_name)\n # Handle KeyboardInterrupt\n except KeyboardInterrupt:\n answer = input(\"\\nKeyboardInterrupt! Skip current entry or quit time tracking? (S/q) \")\n if answer.lower() == 's' or answer == '':\n self.clear_lines(1)\n self.print(\"Skipping this entry.\", 'cross')\n continue\n else:\n self.clear_lines(1)\n self.print(\"Ok, stopping time tracking.\", 'cross')\n sys.exit()\n # If user requests so, skip this entry:\n self.clear_lines(1)\n if not fb_project_name:\n self.print(\"Skipping this entry.\", 'cross')\n continue\n # Otherwise, add entry to FreshBooks and tag Toggl entry/entries:\n self.print(\"Project: \" + fb_project_name)\n project_id = fb.get_project_id(fb_project_name)\n fb.add_entry(project_id, duration, description, date)\n tg.tag_projects(entry['merged_ids'], tg.BOOKED_TAG)\n # If not billable, skip entry:\n else:\n self.print(\"Skipping this entry because it is not billable.\", 'cross')\n self.print_divider(30)\n answer = input(\"All done! Open FreshBooks in browser to verify? (Y/n) \")\n if answer.lower() == 'y' or answer == '':\n webbrowser.open('https://%s.freshbooks.com/timesheet' % fb.fb_creds['subdomain'])", "def time_budget(self, mode):\n\n def time_budget_analysis(cursor, plot_parameters, by_category=False):\n \"\"\"\n extract number of occurrences, total duration, mean ...\n if start_time = 0 and end_time = 0 all events are extracted\n \"\"\"\n\n categories, out = {}, []\n for subject in plot_parameters[\"selected subjects\"]:\n out_cat, categories[subject] = [], {}\n\n for behavior in plot_parameters[\"selected behaviors\"]:\n\n if plot_parameters[\"include modifiers\"]:\n\n cursor.execute(\"SELECT distinct modifiers FROM events WHERE subject = ? AND code = ?\",\n (subject, behavior))\n distinct_modifiers = list(cursor.fetchall())\n\n if not distinct_modifiers:\n if not plot_parameters[\"exclude behaviors\"]:\n\n if {self.pj[ETHOGRAM][idx][\"type\"] for idx in self.pj[ETHOGRAM] if\n self.pj[ETHOGRAM][idx][\"code\"] == behavior} == {\"State event\"}:\n out.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": \"-\",\n \"duration\": 0,\n \"duration_mean\": 0,\n \"duration_stdev\": \"NA\",\n \"number\": \"0\",\n \"inter_duration_mean\": \"NA\",\n \"inter_duration_stdev\": \"NA\"})\n else: # point\n out.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": \"-\",\n \"duration\": \"NA\",\n \"duration_mean\": \"NA\",\n \"duration_stdev\": \"NA\",\n \"number\": \"0\",\n \"inter_duration_mean\": \"NA\",\n \"inter_duration_stdev\": \"NA\"})\n continue\n\n if POINT in self.eventType(behavior).upper():\n for modifier in distinct_modifiers:\n\n cursor.execute((\"SELECT occurence, observation FROM events \"\n \"WHERE subject = ? \"\n \"AND code = ? \"\n \"AND modifiers = ? 
\"\n \"ORDER BY observation, occurence\"),\n (subject, behavior, modifier[0]))\n\n rows = cursor.fetchall()\n\n # inter events duration\n all_event_interdurations = []\n for idx, row in enumerate(rows):\n if idx and row[1] == rows[idx - 1][1]:\n all_event_interdurations.append(float(row[0]) - float(rows[idx - 1][0]))\n\n out_cat.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": modifier[0],\n \"duration\": \"NA\",\n \"duration_mean\": \"NA\",\n \"duration_stdev\": \"NA\",\n \"number\": len(rows),\n \"inter_duration_mean\": round(statistics.mean(all_event_interdurations),\n 3) if len(\n all_event_interdurations) else \"NA\",\n \"inter_duration_stdev\": round(\n statistics.stdev(all_event_interdurations), 3) if len(\n all_event_interdurations) > 1 else \"NA\"\n })\n\n if STATE in self.eventType(behavior).upper():\n for modifier in distinct_modifiers:\n\n cursor.execute((\"SELECT occurence, observation FROM events \"\n \"WHERE subject = ? \"\n \"AND code = ? \"\n \"AND modifiers = ? \"\n \"ORDER BY observation, occurence\"),\n (subject, behavior, modifier[0]))\n\n rows = list(cursor.fetchall())\n if len(rows) % 2:\n out.append({\"subject\": subject, \"behavior\": behavior,\n \"modifiers\": modifier[0], \"duration\": UNPAIRED,\n \"duration_mean\": UNPAIRED, \"duration_stdev\": UNPAIRED,\n \"number\": UNPAIRED, \"inter_duration_mean\": UNPAIRED,\n \"inter_duration_stdev\": UNPAIRED})\n else:\n all_event_durations, all_event_interdurations = [], []\n for idx, row in enumerate(rows):\n # event\n if idx % 2 == 0:\n new_init, new_end = float(row[0]), float(rows[idx + 1][0])\n\n all_event_durations.append(new_end - new_init)\n\n # inter event if same observation\n if idx % 2 and idx != len(rows) - 1 and row[1] == rows[idx + 1][1]:\n if plot_parameters[\"start time\"] <= row[0] <= plot_parameters[\n \"end time\"] and plot_parameters[\"start time\"] <= rows[idx + 1][0] <= \\\n plot_parameters[\"end time\"]:\n all_event_interdurations.append(float(rows[idx + 1][0]) - float(row[0]))\n\n # all_event_interdurations.append(float(rows[idx + 1][0]) - float(row[0]))\n\n out_cat.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": modifier[0],\n \"duration\": round(sum(all_event_durations), 3),\n \"duration_mean\": round(statistics.mean(all_event_durations),\n 3) if len(all_event_durations) else \"NA\",\n \"duration_stdev\": round(statistics.stdev(all_event_durations),\n 3) if len(\n all_event_durations) > 1 else \"NA\",\n \"number\": len(all_event_durations),\n \"inter_duration_mean\": round(\n statistics.mean(all_event_interdurations), 3) if len(\n all_event_interdurations) else \"NA\",\n \"inter_duration_stdev\": round(\n statistics.stdev(all_event_interdurations), 3) if len(\n all_event_interdurations) > 1 else \"NA\"\n })\n\n else: # no modifiers\n\n if POINT in self.eventType(behavior).upper():\n\n # if len(selectedObservations) > 1:\n cursor.execute(\n \"SELECT occurence,observation FROM events WHERE subject = ? AND code = ? 
ORDER BY observation, occurence\",\n (subject, behavior))\n\n rows = list(cursor.fetchall())\n\n if len(selectedObservations) == 1:\n new_rows = []\n for occurence, observation in rows:\n new_occurence = max(float(plot_parameters[\"start time\"]), occurence)\n new_occurence = min(new_occurence, float(plot_parameters[\"end time\"]))\n new_rows.append([new_occurence, observation])\n rows = list(new_rows)\n\n if not len(rows):\n if not plot_parameters[\"exclude behaviors\"]:\n\n if {self.pj[ETHOGRAM][idx][\"type\"] for idx in self.pj[ETHOGRAM] if\n self.pj[ETHOGRAM][idx][\"code\"] == behavior} == {\"State event\"}:\n out.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": \"\",\n \"duration\": 0,\n \"duration_mean\": 0,\n \"duration_stdev\": \"NA\",\n \"number\": \"0\",\n \"inter_duration_mean\": \"NA\",\n \"inter_duration_stdev\": \"NA\"})\n else: # point\n out.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": \"\",\n \"duration\": \"NA\",\n \"duration_mean\": \"NA\",\n \"duration_stdev\": \"NA\",\n \"number\": \"0\",\n \"inter_duration_mean\": \"NA\",\n \"inter_duration_stdev\": \"NA\"})\n continue\n\n # inter events duration\n all_event_interdurations = []\n for idx, row in enumerate(rows):\n if idx and row[1] == rows[idx - 1][1]:\n all_event_interdurations.append(float(row[0]) - float(rows[idx - 1][0]))\n\n out_cat.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": \"-\",\n \"duration\": \"NA\",\n \"duration_mean\": \"NA\",\n \"duration_stdev\": \"NA\",\n \"number\": len(rows),\n \"inter_duration_mean\": round(statistics.mean(all_event_interdurations),\n 3) if len(all_event_interdurations) else \"NA\",\n \"inter_duration_stdev\": round(statistics.stdev(all_event_interdurations),\n 3) if len(\n all_event_interdurations) > 1 else \"NA\"\n })\n\n if STATE in self.eventType(behavior).upper():\n\n cursor.execute(\n \"SELECT occurence, observation FROM events WHERE subject = ? AND code = ? 
ORDER BY observation, occurence\",\n (subject, behavior))\n\n rows = list(cursor.fetchall())\n if not len(rows):\n if not plot_parameters[\"exclude behaviors\"]: # include behaviors without events\n out.append({\"subject\": subject, \"behavior\": behavior,\n \"modifiers\": \"-\", \"duration\": 0, \"duration_mean\": 0,\n \"duration_stdev\": \"NA\", \"number\": 0, \"inter_duration_mean\": \"-\",\n \"inter_duration_stdev\": \"-\"})\n continue\n\n if len(rows) % 2:\n out.append({\"subject\": subject, \"behavior\": behavior, \"modifiers\": \"NA\",\n \"duration\": UNPAIRED, \"duration_mean\": UNPAIRED, \"duration_stdev\": UNPAIRED,\n \"number\": UNPAIRED, \"inter_duration_mean\": UNPAIRED,\n \"inter_duration_stdev\": UNPAIRED})\n else:\n all_event_durations, all_event_interdurations = [], []\n for idx, row in enumerate(rows):\n # event\n if idx % 2 == 0:\n new_init, new_end = float(row[0]), float(rows[idx + 1][0])\n\n all_event_durations.append(new_end - new_init)\n\n # inter event if same observation\n if idx % 2 and idx != len(rows) - 1 and row[1] == rows[idx + 1][1]:\n if plot_parameters[\"start time\"] <= row[0] <= plot_parameters[\"end time\"] and \\\n plot_parameters[\"start time\"] <= rows[idx + 1][0] <= plot_parameters[\n \"end time\"]:\n all_event_interdurations.append(float(rows[idx + 1][0]) - float(row[0]))\n\n out_cat.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": \"-\",\n \"duration\": round(sum(all_event_durations), 3),\n \"duration_mean\": round(statistics.mean(all_event_durations), 3) if len(\n all_event_durations) else \"NA\",\n \"duration_stdev\": round(statistics.stdev(all_event_durations),\n 3) if len(all_event_durations) > 1 else \"NA\",\n \"number\": len(all_event_durations),\n \"inter_duration_mean\": round(statistics.mean(all_event_interdurations),\n 3) if len(\n all_event_interdurations) else \"NA\",\n \"inter_duration_stdev\": round(\n statistics.stdev(all_event_interdurations), 3) if len(\n all_event_interdurations) > 1 else \"NA\"\n })\n\n out += out_cat\n\n if by_category: # and flagCategories:\n\n for behav in out_cat:\n\n try:\n category = [self.pj[ETHOGRAM][x][\"category\"] for x in self.pj[ETHOGRAM] if\n \"category\" in self.pj[ETHOGRAM][x] and self.pj[ETHOGRAM][x][\"code\"] == behav[\n 'behavior']][0]\n except:\n category = \"\"\n\n if category in categories[subject]:\n if behav[\"duration\"] not in [\"-\", \"NA\"] and categories[subject][category][\n \"duration\"] != \"-\":\n categories[subject][category][\"duration\"] += behav[\"duration\"]\n else:\n categories[subject][category][\"duration\"] = \"-\"\n categories[subject][category][\"number\"] += behav[\"number\"]\n else:\n categories[subject][category] = {\"duration\": behav[\"duration\"], \"number\": behav[\"number\"]}\n\n out_sorted = []\n for subject in plot_parameters[\"selected subjects\"]:\n for behavior in plot_parameters[\"selected behaviors\"]:\n for row in out:\n if row[\"subject\"] == subject and row[\"behavior\"] == behavior:\n out_sorted.append(row)\n\n ### http://stackoverflow.com/questions/673867/python-arbitrary-order-by\n return out_sorted, categories\n\n def default_value(behav, param):\n \"\"\"\n return value for duration in case of point event\n \"\"\"\n default_value_ = 0\n if ({self.pj[ETHOGRAM][idx][\"type\"] for idx in self.pj[ETHOGRAM] if\n self.pj[ETHOGRAM][idx][\"code\"] == behav} == {\"Point event\"}\n and param in [\"duration\"]):\n default_value_ = \"-\"\n return default_value_\n\n def init_behav_modif():\n \"\"\"\n initialize dictionary with subject, 
behaviors and modifiers\n \"\"\"\n behaviors = {}\n for subj in plot_parameters[\"selected subjects\"]:\n behaviors[subj] = {}\n for behav_modif in distinct_behav_modif:\n behav, modif = behav_modif\n if behav not in behaviors[subj]:\n behaviors[subj][behav] = {}\n if not plot_parameters[\"include modifiers\"]:\n for param in parameters:\n behaviors[subj][behav][param[0]] = default_value(behav, param[0])\n\n if plot_parameters[\"include modifiers\"]:\n behaviors[subj][behav][modif] = {}\n for param in parameters:\n behaviors[subj][behav][modif][param[0]] = default_value(behav, param[0])\n\n return behaviors\n\n result, selectedObservations = self.selectObservations(MULTIPLE)\n if not selectedObservations:\n return\n\n # check if state events are paired\n out = \"\"\n not_paired_obs_list = []\n for obsId in selectedObservations:\n r, msg = project_functions.check_state_events_obs(obsId, self.pj[ETHOGRAM],\n self.pj[OBSERVATIONS][obsId], self.timeFormat)\n\n if not r:\n out += \"Observation: <strong>{obsId}</strong><br>{msg}<br>\".format(obsId=obsId, msg=msg)\n not_paired_obs_list.append(obsId)\n\n if out:\n out = \"Some observations have UNPAIRED state events<br><br>\" + out\n self.results = dialog.Results_dialog()\n self.results.setWindowTitle(programName + \" - Check selected observations\")\n self.results.ptText.setReadOnly(True)\n self.results.ptText.appendHtml(out)\n self.results.pbSave.setVisible(False)\n self.results.pbCancel.setVisible(True)\n\n if not self.results.exec_():\n return\n\n flagGroup = False\n if len(selectedObservations) > 1 and mode != \"synthetic\":\n flagGroup = dialog.MessageDialog(programName, \"Group observations in one time budget analysis?\",\n [YES, NO]) == YES\n\n '''\n # check if state events are paired\n out = \"\"\n for obsId in selectedObservations:\n r, msg = project_functions.check_state_events_obs(obsId, self.pj[ETHOGRAM],\n self.pj[OBSERVATIONS][obsId],\n self.timeFormat)\n if not r:\n out += \"Observation: <strong>{obsId}</strong><br>{msg}<br>\".format(obsId=obsId, msg=msg)\n if out:\n self.results = dialog.ResultsWidget()\n self.results.setWindowTitle(programName + \" - Check selected observations\")\n self.results.ptText.setReadOnly(True)\n self.results.ptText.appendHtml(out)\n self.results.show()\n '''\n\n selectedObsTotalMediaLength = Decimal(\"0.0\")\n max_obs_length = 0\n for obsId in selectedObservations:\n obs_length = project_functions.observation_total_length(self.pj[OBSERVATIONS][obsId])\n\n logging.debug(\"media length for {0}: {1}\".format(obsId, obs_length))\n\n if obs_length in [0, -1]:\n selectedObsTotalMediaLength = -1\n break\n max_obs_length = max(max_obs_length, obs_length)\n\n selectedObsTotalMediaLength += obs_length\n\n # an observation media length is not available\n if selectedObsTotalMediaLength == -1:\n # propose to user to use max event time\n if dialog.MessageDialog(programName,\n \"A media length is not available.<br>Use last event time as media length?\",\n [YES, NO]) == YES:\n maxTime = 0 # max length for all events all subjects\n for obsId in selectedObservations:\n if self.pj[OBSERVATIONS][obsId][EVENTS]:\n maxTime += max(self.pj[OBSERVATIONS][obsId][EVENTS])[0]\n logging.debug(\"max time all events all subjects: {}\".format(maxTime))\n selectedObsTotalMediaLength = maxTime\n else:\n selectedObsTotalMediaLength = 0\n\n logging.debug(\"selectedObsTotalMediaLength: {}\".format(selectedObsTotalMediaLength))\n\n if mode in [\"by_behavior\", \"by_category\"]:\n if len(selectedObservations) > 1:\n plot_parameters = 
self.choose_obs_subj_behav_category(selectedObservations,\n maxTime=max_obs_length,\n by_category=(mode == \"by_category\"))\n else:\n plot_parameters = self.choose_obs_subj_behav_category(selectedObservations,\n maxTime=selectedObsTotalMediaLength,\n by_category=(mode == \"by_category\"))\n\n if mode == \"synthetic\":\n plot_parameters = self.choose_obs_subj_behav_category(selectedObservations,\n maxTime=max_obs_length,\n flagShowExcludeBehaviorsWoEvents=False,\n by_category=False)\n\n if not plot_parameters[\"selected subjects\"] or not plot_parameters[\"selected behaviors\"]:\n return\n\n # check if time_budget window must be used\n if mode in [\"by_behavior\", \"by_category\"] and (flagGroup or len(selectedObservations) == 1):\n\n cursor = db_functions.load_events_in_db(self.pj, plot_parameters[\"selected subjects\"], selectedObservations,\n plot_parameters[\"selected behaviors\"])\n\n total_observation_time = 0\n for obsId in selectedObservations:\n\n obs_length = project_functions.observation_total_length(self.pj[OBSERVATIONS][obsId])\n\n if obs_length == -1:\n obs_length = 0\n\n if plot_parameters[\"time\"] == TIME_FULL_OBS:\n min_time = float(0)\n max_time = float(obs_length)\n\n if plot_parameters[\"time\"] == TIME_EVENTS:\n try:\n min_time = float(self.pj[OBSERVATIONS][obsId][\"events\"][0][0])\n except:\n min_time = float(0)\n try:\n max_time = float(self.pj[OBSERVATIONS][obsId][\"events\"][-1][0])\n except:\n max_time = float(obs_length)\n\n if plot_parameters[\"time\"] == TIME_ARBITRARY_INTERVAL:\n min_time = float(plot_parameters[\"start time\"])\n max_time = float(plot_parameters[\"end time\"])\n\n # check intervals\n for subj in plot_parameters[\"selected subjects\"]:\n for behav in plot_parameters[\"selected behaviors\"]:\n if POINT in self.eventType(behav).upper():\n continue\n # extract modifiers\n\n cursor.execute(\n \"SELECT distinct modifiers FROM events WHERE observation = ? AND subject = ? AND code = ?\",\n (obsId, subj, behav))\n distinct_modifiers = list(cursor.fetchall())\n\n logging.debug(\"distinct_modifiers: {}\".format(distinct_modifiers))\n\n for modifier in distinct_modifiers:\n\n logging.debug(\"modifier #{}#\".format(modifier[0]))\n\n if len(cursor.execute(\n \"SELECT * FROM events WHERE observation = ? AND subject = ? AND code = ? AND modifiers = ? AND occurence < ?\",\n (obsId, subj, behav, modifier[0], min_time)).fetchall()) % 2:\n cursor.execute(\n \"INSERT INTO events (observation, subject, code, type, modifiers, occurence) VALUES (?,?,?,?,?,?)\",\n (obsId, subj, behav, \"STATE\", modifier[0], min_time))\n\n if len(cursor.execute(\n \"SELECT * FROM events WHERE observation = ? AND subject = ? AND code = ? AND modifiers = ? AND occurence > ?\",\n (obsId, subj, behav, modifier[0], max_time)).fetchall()) % 2:\n cursor.execute(\n \"INSERT INTO events (observation, subject, code, type, modifiers, occurence) VALUES (?,?,?,?,?,?)\",\n (obsId, subj, behav, \"STATE\", modifier[0], max_time))\n try:\n cursor.execute(\"COMMIT\")\n except:\n pass\n\n total_observation_time += (max_time - min_time)\n\n cursor.execute(\"DELETE FROM events WHERE observation = ? AND (occurence < ? 
OR occurence > ?)\",\n (obsId, min_time, max_time))\n\n out, categories = time_budget_analysis(cursor, plot_parameters, by_category=(mode == \"by_category\"))\n\n # widget for results visualization\n self.tb = timeBudgetResults(logging.getLogger().getEffectiveLevel(), self.pj)\n\n # observations list\n self.tb.label.setText(\"Selected observations\")\n for obs in selectedObservations:\n self.tb.lw.addItem(obs)\n\n # media length\n if len(selectedObservations) > 1:\n if total_observation_time:\n if self.timeFormat == HHMMSS:\n self.tb.lbTotalObservedTime.setText(\n \"Total observation length: {}\".format(seconds2time(total_observation_time)))\n if self.timeFormat == S:\n self.tb.lbTotalObservedTime.setText(\n \"Total observation length: {:0.3f}\".format(float(total_observation_time)))\n else:\n self.tb.lbTotalObservedTime.setText(\"Total observation length: not available\")\n else:\n if self.timeFormat == HHMMSS:\n self.tb.lbTotalObservedTime.setText(\n \"Analysis from {} to {}\".format(seconds2time(min_time), seconds2time(max_time)))\n if self.timeFormat == S:\n self.tb.lbTotalObservedTime.setText(\n \"Analysis from {:0.3f} to {:0.3f} s\".format(float(min_time), float(max_time)))\n\n if mode == \"by_behavior\":\n\n tb_fields = [\"Subject\", \"Behavior\", \"Modifiers\", \"Total number\", \"Total duration (s)\",\n \"Duration mean (s)\", \"Duration std dev\", \"inter-event intervals mean (s)\",\n \"inter-event intervals std dev\", \"% of total length\"]\n\n fields = [\"subject\", \"behavior\", \"modifiers\", \"number\", \"duration\", \"duration_mean\", \"duration_stdev\",\n \"inter_duration_mean\", \"inter_duration_stdev\"]\n self.tb.twTB.setColumnCount(len(tb_fields))\n self.tb.twTB.setHorizontalHeaderLabels(tb_fields)\n\n for row in out:\n self.tb.twTB.setRowCount(self.tb.twTB.rowCount() + 1)\n column = 0\n for field in fields:\n item = QTableWidgetItem(str(row[field]).replace(\" ()\", \"\"))\n # no modif allowed\n item.setFlags(Qt.ItemIsEnabled)\n self.tb.twTB.setItem(self.tb.twTB.rowCount() - 1, column, item)\n column += 1\n\n # % of total time\n if row[\"duration\"] not in [\"NA\", \"-\", UNPAIRED, 0] and selectedObsTotalMediaLength:\n item = QTableWidgetItem(str(round(row[\"duration\"] / float(total_observation_time) * 100, 1)))\n else:\n item = QTableWidgetItem(\"NA\")\n\n item.setFlags(Qt.ItemIsEnabled)\n self.tb.twTB.setItem(self.tb.twTB.rowCount() - 1, column, item)\n\n if mode == \"by_category\":\n tb_fields = [\"Subject\", \"Category\", \"Total number\", \"Total duration (s)\"]\n fields = [\"number\", \"duration\"]\n self.tb.twTB.setColumnCount(len(tb_fields))\n self.tb.twTB.setHorizontalHeaderLabels(tb_fields)\n\n for subject in categories:\n\n for category in categories[subject]:\n\n self.tb.twTB.setRowCount(self.tb.twTB.rowCount() + 1)\n\n column = 0\n item = QTableWidgetItem(subject)\n item.setFlags(Qt.ItemIsEnabled)\n self.tb.twTB.setItem(self.tb.twTB.rowCount() - 1, column, item)\n\n column = 1\n if category == \"\":\n item = QTableWidgetItem(\"No category\")\n else:\n item = QTableWidgetItem(category)\n item.setFlags(Qt.ItemIsEnabled)\n self.tb.twTB.setItem(self.tb.twTB.rowCount() - 1, column, item)\n\n for field in fields:\n column += 1\n item = QTableWidgetItem(str(categories[subject][category][field]))\n item.setFlags(Qt.ItemIsEnabled)\n item.setTextAlignment(Qt.AlignRight | Qt.AlignVCenter)\n self.tb.twTB.setItem(self.tb.twTB.rowCount() - 1, column, item)\n\n self.tb.twTB.resizeColumnsToContents()\n\n self.tb.show()\n\n if mode in [\"by_behavior\", \"by_category\"] and 
(\n not flagGroup and len(selectedObservations) > 1) or mode == \"synthetic\":\n\n if mode in [\"by_behavior\", \"by_category\"]:\n items = (\"Tab Separated Values (*.tsv)\",\n \"Comma separated values (*.csv)\",\n \"OpenDocument Spreadsheet (*.ods)\",\n \"OpenDocument Workbook (*.ods)\",\n \"Microsoft Excel Spreadsheet (*.xlsx)\",\n \"Microsoft Excel Workbook (*.xlsx)\",\n \"HTML (*.html)\",\n \"Legacy Microsoft Excel Spreadsheet (*.xls)\")\n\n formats = [\"tsv\", \"csv\", \"od spreadsheet\", \"od workbook\", \"xlsx spreadsheet\", \"xlsx workbook\", \"html\",\n \"xls legacy\"]\n\n item, ok = QInputDialog.getItem(self, \"Time budget analysis format\", \"Available formats\", items, 0,\n False)\n if not ok:\n return\n\n outputFormat = formats[items.index(item)]\n extension = re.sub(\".* \\(\\*\\.\", \"\", item)[:-1]\n\n flagWorkBook = False\n\n if mode in [\"by_behavior\", \"by_category\"] and \"workbook\" in outputFormat:\n workbook = tablib.Databook()\n flagWorkBook = True\n if \"xls\" in outputFormat:\n filters = \"Microsoft Excel Workbook *.xlsx (*.xlsx);;All files (*)\"\n if \"od\" in outputFormat:\n filters = \"Open Document Workbook *.ods (*.ods);;All files (*)\"\n\n if QT_VERSION_STR[0] == \"4\":\n WBfileName, filter_ = QFileDialog(self).getSaveFileNameAndFilter(self, \"Save Time budget analysis\",\n \"\", filters)\n else:\n WBfileName, filter_ = QFileDialog(self).getSaveFileName(self, \"Save Time budget analysis\", \"\",\n filters)\n if not WBfileName:\n return\n\n if mode in [\"by_behavior\", \"by_category\"] and \"workbook\" not in outputFormat: # not workbook\n exportDir = QFileDialog(self).getExistingDirectory(self,\n \"Choose a directory to save the time budget analysis\",\n os.path.expanduser(\"~\"),\n options=QFileDialog.ShowDirsOnly)\n if not exportDir:\n return\n\n if mode == \"synthetic\":\n\n formats_str = (\"Tab Separated Values *.txt, *.tsv (*.txt *.tsv);;\"\n \"Comma Separated Values *.txt *.csv (*.txt *.csv);;\"\n \"Open Document Spreadsheet *.ods (*.ods);;\"\n \"Microsoft Excel Spreadsheet *.xlsx (*.xlsx);;\"\n # \"Pandas dataframe (*.df);;\"\n \"Legacy Microsoft Excel Spreadsheet *.xls (*.xls);;\"\n \"HTML *.html (*.html);;\"\n \"All files (*)\")\n\n while True:\n if QT_VERSION_STR[0] == \"4\":\n fileName, filter_ = QFileDialog(self).getSaveFileNameAndFilter(self, \"Save Time budget report\",\n \"\", formats_str)\n else:\n fileName, filter_ = QFileDialog(self).getSaveFileName(self, \"Save Time budget report\", \"\",\n formats_str)\n\n if not fileName:\n return\n\n extension = \"\"\n availableFormats = (\n \"tsv\", \"csv\", \"ods\", \"xlsx)\", \"xls)\", \"html\") # ) is added to distinguish between xls and xlsx\n for fileExtension in availableFormats:\n if fileExtension in filter_:\n extension = fileExtension.replace(\")\", \"\")\n if not extension:\n QMessageBox.warning(self, programName, \"Choose a file format\",\n QMessageBox.Ok | QMessageBox.Default, QMessageBox.NoButton)\n else:\n break\n\n data_report = tablib.Dataset()\n data_report.title = \"Synthetic time budget\"\n\n parameters = [[\"duration\", \"Total duration\"], [\"number\", \"Number of occurrences\"]]\n\n cursor = db_functions.load_events_in_db(self.pj, plot_parameters[\"selected subjects\"],\n selectedObservations, plot_parameters[\"selected behaviors\"])\n\n cursor.execute(\"SELECT distinct code, modifiers FROM events WHERE subject in ({})\".format(\n \",\".join(\"?\" * len(plot_parameters[\"selected subjects\"]))),\n (plot_parameters[\"selected subjects\"]))\n\n distinct_behav_modif = 
[[rows[\"code\"], rows[\"modifiers\"]] for rows in cursor.fetchall()]\n\n # add selected behaviors that are not observed\n for behav in plot_parameters[\"selected behaviors\"]:\n if [x for x in distinct_behav_modif if x[0] == behav] == []:\n distinct_behav_modif.append([behav, \"-\"])\n\n behaviors = init_behav_modif()\n\n subj_header, behav_header, modif_header, param_header = [\"\", \"\"], [\"\", \"\"], [\"\", \"\"], [\"\",\n \"Total length (s)\"]\n # subj_header, behav_header, modif_header, param_header = [\"\"], [\"\"], [\"\"], [\"\"]\n for subj in plot_parameters[\"selected subjects\"]:\n for behav in plot_parameters[\"selected behaviors\"]:\n if not plot_parameters[\"include modifiers\"]:\n for param in parameters:\n subj_header.append(subj)\n behav_header.append(behav)\n param_header.append(param[1])\n\n if plot_parameters[\"include modifiers\"]:\n for modif in sorted(list(behaviors[subj][behav].keys())):\n for param in parameters:\n subj_header.append(subj)\n behav_header.append(behav)\n modif_header.append(modif)\n param_header.append(param[1])\n\n data_report.append(subj_header)\n data_report.append(behav_header)\n if plot_parameters[\"include modifiers\"]:\n data_report.append(modif_header)\n data_report.append(param_header)\n\n if mode == \"by_behavior\":\n fields = [\"subject\", \"behavior\", \"modifiers\", \"number\",\n \"duration\", \"duration_mean\", \"duration_stdev\",\n \"inter_duration_mean\", \"inter_duration_stdev\"]\n\n if mode == \"by_category\":\n fields = [\"subject\", \"category\", \"number\", \"duration\"]\n\n for obsId in selectedObservations:\n\n cursor = db_functions.load_events_in_db(self.pj, plot_parameters[\"selected subjects\"], [obsId],\n plot_parameters[\"selected behaviors\"])\n\n obs_length = project_functions.observation_total_length(self.pj[OBSERVATIONS][obsId])\n\n if obs_length == -1:\n obs_length = 0\n\n if plot_parameters[\"time\"] == TIME_FULL_OBS:\n min_time = float(0)\n max_time = float(obs_length)\n\n if plot_parameters[\"time\"] == TIME_EVENTS:\n try:\n min_time = float(self.pj[OBSERVATIONS][obsId][\"events\"][0][0])\n except:\n min_time = float(0)\n try:\n max_time = float(self.pj[OBSERVATIONS][obsId][\"events\"][-1][0])\n except:\n max_time = float(obs_length)\n\n if plot_parameters[\"time\"] == TIME_ARBITRARY_INTERVAL:\n min_time = float(plot_parameters[\"start time\"])\n max_time = float(plot_parameters[\"end time\"])\n\n # check intervals\n for subj in plot_parameters[\"selected subjects\"]:\n for behav in plot_parameters[\"selected behaviors\"]:\n if POINT in self.eventType(behav).upper():\n continue\n # extract modifiers\n # if plot_parameters[\"include modifiers\"]:\n\n cursor.execute(\n \"SELECT distinct modifiers FROM events WHERE observation = ? AND subject = ? AND code = ?\",\n (obsId, subj, behav))\n distinct_modifiers = list(cursor.fetchall())\n\n for modifier in distinct_modifiers:\n\n if len(cursor.execute(\n \"\"\"SELECT * FROM events WHERE observation = ? AND subject = ? AND code = ? AND modifiers = ? AND occurence < ?\"\"\",\n (obsId, subj, behav, modifier[0], min_time)).fetchall()) % 2:\n cursor.execute(\n \"INSERT INTO events (observation, subject, code, type, modifiers, occurence) VALUES (?,?,?,?,?,?)\",\n (obsId, subj, behav, \"STATE\", modifier[0], min_time))\n if len(cursor.execute(\n \"\"\"SELECT * FROM events WHERE observation = ? AND subject = ? AND code = ? AND modifiers = ? 
AND occurence > ?\"\"\",\n (obsId, subj, behav, modifier[0], max_time)).fetchall()) % 2:\n cursor.execute(\n \"INSERT INTO events (observation, subject, code, type, modifiers, occurence) VALUES (?,?,?,?,?,?)\",\n (obsId, subj, behav, \"STATE\", modifier[0], max_time))\n try:\n cursor.execute(\"COMMIT\")\n except:\n pass\n\n cursor.execute(\"\"\"DELETE FROM events WHERE observation = ? AND (occurence < ? OR occurence > ?)\"\"\",\n (obsId, min_time, max_time))\n\n out, categories = time_budget_analysis(cursor, plot_parameters, by_category=(mode == \"by_category\"))\n\n if mode == \"synthetic\":\n\n behaviors = init_behav_modif()\n\n for element in out:\n for param in parameters:\n if not plot_parameters[\"include modifiers\"]:\n try:\n behaviors[element[\"subject\"]][element[\"behavior\"]][param[0]] = element[param[0]]\n except:\n pass\n if plot_parameters[\"include modifiers\"]:\n try:\n behaviors[element[\"subject\"]][element[\"behavior\"]][element[\"modifiers\"]][param[0]] = \\\n element[param[0]]\n except:\n pass\n\n columns = []\n columns.append(obsId)\n columns.append(\"{:0.3f}\".format(max_time - min_time))\n # columns.append([obsId])\n\n for subj in plot_parameters[\"selected subjects\"]:\n for behav in plot_parameters[\"selected behaviors\"]:\n if not plot_parameters[\"include modifiers\"]:\n for param in parameters:\n columns.append(behaviors[subj][behav][param[0]])\n if plot_parameters[\"include modifiers\"]:\n for modif in sorted(list(behaviors[subj][behav].keys())):\n for param in parameters:\n columns.append(behaviors[subj][behav][modif][param[0]])\n\n data_report.append(columns)\n\n if mode in [\"by_behavior\", \"by_category\"]:\n rows = []\n # observation id\n rows.append([\"Observation id\", obsId])\n rows.append([\"\"])\n\n labels = [\"Independent variables\"]\n values = [\"\"]\n if INDEPENDENT_VARIABLES in self.pj and self.pj[INDEPENDENT_VARIABLES]:\n for idx in self.pj[INDEPENDENT_VARIABLES]:\n labels.append(self.pj[INDEPENDENT_VARIABLES][idx][\"label\"])\n if (INDEPENDENT_VARIABLES in self.pj[OBSERVATIONS][obsId]\n and self.pj[INDEPENDENT_VARIABLES][idx][\"label\"] in self.pj[OBSERVATIONS][obsId][\n INDEPENDENT_VARIABLES]):\n values.append(self.pj[OBSERVATIONS][obsId][INDEPENDENT_VARIABLES][\n self.pj[INDEPENDENT_VARIABLES][idx][\"label\"]])\n rows.append(labels)\n rows.append(values)\n rows.append([\"\"])\n\n rows.append(\n [\"Analysis from\", \"{:0.3f}\".format(float(min_time)), \"to\", \"{:0.3f}\".format(float(max_time))])\n rows.append([\"Total length (s)\", \"{:0.3f}\".format(float(max_time - min_time))])\n rows.append([\"\"])\n rows.append([\"Time budget\"])\n\n if mode == \"by_behavior\":\n\n rows.append(fields + [\"% of total length\"])\n # data.headers = fields + [\"% of total media length\"]\n\n for row in out:\n values = []\n for field in fields:\n values.append(str(row[field]).replace(\" ()\", \"\"))\n\n # % of total time\n if row[\"duration\"] not in [\"NA\", \"-\", UNPAIRED, 0] and selectedObsTotalMediaLength:\n # if row[\"duration\"] != \"-\" and row[\"duration\"] != 0 and row[\"duration\"] != UNPAIRED and selectedObsTotalMediaLength:\n values.append(round(row[\"duration\"] / float(max_time - min_time) * 100, 1))\n '''\n if len(selectedObservations) > 1:\n values.append(round(row[\"duration\"] / float(selectedObsTotalMediaLength) * 100, 1))\n else:\n values.append(round(row[\"duration\"] / float(max_time - min_time) * 100, 1))\n '''\n else:\n values.append(\"-\")\n\n rows.append(values)\n\n if mode == \"by_category\":\n rows.append = fields\n # 
data.headers = fields # + [\"% of total media length\"]\n for subject in categories:\n\n for category in categories[subject]:\n values = []\n values.append(subject)\n if category == \"\":\n values.append(\"No category\")\n else:\n values.append(category)\n\n values.append(categories[subject][category][\"number\"])\n values.append(categories[subject][category][\"duration\"])\n\n rows.append(values)\n\n data = tablib.Dataset()\n data.title = obsId\n for row in rows:\n data.append(complete(row, max([len(r) for r in rows])))\n\n if \"xls\" in outputFormat:\n for forbidden_char in EXCEL_FORBIDDEN_CHARACTERS:\n data.title = data.title.replace(forbidden_char, \" \")\n\n if flagWorkBook:\n for forbidden_char in EXCEL_FORBIDDEN_CHARACTERS:\n data.title = data.title.replace(forbidden_char, \" \")\n if \"xls\" in outputFormat:\n if len(data.title) > 31:\n data.title = data.title[:31]\n workbook.add_sheet(data)\n\n else:\n\n fileName = exportDir + os.sep + safeFileName(obsId) + \".\" + extension\n\n if outputFormat in [\"tsv\", \"csv\", \"html\"]:\n with open(fileName, \"wb\") as f:\n f.write(str.encode(data.export(outputFormat)))\n\n if outputFormat == \"od spreadsheet\":\n with open(fileName, \"wb\") as f:\n f.write(data.ods)\n\n if outputFormat == \"xlsx spreadsheet\":\n with open(fileName, \"wb\") as f:\n f.write(data.xlsx)\n\n if outputFormat == \"xls legacy\":\n if len(data.title) > 31:\n data.title = data.title[:31]\n QMessageBox.warning(None, programName, (\n \"The worksheet name <b>{0}</b> was shortened to <b>{1}</b> due to XLS format limitations.\\n\"\n \"The limit on worksheet name length is 31 characters\").format(obsId, data.title),\n QMessageBox.Ok | QMessageBox.Default, QMessageBox.NoButton)\n\n with open(fileName, \"wb\") as f:\n f.write(data.xls)\n\n if mode == \"synthetic\":\n if extension in [\"tsv\", \"csv\", \"html\"]:\n with open(fileName, \"wb\") as f:\n f.write(str.encode(data_report.export(extension)))\n if extension in [\"ods\", \"xlsx\", \"xls\"]:\n with open(fileName, \"wb\") as f:\n f.write(data_report.export(extension))\n\n if mode in [\"by_behavior\", \"by_category\"] and flagWorkBook:\n if \"xls\" in outputFormat:\n with open(WBfileName, \"wb\") as f:\n f.write(workbook.xlsx)\n if \"od\" in outputFormat:\n with open(WBfileName, \"wb\") as f:\n f.write(workbook.ods)", "def merge_delta_time(\n username: str | None = None,\n password: str | None = None,\n verbose: bool = False,\n mode: oct = 0o775\n ):\n # retrieve history delta time files\n pull_deltat_file('historic_deltat.data',\n username=username, password=password,\n verbose=verbose, mode=mode\n )\n # read historic delta time file\n historic_file=pyTMD.utilities.get_data_path(['data','historic_deltat.data'])\n historic = np.loadtxt(historic_file, skiprows=2)\n HY = np.floor(historic[:,0])\n HM = 12.0*np.mod(historic[:,0],1.0) + 1.0\n HD = np.ones_like(historic[:,0])\n # retrieve monthly delta time files\n pull_deltat_file('deltat.data',\n username=username, password=password,\n verbose=verbose, mode=mode\n )\n # read modern monthly delta time file\n monthly_file = pyTMD.utilities.get_data_path(['data','deltat.data'])\n monthly = np.loadtxt(monthly_file)\n monthly_time = convert_calendar_decimal(monthly[:,0],monthly[:,1],\n day=monthly[:,2])\n # retrieve daily delta time files\n merge_bulletin_a_files(\n username=username, password=password,\n verbose=verbose, mode=mode\n )\n # read modern daily delta time file from IERS Bulletin A files\n daily_file = pyTMD.utilities.get_data_path(['data','iers_deltat.data'])\n daily 
= np.loadtxt(daily_file)\n daily_time = convert_calendar_decimal(daily[:,0], daily[:,1],\n day=daily[:,2])\n # write to new merged file\n merged_file = pyTMD.utilities.get_data_path(['data','merged_deltat.data'])\n fid = merged_file.open(mode='w', encoding='utf8')\n logging.info(str(merged_file))\n file_format = ' {0:4.0f} {1:2.0f} {2:2.0f} {3:7.4f}'\n # use historical values for times prior to monthly\n ind1, = np.nonzero(historic[:,0] < monthly_time[0])\n for i in ind1:\n args = (HY[i],HM[i],HD[i],historic[i,1])\n print(file_format.format(*args),file=fid)\n # use monthly values for times prior to daily\n ind2, = np.nonzero(monthly_time < np.min(daily_time))\n for i in ind2:\n args = (monthly[i,0],monthly[i,1],monthly[i,2],monthly[i,3])\n print(file_format.format(*args),file=fid)\n # use daily values for all times available\n for i in np.argsort(daily_time):\n args = (daily[i,0],daily[i,1],daily[i,2],daily[i,3])\n print(file_format.format(*args),file=fid)\n # close the merged file and change the permissions mode\n fid.close()\n merged_file.chmod(mode)", "def merge_logs(self):\n ourlog = LogData()\n for l in self.data_set:\n ourlog.entries = ourlog.entries + l.entries\n ourlog.sort_time()\n self.finalized_data = ourlog", "def timings_across_runs(self):\n\n\t\t# first determine individual run duration (to make sure that stimulus timings of all runs are correct)\n\t\trun_duration = []\n\t\tfor r in [self.runList[i] for i in self.conditionDict['WMM']]:\n\t\t\tniiFile = NiftiImage(self.runFile(stage = 'processed/mri', run = r))\n\t\t\ttr, nr_trs = round(niiFile.rtime*1)/1000.0, niiFile.timepoints\n\t\t\trun_duration.append(tr * nr_trs)\n\t\trun_duration = np.r_[0,np.cumsum(np.array(run_duration))]\n\n\t\t# timing information stimuli\n\t\tstim_info = []\n\t\trun = 0\n\t\tfor r in [self.runList[i] for i in self.conditionDict['WMM']]:\n\t\t\tstim_events = np.loadtxt(self.runFile(stage = 'processed/behavior', run = r, extension = '.txt', postFix = ['stim' ,'all','task']))\n\t\t\tstim_events[:,:2] += run_duration[run]\n\t\t\tstim_info.append(stim_events)\n\t\t\trun += 1\n\n\t\t# save stim_info as text_file\t\n\t\tnp.savetxt(self.runFile(stage = 'processed/behavior', postFix = ['stim_info_all'],extension = '.txt'), np.vstack(stim_info), fmt = '%3.2f', delimiter = '\\t')", "def merge_time_constraints(list_constraints, constraint_one, constraint_two):\n\n list_constraints.remove(constraint_one)\n list_constraints.remove(constraint_two)\n\n ending_time1 = constraint_one.starting_time + constraint_one.duration\n ending_time2 = constraint_two.starting_time + constraint_two.duration\n\n new_name = constraint_one.name + \" and \" + constraint_two.name\n starting_time = min(constraint_one.starting_time, constraint_two.starting_time)\n duration = max(ending_time1, ending_time2) - starting_time\n\n new_constraint = Constraint(new_name, constraint_one.month, constraint_one.day,\n # Starting time\n starting_time,\n # Ending time\n duration,\n False, False, False)\n\n list_constraints.append(new_constraint)\n\n return list_constraints", "def merge_time_metric(self, metric):\n\n self.merge_raw_time_metric(metric.duration, metric.exclusive)", "def join_target(self):\n df = self.get_all_data()\n target_df = self.get_target_df().copy(deep=True)\n target_df['ft_data_dt'] = target_df['ft_data_dt'].astype('datetime64[M]') - pd.DateOffset(months=2) + MonthEnd(1)\n df = df.merge(target_df, on=['idd', 'ft_data_dt'], how='left')\n values = {'target': 0}\n df['target'] = df['target'].replace(np.nan, 0)\n 
self.set_prep_data(df)", "def sync_update(self):\n for rec in self:\n if rec.ks_last_exported_date and rec.ks_sync_date:\n ks_reduced_ks_sync_time = rec.ks_last_exported_date - datetime.timedelta(seconds=30)\n ks_increased_ks_sync_time = rec.ks_last_exported_date + datetime.timedelta(seconds=30)\n if rec.ks_sync_date > ks_reduced_ks_sync_time and rec.ks_sync_date < ks_increased_ks_sync_time:\n rec.ks_sync_status = True\n else:\n rec.ks_sync_status = False\n else:\n rec.ks_sync_status = False", "def updateDateValues(self):\n kwargs = {\"cdb_project_id\": self.project.cdb_project_id}\n cca = Project.MakeChangeControlAttributes()\n kwargs.update(cdb_mdate=sqlapi.SQLdbms_date(cca[u\"cdb_mdate\"]))\n kwargs.update(cdb_mpersno=cca[u\"cdb_mpersno\"])\n\n update_gap_by_view = \"\"\"cdbpcs_taskrel_gaps_v SET gap = new_gap\n WHERE pred_pid = '%(cdb_project_id)s'\n OR succ_pid = '%(cdb_project_id)s'\"\"\" % kwargs\n update_gap_by_select = \"\"\"cdbpcs_taskrel SET gap =\n (SELECT CASE\n WHEN cdbpcs_taskrel.rel_type = 'AA' THEN b.start_time_fcast_offset - a.start_time_fcast_offset\n WHEN cdbpcs_taskrel.rel_type = 'AE' THEN b.end_time_fcast_offset - a.start_time_fcast_offset\n WHEN cdbpcs_taskrel.rel_type = 'EA' THEN b.start_time_fcast_offset - a.end_time_fcast_offset\n WHEN cdbpcs_taskrel.rel_type = 'EE' THEN b.end_time_fcast_offset - a.end_time_fcast_offset\n ELSE 0 END +\n CASE\n WHEN a.milestone = 1 AND b.milestone = 1 AND a.early_position = 0 AND b.early_position = 1 THEN -1\n WHEN a.milestone = 1 AND b.milestone = 0 AND a.early_position = 0 AND cdbpcs_taskrel.rel_type IN ('EA', 'AA') THEN -1\n WHEN a.milestone = 0 AND b.milestone = 1 AND b.early_position = 1 AND cdbpcs_taskrel.rel_type IN ('EA', 'EE') THEN -1\n WHEN a.milestone = 0 AND b.milestone = 0 AND cdbpcs_taskrel.rel_type IN ('EA') THEN -1\n ELSE 0 END +\n CASE\n WHEN a.status = 180 THEN a.days_fcast\n ELSE 0 END\n FROM cdbpcs_task a, cdbpcs_task b\n WHERE cdbpcs_taskrel.cdb_project_id2 = a.cdb_project_id\n AND cdbpcs_taskrel.task_id2 = a.task_id\n AND cdbpcs_taskrel.cdb_project_id = b.cdb_project_id\n AND cdbpcs_taskrel.task_id = b.task_id)\n WHERE cdb_project_id2 = '%(cdb_project_id)s'\n OR cdb_project_id = '%(cdb_project_id)s'\n \"\"\" % kwargs\n\n update_gap_stmt = {\n sqlapi.DBMS_SQLITE: update_gap_by_select,\n sqlapi.DBMS_MSSQL: update_gap_by_view,\n sqlapi.DBMS_ORACLE: update_gap_by_view,\n }\n\n updates = [\n \"\"\"cdbpcs_project\n SET start_time_plan = (SELECT CASE\n WHEN MIN(cdbpcs_task.start_time_fcast) < MIN(cdbpcs_task.start_time_plan)\n THEN MIN(cdbpcs_task.start_time_fcast)\n ELSE MIN(cdbpcs_task.start_time_plan)\n END\n FROM cdbpcs_task\n WHERE cdbpcs_task.cdb_project_id = '%(cdb_project_id)s'\n AND cdbpcs_task.parent_task = ''\n ), end_time_plan = (SELECT CASE\n WHEN MAX(cdbpcs_task.end_time_fcast) > MAX(cdbpcs_task.end_time_plan)\n THEN MAX(cdbpcs_task.end_time_fcast)\n ELSE MAX(cdbpcs_task.end_time_plan)\n END\n FROM cdbpcs_task\n WHERE cdbpcs_task.cdb_project_id = '%(cdb_project_id)s'\n AND cdbpcs_task.parent_task = ''\n ), cdb_mdate = %(cdb_mdate)s,\n cdb_mpersno = '%(cdb_mpersno)s'\n WHERE cdb_project_id = '%(cdb_project_id)s'\n \"\"\" % kwargs,\n (\"\"\"cdbpcs_project\n SET start_time_fcast = start_time_plan,\n end_time_fcast = end_time_plan,\n days_fcast = days,\n duration_fcast = duration,\n cdb_mdate = %(cdb_mdate)s,\n cdb_mpersno = '%(cdb_mpersno)s'\n WHERE cdb_project_id = '%(cdb_project_id)s'\n \"\"\" % kwargs) if self.project.auto_update_time else None,\n \"\"\"cdbpcs_task\n SET total_float = late_finish_offset 
- end_time_fcast_offset,\n cdb_mdate = %(cdb_mdate)s,\n cdb_mpersno = '%(cdb_mpersno)s'\n WHERE cdb_project_id = '%(cdb_project_id)s'\n \"\"\" % kwargs,\n update_gap_stmt[sqlapi.SQLdbms()],\n \"\"\"cdbpcs_task\n SET cdb_mdate = %(cdb_mdate)s,\n cdb_mpersno = '%(cdb_mpersno)s'\n WHERE cdb_project_id = '%(cdb_project_id)s'\n AND task_id IN (SELECT task_id FROM cdbpcs_taskrel\n WHERE cdb_project_id = '%(cdb_project_id)s'\n AND (violation = 0 AND minimal_gap > gap\n OR violation = 1 AND minimal_gap <= gap)\n UNION\n SELECT task_id2 FROM cdbpcs_taskrel\n WHERE cdb_project_id2 = '%(cdb_project_id)s'\n AND (violation = 0 AND minimal_gap > gap\n OR violation = 1 AND minimal_gap <= gap))\n \"\"\" % kwargs,\n \"\"\"cdbpcs_taskrel\n SET violation = CASE\n WHEN minimal_gap <= gap\n THEN 0\n ELSE 1\n END\n WHERE cdb_project_id = '%(cdb_project_id)s'\n OR cdb_project_id2 = '%(cdb_project_id)s'\n \"\"\" % kwargs,\n ]\n for upd in updates:\n if upd:\n sqlapi.SQLupdate(upd)", "def make_all_datetime(self):\n \n logging.info('\\n *** Running make_all_datetime ' )\n \n all_uniques = [] # storing a list with all the unique date_tmes \n which_k_in_dt = {} # list of avilable dataset for each unique date_time, so that when looping over the distinct date_times, only the proper dataset will be read and compared \n\n def add_time_delta(time_offset_value, date_time, dataset):\n \"\"\" Converting to proper date_time adding the time_delta. \n Removes minutes rounding to closest integer hour. \"\"\" \n \n if 'minutes' in time_offset:\n date_time_delta = [ timedelta(minutes = float(i) ) + time_offset_value for i in date_time ]\n elif 'hours' in time_offset:\n date_time_delta = [ timedelta(hours = float(i) ) + time_offset_value for i in date_time ] \n elif 'seconds' in time_offset: \n date_time_delta = [ timedelta(seconds = float(i) ) + time_offset_value for i in date_time ] \n \n \n '''\n if 'era' not in dataset:\n \n if 'minutes' in time_offset:\n date_time_delta = [ timedelta(minutes = float(i) ) + time_offset_value for i in date_time ]\n elif 'hours' in time_offset:\n date_time_delta = [ timedelta(hours = float(i) ) + time_offset_value for i in date_time ] \n elif 'seconds' in time_offset: \n date_time_delta = [ timedelta(seconds = float(i) ) + time_offset_value for i in date_time ] \n else:\n date_time = np.array( [ datetime.strptime(str(int(i)), '%Y%m%d%H') for i in date_time ] )# convert to datetime object \n ''' \n \n #else:\n # print('check if time is wrong !!!! 
(should never happen)')\n # sys.exit() \n #unique_dt = [i for i in [ time_offset_value + j for j in delta ] ] \n #unique_dt = [ i +0 ]\n date_time_delta = [ i.replace(minute=0, second=0) for i in date_time_delta ] \n \n return date_time_delta \n\n\n for k,v in self.datasets.items() : \n self.unique_dates[k] = {}\n \n self.unique_dates[k]['indices'] = {} \n #self.unique_dates[k]['indices_low'] = {} \n #self.unique_dates[k]['index_up'] = {} \n \n \"\"\" recordtimestamp from the input file \"\"\"\n \n \"\"\" Convert to proper date_time using the add_time_delta funtion \"\"\"\n logging.debug(' Calculating the time_delta for : %s', k )\n \n File = nc.Dataset(self.datasets[k]) \n unique = File.variables['recordtimestamp']\n \n self.data[k]['recordtimestamp'] = File.variables['recordtimestamp'][:].data\n self.data[k]['recordindex'] = File.variables['recordindex'][:].data\n \n time_offset = File.groups['observations_table']['date_time'].units\n time_offset_value = time_offset.split('since ') [1] \n time_offset_value = datetime.strptime(time_offset_value, '%Y-%m-%d %H:%M:%S')\n \n #unique = self.data[k]['recordtimestamp']\n \n unique_dt = add_time_delta (time_offset_value, unique, k ) \n \n all_uniques += unique_dt # adding to the total unique date_times \n \n \"\"\" Extracting the recordindex low and up from the input file \"\"\"\n indices = self.data[k]['recordindex']\n \n \"\"\" Loop over all the date_times of each dataset \"\"\"\n for dt, index_low, count in zip (unique_dt, indices, range(len(unique_dt)) ):\n \n try: \n which_k_in_dt[dt].append(k)\n except:\n which_k_in_dt[dt] = []\n which_k_in_dt[dt].append(k) \n \n self.unique_dates[k]['indices'][dt] = {}\n self.unique_dates[k]['indices'][dt]['low'] = index_low \n try:\n index_up = indices[ count + 1 ] # works until the last available recordindex\n except: \n #index_up = len(indices-1) \n index_up = len(indices)-1 \n \n self.unique_dates[k]['indices'][dt]['up'] = index_up\n \n #self.unique_dates[k]['indices'].append(index) \n #self.unique_dates[k]['indices_up'].append(index_up) \n \n self.dataset_per_dt = which_k_in_dt \n self.merged_unique_dates = np.unique(np.array(all_uniques) ) # storing the set of all distinct dt values \n logging.debug('make_all_datetime finished ')", "def add_jira_entries(config, date, dry_run, economic):\n if date is not None:\n jira = Jira(config.items('Jira'))\n for task in jira.get_tasks():\n if task:\n economic.add_time_entry(task, dry_run)", "def wg_task_summary(request, fieldname='workinggroup', view='production', taskdays=3):\n query = {}\n hours = 24 * taskdays\n startdate = datetime.now() - timedelta(hours=hours)\n startdate = startdate.strftime(settings.DATETIME_FORMAT)\n enddate = datetime.now().strftime(settings.DATETIME_FORMAT)\n query['modificationtime__castdate__range'] = [startdate, enddate]\n if fieldname == 'workinggroup':\n query['workinggroup__isnull'] = False\n if view == 'production':\n query['tasktype'] = 'prod'\n elif view == 'analysis':\n query['tasktype'] = 'anal'\n\n if 'processingtype' in request.session['requestParams']:\n query['processingtype'] = request.session['requestParams']['processingtype']\n\n if 'workinggroup' in request.session['requestParams']:\n query['workinggroup'] = request.session['requestParams']['workinggroup']\n\n if 'project' in request.session['requestParams']:\n query['taskname__istartswith'] = request.session['requestParams']['project']\n\n summary = JediTasks.objects.filter(**query).values(fieldname, 'status').annotate(Count('status')).order_by(\n fieldname, 
'status')\n totstates = {}\n tottasks = 0\n wgsum = {}\n for state in const.TASK_STATES:\n totstates[state] = 0\n for rec in summary:\n wg = rec[fieldname]\n status = rec['status']\n count = rec['status__count']\n if status not in const.TASK_STATES:\n continue\n tottasks += count\n totstates[status] += count\n if wg not in wgsum:\n wgsum[wg] = {}\n wgsum[wg]['name'] = wg\n wgsum[wg]['count'] = 0\n wgsum[wg]['states'] = {}\n wgsum[wg]['statelist'] = []\n for state in const.TASK_STATES:\n wgsum[wg]['states'][state] = {}\n wgsum[wg]['states'][state]['name'] = state\n wgsum[wg]['states'][state]['count'] = 0\n wgsum[wg]['count'] += count\n wgsum[wg]['states'][status]['count'] += count\n\n # convert to ordered lists\n suml = []\n for f in wgsum:\n itemd = {}\n itemd['field'] = f\n itemd['count'] = wgsum[f]['count']\n kys = copy.deepcopy(const.TASK_STATES)\n iteml = []\n for ky in kys:\n iteml.append({'kname': ky, 'kvalue': wgsum[f]['states'][ky]['count']})\n itemd['list'] = iteml\n suml.append(itemd)\n suml = sorted(suml, key=lambda x: x['field'])\n return suml", "def aggregatePlans(update):\n out.header('Aggregating plans\\n')\n # For now we just order the plans and return a new list\n update.plans.sort()" ]
[ "0.52670914", "0.521612", "0.5171499", "0.49770486", "0.49408284", "0.49355707", "0.47276974", "0.47106627", "0.4675955", "0.4652707", "0.46401706", "0.46156195", "0.45833963", "0.4529443", "0.45191568", "0.44983798", "0.44943383", "0.4493547", "0.4472287", "0.44453704", "0.4442126", "0.4402147", "0.4399679", "0.43958047", "0.43868315", "0.4376463", "0.4354266", "0.4350832", "0.4348201", "0.43386894" ]
0.6820048
0
Mark a class as Controller Resource
def add_resource(self, cls): # check if the same controller was already used for another cls (Resource) if ( hasattr(self, Controller.RESOURCE_CLASS_KEY) and getattr(self, Controller.RESOURCE_CLASS_KEY) != cls ): raise MultipleResourceException() # check if cls (Resource) was exteded from another if hasattr(cls, Controller.RC_KEY): self.__get_parent_routes(cls.__router__) setattr(cls, Controller.RC_KEY, self.router) setattr(self, Controller.RESOURCE_CLASS_KEY, cls) cls.router = lambda: Controller.__parse_controller_router(cls) return cls
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_resource():\n return wsgi.Resource(Controller())", "def setController(self, controller):\n self.__controller = controller", "def __init__(self, controller):\n self._controller = controller", "def resource(self, prefix):\n def wrapper(cls):\n # Save the original init\n clsinit = getattr(cls, '__init__', lambda self: None)\n\n # Dirty trick, make the class belong to the type restful.Resource\n cls = type(cls.__name__, (Resource,), dict(cls.__dict__))\n\n aliases = getattr(cls, 'aliases', None)\n if isinstance(aliases, dict) and len(aliases) > 0:\n cls.preparer = FieldsPreparer(fields=aliases)\n\n # Rename self for using inside __init__\n api = self\n\n def __init__(self, *args, **kwargs):\n # Call Resource constructor\n super(cls, self).__init__(api)\n\n # Initialize the instance\n clsinit(self, *args, **kwargs)\n\n cls.__init__ = __init__\n\n # Add the resource to the API\n cls.add_url_rules(self.app, prefix)\n\n return cls\n\n return wrapper", "def resource(self, resource):\n self._resource = resource", "def resource(self, resource):\n self._resource = resource", "def resource(self, resource):\n\n self._resource = resource", "def create_resource():\n return wsgi.Resource(WorkersController())", "def setController_( self, Controller ):\n\t\ttry:\n\t\t\tself._controller = Controller\n\t\texcept Exception as e:\n\t\t\tself.logToConsole( \"setController_: %s\" % str(e) )", "def expose(self, model, route='/api', access_control=None, resource_class=Resource, **kwargs):\n endpoint_path = route + '/' + inflection.pluralize(inflection.underscore(model.__name__))\n endpoint = endpoint_path\n resource = Resource(model=model, access_control=access_control)\n self._add_api_method(endpoint_path, resource.list_,\n methods=['GET'], endpoint=endpoint + '/list')\n self._add_api_method('%s/<id>' % endpoint_path, resource.get_,\n methods=['GET'], endpoint=endpoint + '/get')\n\n self._add_api_method(endpoint_path, resource.put_,\n methods=['PUT'], endpoint=endpoint + '/put')\n\n self._add_api_method('%s/<id>' % endpoint_path, resource.delete_,\n methods=['DELETE'], endpoint=endpoint + '/delete')\n\n self._add_api_method(endpoint_path, resource.post_,\n methods=['POST'], endpoint=endpoint + 'post')\n\n self._add_api_method('%s/<id>' % endpoint_path, resource.patch_,\n methods=['PATCH'], endpoint=endpoint + 'patch')", "def create_controller(self, typ):\n return self.controller_objects[typ]()", "def add_resource(self, cls, url, **kwargs):\n methods = []\n callmap = {}\n # Create instance of resource handler, if passed as just class (not instance)\n try:\n obj = cls()\n except TypeError:\n obj = cls\n # Get all implemented HTTP methods and make callmap\n for m in ['GET', 'POST', 'PUT', 'PATCH', 'DELETE']:\n fn = m.lower()\n if hasattr(obj, fn):\n methods.append(m)\n callmap[m.encode()] = (getattr(obj, fn), kwargs)\n self.add_route(url, restful_resource_handler,\n methods=methods,\n save_headers=['Content-Length', 'Content-Type'],\n _callmap=callmap)", "def use(_):\n\n def wrapper(cls):\n __app_controllers__.append(cls)\n return cls\n\n return wrapper", "def create_controller() -> Controller:\n _controller = Controller()\n return _controller", "def create_resource():\n #deserializer = ImageDeserializer()\n #serializer = ImageSerializer()\n return wsgi.Resource(Controller())", "def _get_controller(self):\n return self.__controller", "def _set_controller(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = 
YANGDynClass(v,base=YANGListType(\"controller_name\",controller.controller, yang_name=\"controller\", rest_name=\"controller\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='controller-name', extensions={u'tailf-common': {u'info': u'OpenFlow controller configuration', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-compact-syntax': None, u'cli-incomplete-command': None, u'callpoint': u'OpenFlowGlobalControllerCallpoint'}}), is_container='list', yang_name=\"controller\", rest_name=\"controller\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'OpenFlow controller configuration', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-compact-syntax': None, u'cli-incomplete-command': None, u'callpoint': u'OpenFlowGlobalControllerCallpoint'}}, namespace='urn:brocade.com:mgmt:brocade-openflow', defining_module='brocade-openflow', yang_type='list', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"controller must be of a type compatible with list\"\"\",\n 'defined-type': \"list\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGListType(\"controller_name\",controller.controller, yang_name=\"controller\", rest_name=\"controller\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='controller-name', extensions={u'tailf-common': {u'info': u'OpenFlow controller configuration', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-compact-syntax': None, u'cli-incomplete-command': None, u'callpoint': u'OpenFlowGlobalControllerCallpoint'}}), is_container='list', yang_name=\"controller\", rest_name=\"controller\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'OpenFlow controller configuration', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-compact-syntax': None, u'cli-incomplete-command': None, u'callpoint': u'OpenFlowGlobalControllerCallpoint'}}, namespace='urn:brocade.com:mgmt:brocade-openflow', defining_module='brocade-openflow', yang_type='list', is_config=True)\"\"\",\n })\n\n self.__controller = t\n if hasattr(self, '_set'):\n self._set()", "def _create_controller(main_controller, action_controller_list):\n controller = server.wsgi.Resource(main_controller())\n for ctl in action_controller_list:\n controller.register_actions(ctl())\n return controller", "def rest_resource(resource_cls):\n ecommerce_checkout_api.add_resource(resource_cls, *resource_cls.endpoints)\n return resource_cls", "def resource(name):\n \n def w(cls):\n RESOURCES[name] = cls\n return cls\n \n return w", "def mvcObj(self, router):\n pass", "def create_resource():\n deserializer = wsgi.JSONRequestDeserializer()\n serializer = wsgi.JSONResponseSerializer()\n return wsgi.Resource(Controller(), deserializer, serializer)", "def name(self) -> str:\n return \"Controller\"", "def create_resource():\n return wsgi.Resource(Controller(), serializer=ImageSerialize())", "def resource(self):\n return self.add_resource", "def define_route(self, route, **kwargs):\n\n def decorator(cls):\n if is_class(cls):\n resource = cls(**kwargs)\n else:\n resource = cls\n\n self.add_route(route, resource)\n\n return cls\n\n return decorator", "def resource_type(cls):\n pass", "def releaseController(self,entry):\n \n controllerName = entry.get('controller')\n \n if controllerName is None:\n 
self.logger.debug('Path: \"{}\" controller not decleared, we leave'.format(entry.get('path')))\n self.controller = Controller(self)\n return\n \n self.logger.debug(\"entrypath: {} controller: {}\".format(entry.get('path'),controllerName))\n\n sControllerPath = entry.get('path','').replace('/','.')\n sControllerPath = sControllerPath.lower()\n \n if sControllerPath.startswith('.'): sControllerPath = sControllerPath[1:]\n\n if sControllerPath == '':\n sControllerFile = 'mvc.controller.{}'.format(controllerName)\n else:\n sControllerFile = 'mvc.controller.{}.{}'.format(sControllerPath,controllerName)\n \n sControllerFile = self.settings.base+'/'+sControllerFile.replace('.','/')+'.py'\n sControllerFile = os.path.realpath(sControllerFile)\n \n if not os.path.isfile(sControllerFile):\n msg = 'Keinen Controller Datei {} gefunden'.format(sControllerFile)\n self.logger.debug(msg)\n self.content = msg\n Emergency.stop(msg)\n return\n\n if sControllerPath == '':\n sCommand = \"from mvc.controller.{0} import {0}\".format(controllerName)\n else:\n sCommand = \"from mvc.controller.{0}.{1} import {1}\".format(sControllerPath,controllerName)\n \n self.logger.debug('Import Controller over \"{}\"'.format(sCommand))\n try:\n exec(sCommand)\n except Exception as ex:\n msg = 'Fehler bei Import des Controller \"{}\": \"{}\"'.format(sCommand,ex)\n self.content = msg\n self.logger.debug(msg)\n Emergency.stop(msg)\n \n self.controller = None\n sCommand = \"{}(self)\".format(controllerName)\n self.logger.debug('Build controller by sentence: \"{}\"'.format(sCommand))\n\n try:\n self.controller = eval(sCommand)\n except Exception as ex:\n msg = 'Controller \"{}\" kann nicht initialiert werden; Meldung: \"{}\"'.format(sCommand,ex)\n self.content = msg\n self.logger.debug(msg)\n Emergency.stop(msg)\n\n \n self.prepareController()\n \n try:\n self.controller.get()\n except Exception as ex:\n msg = 'Fehler bei get() des Controller \"{}\": \"{}\" Abbruch'.format(controllerName,ex)\n self.logger.debug(msg)\n self.logger.debug(self.content)\n self.controller.status == self.controller.FAILED\n Emergency.stop(msg)", "def pre_routing_instance_create(self, resource_dict):\n pass", "def target_resource(self, target_resource):\n self._target_resource = target_resource" ]
[ "0.6103949", "0.6056706", "0.59474033", "0.5917977", "0.58889747", "0.58889747", "0.5795911", "0.5779954", "0.57289934", "0.5706962", "0.5686955", "0.5658022", "0.55966556", "0.55881155", "0.5557832", "0.55321854", "0.5516671", "0.55104506", "0.5504374", "0.5495111", "0.54755294", "0.5465437", "0.5447657", "0.54118544", "0.53996545", "0.53638494", "0.53637004", "0.5351829", "0.5348223", "0.53410304" ]
0.6852721
0
It returns the FastAPI router. Use it as if you are using the original one.
def route(self) -> APIRouter: return self.router
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __call__(self, req):\n return self._router", "def create_router(self, environment, *, router=None):\n\n if router is None:\n router = self.router\n\n return utils.objects.ensure_instance(router, environment=environment)", "def __parse_controller_router(cls):\n router = getattr(cls, Controller.RC_KEY)\n\n dependencies = None\n if hasattr(cls, \"dependencies\"):\n dependencies = deepcopy(cls.dependencies)\n delattr(cls, \"dependencies\")\n\n for route in router.routes:\n # add class dependencies\n if dependencies:\n for depends in dependencies[::-1]:\n route.dependencies.insert(0, depends)\n\n # get the signature of the endpoint function\n signature = inspect.signature(route.endpoint)\n # get the parameters of the endpoint function\n signature_parameters = list(signature.parameters.values())\n\n # replace the class instance with the itself FastApi Dependecy\n signature_parameters[0] = signature_parameters[0].replace(\n default=Depends(cls)\n )\n\n # set self and after it the keyword args\n new_parameters = [signature_parameters[0]] + [\n parameter.replace(kind=inspect.Parameter.KEYWORD_ONLY)\n for parameter in signature_parameters[1:]\n ]\n\n new_signature = signature.replace(parameters=new_parameters)\n setattr(route.endpoint, Controller.SIGNATURE_KEY, new_signature)\n\n return router", "def get_api(self):\n from geoffrey.utils import get_api\n return get_api(self.app.routes, prefix=\"/\")", "def taskrouter(self):\n if self._taskrouter is None:\n self._taskrouter = Taskrouter(self)\n return self._taskrouter", "def useRouterOnly(self, router):\n return self.mountRouterOnly(router)", "def RemoteRouter(services):\n return PublicController(services)", "def _configure_api_routes(self, app: FastAPI):\n authenticator = JWTAuthenticator(self.signer)\n\n data_update_publisher: Optional[DataUpdatePublisher] = None\n if self.publisher is not None:\n data_update_publisher = DataUpdatePublisher(self.publisher)\n\n # Init api routers with required dependencies\n data_updates_router = init_data_updates_router(\n data_update_publisher,\n self.data_sources_config,\n authenticator\n )\n webhook_router = init_git_webhook_router(self.pubsub.endpoint, authenticator)\n security_router = init_security_router(self.signer, StaticBearerAuthenticator(self.master_token))\n\n # mount the api routes on the app object\n app.include_router(bundles_router, tags=[\"Bundle Server\"], dependencies=[Depends(authenticator)])\n app.include_router(data_updates_router, tags=[\"Data Updates\"], dependencies=[Depends(authenticator)])\n app.include_router(webhook_router, tags=[\"Github Webhook\"])\n app.include_router(security_router, tags=[\"Security\"])\n app.include_router(self.pubsub.router, tags=[\"Pub/Sub\"])\n\n if self.jwks_endpoint is not None:\n # mount jwts (static) route\n self.jwks_endpoint.configure_app(app)\n\n # top level routes (i.e: healthchecks)\n @app.get(\"/healthcheck\", include_in_schema=False)\n @app.get(\"/\", include_in_schema=False)\n def healthcheck():\n return {\"status\": \"ok\"}\n\n return app", "def create_app():\n\n app = FastAPI()\n add_root_route(app)\n\n return app", "def get_api(self):\n from geoffrey.utils import get_api\n return get_api(self.app.routes, prefix='/api')", "def getRoutes(self):\n pass", "def __init__(self, router):\n\n self.router = router", "def get_routers(self):", "def autodiscover_api_routers():\n # TODO: Support multiple API versions by allowing \"router\" to contain a dictionary\n api_router = SharedRootDefaultRouter()\n\n for app_config in apps.get_app_configs():\n 
app = app_config.name\n if app.startswith('django.'):\n # skip Django core apps to avoid false warnings\n continue\n\n api_module = _try_import_api(app)\n router = _try_get_router(app, api_module)\n if router:\n # if router is not None it is good\n api_router.register_router(router)\n logger.debug('registered \"%s\"', app_config.name)\n\n return api_router", "def __init__(self, router):\n self._router = router", "def get_router(self, containers):\n for container in containers:\n if container.name == 'router':\n return container\n return None", "def useRouter(self, predicate, router):\n return self.mountRouter(predicate, router)", "def _try_get_router(app, api_module):\n if not api_module:\n return\n\n router = getattr(api_module, 'router', None)\n\n if not router:\n logger.warn('%s contains an api module but it is missing a \"router\" variable.', app)\n return None\n\n if not isinstance(router, BaseRouter):\n logger.warn('%s contains an api.router, but the router is not derived from BaseRouter', app)\n return None\n\n return router", "def get_next_router(self):\n if not self._routers:\n self._can_failover = False\n router_settings = self._settings.copy()\n router_settings[\"host\"] = self._settings.get(\"host\", \"localhost\")\n router_settings[\"port\"] = self._settings.get(\"port\", 33060)\n return Router(router_settings)\n\n cur_priority = self.routers_priority_list[self._cur_priority_idx]\n routers_priority_len = len(self.routers_priority_list)\n\n search = True\n while search:\n router = self._get_random_connection_params(cur_priority)\n\n if router is not None or self._cur_priority_idx >= routers_priority_len:\n if (\n self._cur_priority_idx == routers_priority_len - 1\n and len(self._get_available_routers(cur_priority)) < 2\n ):\n self._can_failover = False\n break\n\n # Search on next group\n self._cur_priority_idx += 1\n if self._cur_priority_idx < routers_priority_len:\n cur_priority = self.routers_priority_list[self._cur_priority_idx]\n\n return router", "def app(self) -> traits.RESTAware:", "def app(self) -> traits.RESTAware:", "def mvcRouter(self, router):\n pass", "def routers():\n routers = []\n\n for app_controller in __app_controllers__:\n routers.append(app_controller.router())\n\n return routers", "def create_router(self, body=None):\r\n return self.post(self.routers_path, body=body)", "def get_router(self, ns):\r\n desc = self.sendAndRecv(\"GETINFO desc/id/\" + ns.idhex + \"\\r\\n\")[0][2]\r\n sig_start = desc.find(\"\\nrouter-signature\\n\")+len(\"\\nrouter-signature\\n\")\r\n fp_base64 = sha1(desc[:sig_start]).digest().encode(\"base64\")[:-2]\r\n r = Router.build_from_desc(desc.split(\"\\n\"), ns)\r\n if fp_base64 != ns.orhash:\r\n plog(\"INFO\", \"Router descriptor for \"+ns.idhex+\" does not match ns fingerprint (NS @ \"+str(ns.updated)+\" vs Desc @ \"+str(r.published)+\")\")\r\n return None\r\n else:\r\n return r", "def _init_app(self):\n\n self._app = FastAPI(**self._app_kws)\n\n for rt, kwargs in self._app_routers:\n self._app.include_router(rt, **kwargs)\n\n self._app.dependency_overrides[get_dataset] = lambda: self._obj\n self._app.dependency_overrides[get_cache] = lambda: self.cache\n\n return self._app", "def _create_router(self, method, api, header, data):\n self._execute_api(method, api, header, data)", "def create_app():\n app = FastAPI()\n configure_rest_server(app=app, router_configs=WEB_SERVICES_ROUTER_CONFIGS, db_configs=DB_CONFIGS)\n return app", "def _init_routes(self):\n before_hooks = [\n helpers.require_accepts_json,\n 
helpers.extract_project_id,\n\n # NOTE(kgriffs): Depends on project_id being extracted, above\n functools.partial(helpers.validate_queue_name,\n self._validate.queue_name)\n ]\n\n self.app = falcon.API(before=before_hooks)\n\n queue_controller = self._storage.queue_controller\n message_controller = self._storage.message_controller\n claim_controller = self._storage.claim_controller\n\n # Home\n self.app.add_route('/v1', v1.V1Resource())\n\n # Queues Endpoints\n queue_collection = queues.CollectionResource(self._validate,\n queue_controller)\n self.app.add_route('/v1/queues', queue_collection)\n\n queue_item = queues.ItemResource(queue_controller, message_controller)\n self.app.add_route('/v1/queues/{queue_name}', queue_item)\n\n stats_endpoint = stats.Resource(queue_controller)\n self.app.add_route('/v1/queues/{queue_name}'\n '/stats', stats_endpoint)\n\n # Metadata Endpoints\n metadata_endpoint = metadata.Resource(self._wsgi_conf, self._validate,\n queue_controller)\n self.app.add_route('/v1/queues/{queue_name}'\n '/metadata', metadata_endpoint)\n\n # Messages Endpoints\n msg_collection = messages.CollectionResource(self._wsgi_conf,\n self._validate,\n message_controller)\n self.app.add_route('/v1/queues/{queue_name}'\n '/messages', msg_collection)\n\n msg_item = messages.ItemResource(message_controller)\n self.app.add_route('/v1/queues/{queue_name}'\n '/messages/{message_id}', msg_item)\n\n # Claims Endpoints\n claim_collection = claims.CollectionResource(self._wsgi_conf,\n self._validate,\n claim_controller)\n self.app.add_route('/v1/queues/{queue_name}'\n '/claims', claim_collection)\n\n claim_item = claims.ItemResource(self._wsgi_conf, self._validate,\n claim_controller)\n self.app.add_route('/v1/queues/{queue_name}'\n '/claims/{claim_id}', claim_item)\n\n # Health\n self.app.add_route('/v1/health', health.HealthResource())", "def useRouterPath(self, path, router):\n return self.mount(path, router)" ]
[ "0.75668126", "0.62573475", "0.6220323", "0.61085886", "0.6030978", "0.602395", "0.60006815", "0.59523207", "0.5933353", "0.58949184", "0.5879139", "0.5846796", "0.5825418", "0.5804262", "0.57789963", "0.5770383", "0.5753781", "0.57239616", "0.56978655", "0.5636582", "0.5636582", "0.5620236", "0.5565216", "0.5535793", "0.55327725", "0.5522818", "0.5452654", "0.5436301", "0.54036504", "0.5396294" ]
0.81418586
0